#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);
+static void handle_quad (int);
#endif
static const char *default_arch = DEFAULT_ARCH;
old_gcc_only,
unsupported_with_intel_mnemonic,
unsupported_syntax,
- unsupported
+ unsupported,
+ invalid_vsib_address,
+ unsupported_vector_index_register
};
struct _i386_insn
/* Swap operand in encoding. */
unsigned int swap_operand;
+ /* Force 32bit displacement in encoding. */
+ unsigned int disp32_encoding;
+
/* Error message. */
enum i386_error error;
};
&& !defined (TE_LINUX) \
&& !defined (TE_NETWARE) \
&& !defined (TE_FreeBSD) \
+ && !defined (TE_DragonFly) \
&& !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
static enum flag_code flag_code;
static unsigned int object_64bit;
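+/* Nonzero for the x32 ABI, where 64-bit relocations must be rejected. */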
+static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
+#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
+ || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+
+/* The ELF ABI to use. */
+enum x86_elf_abi
+{
+ I386_ABI,
+ X86_64_ABI,
+ X86_64_X32_ABI
+};
+
+static enum x86_elf_abi x86_elf_abi = I386_ABI;
+#endif
+
/* The names used to print error messages. */
static const char *flag_code_names[] =
{
CPU_COREI7_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
CPU_L1OM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
+ CPU_K1OM_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
CPU_K6_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
CPU_K8_FLAGS, 0, 0 },
{ STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
CPU_AMDFAM10_FLAGS, 0, 0 },
- { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
+ { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
CPU_BDVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
+ CPU_BDVER2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
CPU_8087_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
CPU_ANY_SSE_FLAGS, 0, 1 },
{ STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
CPU_AVX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
+ CPU_AVX2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
CPU_ANY_AVX_FLAGS, 0, 1 },
{ STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
CPU_RDRND_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
CPU_F16C_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
+ CPU_BMI2_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
CPU_FMA_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
CPU_MOVBE_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
CPU_EPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
+ CPU_LZCNT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
+ CPU_INVPCID_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
CPU_CLFLUSH_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
CPU_SSE4A_FLAGS, 0, 0 },
{ STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
CPU_ABM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
+ CPU_BMI_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
+ CPU_TBM_FLAGS, 0, 0 },
};
#ifdef I386COFF
{"sse_check", set_sse_check, 0},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{"largecomm", handle_large_common, 0},
+ {"quad", handle_quad, 8},
#else
{"file", (void (*) (int)) dwarf2_directive_file, 0},
{"loc", dwarf2_directive_loc, 0},
PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
PROCESSOR_GENERIC64, alt_long_patt will be used.
3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
- PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
+ PROCESSOR_AMDFAM10 and PROCESSOR_BD, alt_short_patt
will be used.
When -mtune= isn't used, alt_long_patt will be used if
else
patt = f32_patt;
break;
- case PROCESSOR_PENTIUMPRO:
case PROCESSOR_PENTIUM4:
case PROCESSOR_NOCONA:
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
case PROCESSOR_GENERIC64:
patt = alt_long_patt;
break;
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
- case PROCESSOR_BDVER1:
+ case PROCESSOR_BD:
patt = alt_short_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
+ case PROCESSOR_PENTIUMPRO:
case PROCESSOR_GENERIC32:
patt = f32_patt;
break;
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
- case PROCESSOR_BDVER1:
+ case PROCESSOR_BD:
case PROCESSOR_GENERIC32:
/* We use cpu_arch_isa_flags to check if we CAN optimize
with nops. */
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_long_patt;
else
|| new_flag.bitfield.cpul1om)
return;
+ /* If we are targeting Intel K1OM, we must enable it. */
+ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
+ || new_flag.bitfield.cpuk1om)
+ return;
+
as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
else
cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
*input_line_pointer = e;
demand_empty_rest_of_line ();
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_arch_l1om;
}
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || flag_code != CODE_64BIT)
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_arch_k1om;
+ }
else
return bfd_arch_i386;
}
unsigned long
-i386_mach ()
+i386_mach (void)
{
- if (!strcmp (default_arch, "x86_64"))
+ if (!strncmp (default_arch, "x86_64", 6))
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_mach_l1om;
}
- else
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_mach_k1om;
+ }
+ else if (default_arch[6] == '\0')
return bfd_mach_x86_64;
+ else
+ return bfd_mach_x64_32;
}
else if (!strcmp (default_arch, "i386"))
return bfd_mach_i386_i386;
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
}
\f
void
-md_begin ()
+md_begin (void)
{
const char *hash_err;
(void *) core_optab);
if (hash_err)
{
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("internal error: can't hash %s: %s"),
(optab - 1)->name,
hash_err);
}
{
hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
if (hash_err)
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("internal error: can't hash %s: %s"),
regtab->reg_name,
hash_err);
}
if (flag_code == CODE_64BIT)
{
+#if defined (OBJ_COFF) && defined (TE_PE)
+ x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
+ ? 32 : 16);
+#else
x86_dwarf2_return_column = 16;
+#endif
x86_cie_data_alignment = -8;
}
else
}
/* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
- if (size == 4 && flag_code != CODE_64BIT)
+ if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
sign = -1;
rel = bfd_reloc_type_lookup (stdoutput, other);
/* Use 2-byte VEX prefix if possible. */
if (i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.vexw != VEXW1
&& (i.rex & (REX_W | REX_X | REX_B)) == 0)
{
/* 2-byte VEX prefix. */
/* Don't optimize displacement for movabs since it only takes 64bit
displacement. */
if (i.disp_operands
+ && !i.disp32_encoding
&& (flag_code != CODE_64BIT
|| strcmp (mnemonic, "movabs") != 0))
optimize_disp ();
if (!current_templates)
{
- /* Check if we should swap operand in encoding. */
+ /* Check if we should swap operand or force 32bit displacement in
+ encoding. */
if (mnem_p - 2 == dot_p && dot_p[1] == 's')
i.swap_operand = 1;
+ else if (mnem_p - 4 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '3'
+ && dot_p[3] == '2')
+ i.disp32_encoding = 1;
else
goto check_suffix;
mnem_p = dot_p;
}
}
+/* Check if the vector operands are valid for the instruction. */
+
+static int
+check_VecOperands (const insn_template *t)
+{
+ /* Without VSIB byte, we can't have a vector register for index. */
+ if (!t->opcode_modifier.vecsib
+ && i.index_reg
+ && (i.index_reg->reg_type.bitfield.regxmm
+ || i.index_reg->reg_type.bitfield.regymm))
+ {
+ i.error = unsupported_vector_index_register;
+ return 1;
+ }
+
+ /* With a VSIB byte, we need a vector register for the index, and
+ PC-relative addressing is not allowed. */
+ if (t->opcode_modifier.vecsib
+ && (!i.index_reg
+ || !((t->opcode_modifier.vecsib == VecSIB128
+ && i.index_reg->reg_type.bitfield.regxmm)
+ || (t->opcode_modifier.vecsib == VecSIB256
+ && i.index_reg->reg_type.bitfield.regymm))
+ || (i.base_reg && i.base_reg->reg_num == RegRip)))
+ {
+ i.error = invalid_vsib_address;
+ return 1;
+ }
+
+ return 0;
+}
+
/* Check if operands are valid for the instruction. Update VEX
operand types. */
}
}
- /* We check register size only if size of operands can be
- encoded the canonical way. */
- check_register = t->opcode_modifier.w;
+ /* We check register size if needed. */
+ check_register = t->opcode_modifier.checkregsize;
overlap0 = operand_type_and (i.types[0], operand_types[0]);
switch (t->operands)
{
continue;
}
+ /* Check if vector operands are valid. */
+ if (check_VecOperands (t))
+ continue;
+
/* Check if VEX operands are valid. */
if (VEX_check_operands (t))
continue;
case unsupported:
err_msg = _("unsupported");
break;
+ case invalid_vsib_address:
+ err_msg = _("invalid VSIB address");
+ break;
+ case unsupported_vector_index_register:
+ err_msg = _("unsupported vector index register");
+ break;
}
as_bad (_("%s for `%s'"), err_msg,
current_templates->start->name);
if (flag_code == CODE_64BIT
&& !i.tm.operand_types[op].bitfield.inoutportreg)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
/* For instructions with VexNDS, the register-only
- source operand must be XMM or YMM register. It is
- encoded in VEX prefix. We need to clear RegMem bit
- before calling operand_type_equal. */
- i386_operand_type op = i.tm.operand_types[dest];
+ source operand must be 32/64bit integer, XMM or
+ YMM register. It is encoded in VEX prefix. We
+ need to clear RegMem bit before calling
+ operand_type_equal. */
+
+ i386_operand_type op;
+ unsigned int vvvv;
+
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[source].bitfield.baseindex
+ && i.tm.operand_types[dest].bitfield.baseindex)
+ {
+ vvvv = source;
+ source = dest;
+ }
+ else
+ vvvv = dest;
+
+ op = i.tm.operand_types[vvvv];
op.bitfield.regmem = 0;
if ((dest + 1) >= i.operands
- || (!operand_type_equal (&op, ®xmm)
+ || (op.bitfield.reg32 != 1
+ && op.bitfield.reg64 != 1
+ && !operand_type_equal (&op, ®xmm)
&& !operand_type_equal (&op, ®ymm)))
abort ();
- i.vex.register_specifier = i.op[dest].regs;
+ i.vex.register_specifier = i.op[vvvv].regs;
dest++;
}
}
break;
gas_assert (op < i.operands);
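+ /* Instructions with a VSIB byte always use a SIB byte; encode the
+ vector index register in the SIB index field. */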
+ if (i.tm.opcode_modifier.vecsib)
+ {
+ if (i.index_reg->reg_num == RegEiz
+ || i.index_reg->reg_num == RegRiz)
+ abort ();
+
+ i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
+ if (!i.base_reg)
+ {
+ i.sib.base = NO_BASE_REGISTER;
+ i.sib.scale = i.log2_scale_factor;
+ i.types[op].bitfield.disp8 = 0;
+ i.types[op].bitfield.disp16 = 0;
+ i.types[op].bitfield.disp64 = 0;
+ if (flag_code != CODE_64BIT)
+ {
+ /* Must be 32 bit. */
+ i.types[op].bitfield.disp32 = 1;
+ i.types[op].bitfield.disp32s = 0;
+ }
+ else
+ {
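+ /* In 64-bit mode the displacement is a sign-extended 32-bit value. */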
+ i.types[op].bitfield.disp32 = 0;
+ i.types[op].bitfield.disp32s = 1;
+ }
+ }
+ i.sib.index = i.index_reg->reg_num;
+ if ((i.index_reg->reg_flags & RegRex) != 0)
+ i.rex |= REX_X;
+ }
+
default_seg = &ds;
if (i.base_reg == 0)
{
i.rm.mode = 0;
if (!i.disp_operands)
- fake_zero_displacement = 1;
+ {
+ fake_zero_displacement = 1;
+ /* Instructions with VSIB byte need 32bit displacement
+ if there is no base register. */
+ if (i.tm.opcode_modifier.vecsib)
+ i.types[op].bitfield.disp32 = 1;
+ }
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.types[op] = disp32;
}
}
- else /* !i.base_reg && i.index_reg */
+ else if (!i.tm.opcode_modifier.vecsib)
{
+ /* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
i.sib.index = NO_INDEX_REGISTER;
else if (i.base_reg->reg_num == RegRip ||
i.base_reg->reg_num == RegEip)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (i.base_reg->reg_type.bitfield.reg16)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
i.types[op].bitfield.disp32 = 1;
}
- i.rm.regmem = i.base_reg->reg_num;
+ if (!i.tm.opcode_modifier.vecsib)
+ i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
i.sib.base = i.base_reg->reg_num;
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* <disp>(%esp) becomes two byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else
+ else if (!i.tm.opcode_modifier.vecsib)
{
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
}
else
{
- vex_reg = op + 1;
- gas_assert (vex_reg < i.operands);
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[op].bitfield.baseindex
+ && i.tm.operand_types[op + 1].bitfield.baseindex)
+ {
+ vex_reg = op;
+ op += 2;
+ gas_assert (mem == (vex_reg + 1)
+ && op < i.operands);
+ }
+ else
+ {
+ vex_reg = op + 1;
+ gas_assert (vex_reg < i.operands);
+ }
}
}
else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
{
- /* For instructions with VexNDD, there should be
- no memory operand and the register destination
+ /* For instructions with VexNDD, the register destination
is encoded in VEX prefix. */
- gas_assert (i.mem_operands == 0
- && (op + 2) == i.operands);
- vex_reg = op + 1;
+ if (i.mem_operands == 0)
+ {
+ /* There is no memory operand. */
+ gas_assert ((op + 2) == i.operands);
+ vex_reg = op + 1;
+ }
+ else
+ {
+ /* There are only 2 operands. */
+ gas_assert (op < 2 && i.operands == 2);
+ vex_reg = 1;
+ }
}
else
gas_assert (op < i.operands);
if (vex_reg != (unsigned int) ~0)
{
- gas_assert (i.reg_operands == 2);
+ i386_operand_type *type = &i.tm.operand_types[vex_reg];
+
- if (!operand_type_equal (&i.tm.operand_types[vex_reg],
- ®xmm)
- && !operand_type_equal (&i.tm.operand_types[vex_reg],
- ®ymm))
+ if (type->bitfield.reg32 != 1
+ && type->bitfield.reg64 != 1
+ && !operand_type_equal (type, ®xmm)
+ && !operand_type_equal (type, ®ymm))
abort ();
i.vex.register_specifier = i.op[vex_reg].regs;
output_branch (void)
{
char *p;
+ int size;
int code16;
int prefix;
relax_substateT subtype;
symbolS *sym;
offsetT off;
- code16 = 0;
- if (flag_code == CODE_16BIT)
- code16 = CODE16;
+ code16 = flag_code == CODE_16BIT ? CODE16 : 0;
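+ /* The .d32 suffix starts the branch in the BIG relax state so the
+ long displacement form is emitted. */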
+ size = i.disp32_encoding ? BIG : SMALL;
prefix = 0;
if (i.prefix[DATA_PREFIX] != 0)
*p = i.tm.base_opcode;
if ((unsigned char) *p == JUMP_PC_RELATIVE)
- subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
else if (cpu_arch_flags.bitfield.cpui386)
- subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
else
- subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
subtype |= code16;
sym = i.op[0].disps->X_add_symbol;
/* Might be a symbol version string. Don't as_bad here. */
return NULL;
}
+#endif
void
x86_cons (expressionS *exp, int size)
/* Handle @GOTOFF and the like in an expression. */
char *save;
char *gotfree_input_line;
- int adjust;
+ int adjust = 0;
save = input_line_pointer;
gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
if (intel_syntax)
i386_intel_simplify (exp);
}
-#endif
static void
signed_cons (int size)
#ifdef TE_PE
static void
-pe_directive_secrel (dummy)
- int dummy ATTRIBUTE_UNUSED;
+pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
expressionS exp;
{
/* Size it properly later. */
i.types[this_operand].bitfield.imm64 = 1;
- /* If BFD64, sign extend val. */
- if (!use_rela_relocations
+ /* If not 64bit, sign extend val. */
+ if (flag_code != CODE_64BIT
&& (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
exp->X_add_number
= (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
|| i.base_reg->reg_num !=
(i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
|| (i.index_reg
+ && !(i.index_reg->reg_type.bitfield.regxmm
+ || i.index_reg->reg_type.bitfield.regymm)
&& (!i.index_reg->reg_type.bitfield.baseindex
|| (i.prefix[ADDR_PREFIX] == 0
&& i.index_reg->reg_num != RegRiz
if ((i.base_reg
&& !i.base_reg->reg_type.bitfield.reg32)
|| (i.index_reg
+ && !i.index_reg->reg_type.bitfield.regxmm
+ && !i.index_reg->reg_type.bitfield.regymm
&& ((!i.index_reg->reg_type.bitfield.reg32
&& i.index_reg->reg_num != RegEiz)
|| !i.index_reg->reg_type.bitfield.baseindex)))
returned value. */
int
-md_estimate_size_before_relax (fragP, segment)
- fragS *fragP;
- segT segment;
+md_estimate_size_before_relax (fragS *fragP, segT segment)
{
/* We've already got fragP->fr_subtype right; all we have to do is
check for un-relaxable symbols. On an ELF system, we can't relax
Caller will turn frag into a ".space 0". */
void
-md_convert_frag (abfd, sec, fragP)
- bfd *abfd ATTRIBUTE_UNUSED;
- segT sec ATTRIBUTE_UNUSED;
- fragS *fragP;
+md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
+ fragS *fragP)
{
unsigned char *opcode;
unsigned char *where_to_put_displacement = NULL;
fragP->fr_fix += extension;
}
\f
-/* Apply a fixup (fixS) to segment data, once it has been determined
+/* Apply a fixup (fixP) to segment data, once it has been determined
+/* Apply a fixup (fixP) to segment data, once it has been determined
by our caller that we have all the info we need to fix it up.
+ Parameter valP is the pointer to the value of the bits.
+
On the 386, immediates, displacements, and data pointers are all in
the same (little-endian) format, so we don't need to care about which
we are handling. */
void
-md_apply_fix (fixP, valP, seg)
- /* The fix we're to put in. */
- fixS *fixP;
- /* Pointer to the value of the bits. */
- valueT *valP;
- /* Segment fix is from. */
- segT seg ATTRIBUTE_UNUSED;
+md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
valueT value = *valP;
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
+#define OPTION_X32 (OPTION_MD_BASE + 13)
struct option md_longopts[] =
{
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
{"64", no_argument, NULL, OPTION_64},
+#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ {"x32", no_argument, NULL, OPTION_X32},
#endif
{"divide", no_argument, NULL, OPTION_DIVIDE},
{"march", required_argument, NULL, OPTION_MARCH},
break;
}
if (*l == NULL)
- as_fatal (_("No compiled in support for x86_64"));
+ as_fatal (_("no compiled in support for x86_64"));
free (list);
}
break;
#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case OPTION_X32:
+ if (IS_ELF)
+ {
+ const char **list, **l;
+
+ list = bfd_target_list ();
+ for (l = list; *l != NULL; l++)
+ if (CONST_STRNEQ (*l, "elf32-x86-64"))
+ {
+ default_arch = "x86_64:32";
+ break;
+ }
+ if (*l == NULL)
+ as_fatal (_("no compiled in support for 32bit x86_64"));
+ free (list);
+ }
+ else
+ as_fatal (_("32bit x86_64 is only supported for ELF"));
+ break;
+#endif
+
case OPTION_32:
default_arch = "i386";
break;
do
{
if (*arch == '.')
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ as_fatal (_("invalid -march= option: `%s'"), arg);
next = strchr (arch, '+');
if (next)
*next++ = '\0';
else
cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
break;
}
}
if (j >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ as_fatal (_("invalid -march= option: `%s'"), arg);
arch = next;
}
case OPTION_MTUNE:
if (*arg == '.')
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
if (strcmp (arg, cpu_arch [j].name) == 0)
}
}
if (j >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
break;
case OPTION_MMNEMONIC:
else if (strcasecmp (arg, "intel") == 0)
intel_mnemonic = 1;
else
- as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
+ as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
break;
case OPTION_MSYNTAX:
else if (strcasecmp (arg, "intel") == 0)
intel_syntax = 1;
else
- as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
+ as_fatal (_("invalid -msyntax= option: `%s'"), arg);
break;
case OPTION_MINDEX_REG:
else if (strcasecmp (arg, "none") == 0)
sse_check = sse_check_none;
else
- as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
+ as_fatal (_("invalid -msse-check= option: `%s'"), arg);
break;
case OPTION_MAVXSCALAR:
else if (strcasecmp (arg, "256") == 0)
avxscalar = vex256;
else
- as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
+ as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
break;
default:
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
fprintf (stream, _("\
- --32/--64 generate 32bit/64bit code\n"));
+ --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
fprintf (stream, _("\
const char *
i386_target_format (void)
{
- if (!strcmp (default_arch, "x86_64"))
- update_code_flag (CODE_64BIT, 1);
+ if (!strncmp (default_arch, "x86_64", 6))
+ {
+ update_code_flag (CODE_64BIT, 1);
+ if (default_arch[6] == '\0')
+ x86_elf_abi = X86_64_ABI;
+ else
+ x86_elf_abi = X86_64_X32_ABI;
+ }
else if (!strcmp (default_arch, "i386"))
update_code_flag (CODE_32BIT, 1);
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
if (cpu_flags_all_zero (&cpu_arch_isa_flags))
cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
case bfd_target_elf_flavour:
{
- if (flag_code == CODE_64BIT)
+ const char *format;
+
+ switch (x86_elf_abi)
{
+ default:
+ format = ELF_TARGET_FORMAT;
+ break;
+ case X86_64_ABI:
+ use_rela_relocations = 1;
object_64bit = 1;
+ format = ELF_TARGET_FORMAT64;
+ break;
+ case X86_64_X32_ABI:
use_rela_relocations = 1;
+ object_64bit = 1;
+ disallow_64bit_reloc = 1;
+ format = ELF_TARGET_FORMAT32;
+ break;
}
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (flag_code != CODE_64BIT)
+ if (x86_elf_abi != X86_64_ABI)
as_fatal (_("Intel L1OM is 64bit only"));
return ELF_TARGET_L1OM_FORMAT;
}
+ if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (x86_elf_abi != X86_64_ABI)
+ as_fatal (_("Intel K1OM is 64bit only"));
+ return ELF_TARGET_K1OM_FORMAT;
+ }
else
- return (flag_code == CODE_64BIT
- ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
+ return format;
}
#endif
#if defined (OBJ_MACH_O)
#endif
\f
symbolS *
-md_undefined_symbol (name)
- char *name;
+md_undefined_symbol (char *name)
{
if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
&& name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
/* Round up a section size to the appropriate boundary. */
valueT
-md_section_align (segment, size)
- segT segment ATTRIBUTE_UNUSED;
- valueT size;
+md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
}
arelent *
-tc_gen_reloc (section, fixp)
- asection *section ATTRIBUTE_UNUSED;
- fixS *fixp;
+tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
arelent *rel;
bfd_reloc_code_real_type code;
/* Use the rela in 64bit mode. */
else
{
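+ /* The x32 object format cannot represent relocations that need
+ a 64-bit field. */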
+ if (disallow_64bit_reloc)
+ switch (code)
+ {
+ case BFD_RELOC_64:
+ case BFD_RELOC_X86_64_DTPOFF64:
+ case BFD_RELOC_X86_64_TPOFF64:
+ case BFD_RELOC_64_PCREL:
+ case BFD_RELOC_X86_64_GOTOFF64:
+ case BFD_RELOC_X86_64_GOT64:
+ case BFD_RELOC_X86_64_GOTPCREL64:
+ case BFD_RELOC_X86_64_GOTPC64:
+ case BFD_RELOC_X86_64_GOTPLT64:
+ case BFD_RELOC_X86_64_PLTOFF64:
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("cannot represent relocation type %s in x32 mode"),
+ bfd_get_reloc_code_name (code));
+ break;
+ default:
+ break;
+ }
+
if (!fixp->fx_pcrel)
rel->addend = fixp->fx_offset;
else
if (letter == 'l')
return SHF_X86_64_LARGE;
- *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
}
else
- *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
return -1;
}
bss_section = saved_bss_section;
}
}
+
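+/* Handle the .quad directive for x32, where 64-bit relocations are
+ not available: a non-constant value is emitted as a 32-bit value
+ and then zero-extended to 8 bytes. Other ABIs simply use cons. */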
+static void
+handle_quad (int nbytes)
+{
+ expressionS exp;
+
+ if (x86_elf_abi != X86_64_X32_ABI)
+ {
+ cons (nbytes);
+ return;
+ }
+
+ if (is_it_end_of_statement ())
+ {
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ do
+ {
+ if (*input_line_pointer == '"')
+ {
+ as_bad (_("unexpected `\"' in expression"));
+ ignore_rest_of_line ();
+ return;
+ }
+ x86_cons (&exp, nbytes);
+ /* Output 4 bytes if not constant. */
+ if (exp.X_op != O_constant)
+ nbytes = 4;
+ emit_expr (&exp, (unsigned int) nbytes);
+ /* Zero-extend to 8 bytes if not constant. */
+ if (nbytes == 4)
+ {
+ memset (&exp, '\0', sizeof (exp));
+ exp.X_op = O_constant;
+ emit_expr (&exp, nbytes);
+ }
+ nbytes = 8;
+ }
+ while (*input_line_pointer++ == ',');
+
+ input_line_pointer--; /* Put terminator back into stream. */
+
+ demand_empty_rest_of_line ();
+}
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */