/* tc-i386.c -- Assemble code for the Intel 80386
Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler.
typedef struct
{
const char *name; /* arch name */
+ unsigned int len; /* arch string length */
enum processor_type type; /* arch type */
i386_cpu_flags flags; /* cpu feature flags */
+ unsigned int skip; /* show_arch should skip this. */
+ unsigned int negated; /* turn off indicated flags. */
}
arch_entry;
+static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
const reg_entry *regs;
};
+/* Reasons why match_template can reject a candidate template; the
+   last value stored in i.error selects the diagnostic printed when
+   no template matches at all.  */
+enum i386_error
+  {
+    operand_size_mismatch,
+    operand_type_mismatch,
+    register_type_mismatch,
+    number_of_operands_mismatch,
+    invalid_instruction_suffix,
+    bad_imm4,
+    old_gcc_only,
+    unsupported_with_intel_mnemonic,
+    unsupported_syntax,
+    unsupported
+  };
+
struct _i386_insn
{
/* TM holds the template for the insn were currently assembling. */
/* Swap operand in encoding. */
unsigned int swap_operand;
+
+ /* Error message. */
+ enum i386_error error;
};
typedef struct _i386_insn i386_insn;
/* Encode SSE instructions with VEX prefix. */
static unsigned int sse2avx;
+/* Encode scalar AVX instructions with specific vector length.  Being
+   a zero-initialized static, the default is vex128.  */
+static enum
+  {
+    vex128 = 0,
+    vex256
+  } avxscalar;
+
/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
static symbolS *GOT_symbol;
static const arch_entry cpu_arch[] =
{
- { "generic32", PROCESSOR_GENERIC32,
- CPU_GENERIC32_FLAGS },
- { "generic64", PROCESSOR_GENERIC64,
- CPU_GENERIC64_FLAGS },
- { "i8086", PROCESSOR_UNKNOWN,
- CPU_NONE_FLAGS },
- { "i186", PROCESSOR_UNKNOWN,
- CPU_I186_FLAGS },
- { "i286", PROCESSOR_UNKNOWN,
- CPU_I286_FLAGS },
- { "i386", PROCESSOR_I386,
- CPU_I386_FLAGS },
- { "i486", PROCESSOR_I486,
- CPU_I486_FLAGS },
- { "i586", PROCESSOR_PENTIUM,
- CPU_I586_FLAGS },
- { "i686", PROCESSOR_PENTIUMPRO,
- CPU_I686_FLAGS },
- { "pentium", PROCESSOR_PENTIUM,
- CPU_I586_FLAGS },
- { "pentiumpro", PROCESSOR_PENTIUMPRO,
- CPU_I686_FLAGS },
- { "pentiumii", PROCESSOR_PENTIUMPRO,
- CPU_P2_FLAGS },
- { "pentiumiii",PROCESSOR_PENTIUMPRO,
- CPU_P3_FLAGS },
- { "pentium4", PROCESSOR_PENTIUM4,
- CPU_P4_FLAGS },
- { "prescott", PROCESSOR_NOCONA,
- CPU_CORE_FLAGS },
- { "nocona", PROCESSOR_NOCONA,
- CPU_NOCONA_FLAGS },
- { "yonah", PROCESSOR_CORE,
- CPU_CORE_FLAGS },
- { "core", PROCESSOR_CORE,
- CPU_CORE_FLAGS },
- { "merom", PROCESSOR_CORE2,
- CPU_CORE2_FLAGS },
- { "core2", PROCESSOR_CORE2,
- CPU_CORE2_FLAGS },
- { "corei7", PROCESSOR_COREI7,
- CPU_COREI7_FLAGS },
- { "l1om", PROCESSOR_L1OM,
- CPU_L1OM_FLAGS },
- { "k6", PROCESSOR_K6,
- CPU_K6_FLAGS },
- { "k6_2", PROCESSOR_K6,
- CPU_K6_2_FLAGS },
- { "athlon", PROCESSOR_ATHLON,
- CPU_ATHLON_FLAGS },
- { "sledgehammer", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "opteron", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "k8", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "amdfam10", PROCESSOR_AMDFAM10,
- CPU_AMDFAM10_FLAGS },
- { ".8087", PROCESSOR_UNKNOWN,
- CPU_8087_FLAGS },
- { ".287", PROCESSOR_UNKNOWN,
- CPU_287_FLAGS },
- { ".387", PROCESSOR_UNKNOWN,
- CPU_387_FLAGS },
- { ".no87", PROCESSOR_UNKNOWN,
- CPU_ANY87_FLAGS },
- { ".mmx", PROCESSOR_UNKNOWN,
- CPU_MMX_FLAGS },
- { ".nommx", PROCESSOR_UNKNOWN,
- CPU_3DNOWA_FLAGS },
- { ".sse", PROCESSOR_UNKNOWN,
- CPU_SSE_FLAGS },
- { ".sse2", PROCESSOR_UNKNOWN,
- CPU_SSE2_FLAGS },
- { ".sse3", PROCESSOR_UNKNOWN,
- CPU_SSE3_FLAGS },
- { ".ssse3", PROCESSOR_UNKNOWN,
- CPU_SSSE3_FLAGS },
- { ".sse4.1", PROCESSOR_UNKNOWN,
- CPU_SSE4_1_FLAGS },
- { ".sse4.2", PROCESSOR_UNKNOWN,
- CPU_SSE4_2_FLAGS },
- { ".sse4", PROCESSOR_UNKNOWN,
- CPU_SSE4_2_FLAGS },
- { ".nosse", PROCESSOR_UNKNOWN,
- CPU_ANY_SSE_FLAGS },
- { ".avx", PROCESSOR_UNKNOWN,
- CPU_AVX_FLAGS },
- { ".noavx", PROCESSOR_UNKNOWN,
- CPU_ANY_AVX_FLAGS },
- { ".vmx", PROCESSOR_UNKNOWN,
- CPU_VMX_FLAGS },
- { ".smx", PROCESSOR_UNKNOWN,
- CPU_SMX_FLAGS },
- { ".xsave", PROCESSOR_UNKNOWN,
- CPU_XSAVE_FLAGS },
- { ".aes", PROCESSOR_UNKNOWN,
- CPU_AES_FLAGS },
- { ".pclmul", PROCESSOR_UNKNOWN,
- CPU_PCLMUL_FLAGS },
- { ".clmul", PROCESSOR_UNKNOWN,
- CPU_PCLMUL_FLAGS },
- { ".fma", PROCESSOR_UNKNOWN,
- CPU_FMA_FLAGS },
- { ".fma4", PROCESSOR_UNKNOWN,
- CPU_FMA4_FLAGS },
- { ".lwp", PROCESSOR_UNKNOWN,
- CPU_LWP_FLAGS },
- { ".movbe", PROCESSOR_UNKNOWN,
- CPU_MOVBE_FLAGS },
- { ".ept", PROCESSOR_UNKNOWN,
- CPU_EPT_FLAGS },
- { ".clflush", PROCESSOR_UNKNOWN,
- CPU_CLFLUSH_FLAGS },
- { ".syscall", PROCESSOR_UNKNOWN,
- CPU_SYSCALL_FLAGS },
- { ".rdtscp", PROCESSOR_UNKNOWN,
- CPU_RDTSCP_FLAGS },
- { ".3dnow", PROCESSOR_UNKNOWN,
- CPU_3DNOW_FLAGS },
- { ".3dnowa", PROCESSOR_UNKNOWN,
- CPU_3DNOWA_FLAGS },
- { ".padlock", PROCESSOR_UNKNOWN,
- CPU_PADLOCK_FLAGS },
- { ".pacifica", PROCESSOR_UNKNOWN,
- CPU_SVME_FLAGS },
- { ".svme", PROCESSOR_UNKNOWN,
- CPU_SVME_FLAGS },
- { ".sse4a", PROCESSOR_UNKNOWN,
- CPU_SSE4A_FLAGS },
- { ".abm", PROCESSOR_UNKNOWN,
- CPU_ABM_FLAGS },
+ /* Do not replace the first two entries - i386_target_format()
+ relies on them being there in this order. */
+ { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
+ CPU_GENERIC32_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
+ CPU_GENERIC64_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
+ CPU_NONE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
+ CPU_I186_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
+ CPU_I286_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
+ CPU_I386_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
+ CPU_I486_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
+ CPU_I586_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
+ CPU_I686_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
+ CPU_I586_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
+ CPU_PENTIUMPRO_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
+ CPU_P2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
+ CPU_P3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
+ CPU_P4_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
+ CPU_CORE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
+ CPU_NOCONA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
+ CPU_CORE_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
+ CPU_CORE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
+ CPU_CORE2_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
+ CPU_CORE2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
+ CPU_COREI7_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
+ CPU_L1OM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
+ CPU_K6_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
+ CPU_K6_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
+ CPU_ATHLON_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
+ CPU_AMDFAM10_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
+ CPU_BDVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
+ CPU_8087_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
+ CPU_287_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
+ CPU_387_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
+ CPU_ANY87_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
+ CPU_MMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
+ CPU_3DNOWA_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
+ CPU_SSE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
+ CPU_SSE2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
+ CPU_SSE3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
+ CPU_SSSE3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
+ CPU_ANY_SSE_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
+ CPU_AVX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
+ CPU_ANY_AVX_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
+ CPU_VMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
+ CPU_SMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
+ CPU_XSAVE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
+ CPU_XSAVEOPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
+ CPU_AES_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
+ CPU_PCLMUL_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
+ CPU_PCLMUL_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
+ CPU_FSGSBASE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
+ CPU_RDRND_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
+ CPU_F16C_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
+ CPU_FMA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
+ CPU_FMA4_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
+ CPU_XOP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
+ CPU_LWP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
+ CPU_MOVBE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
+ CPU_EPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
+ CPU_CLFLUSH_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
+ CPU_NOP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
+ CPU_SYSCALL_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
+ CPU_RDTSCP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
+ CPU_3DNOW_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
+ CPU_3DNOWA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
+ CPU_PADLOCK_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
+ CPU_SVME_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
+ CPU_SVME_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
+ CPU_SSE4A_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
+ CPU_ABM_FLAGS, 0, 0 },
};
#ifdef I386COFF
PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
PROCESSOR_GENERIC64, alt_long_patt will be used.
3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
- PROCESSOR_AMDFAM10, alt_short_patt will be used.
+ PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
+ will be used.
When -mtune= isn't used, alt_long_patt will be used if
- cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
+ cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
be used.
When -march= or .arch is used, we can't use anything beyond
{
case PROCESSOR_UNKNOWN:
/* We use cpu_arch_isa_flags to check if we SHOULD
- optimize for Cpu686. */
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
+ optimize with nops. */
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_long_patt;
else
patt = f32_patt;
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
+ case PROCESSOR_BDVER1:
patt = alt_short_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
+ case PROCESSOR_BDVER1:
case PROCESSOR_GENERIC32:
/* We use cpu_arch_isa_flags to check if we CAN optimize
- for Cpu686. */
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
+ with nops. */
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_short_patt;
else
patt = f32_patt;
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_long_patt;
else
patt = f32_patt;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
+static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
enum operand_type
{
}
}
- if (match
- || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
+ if (match)
return match;
+ else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
+ {
+mismatch:
+ i.error = operand_size_mismatch;
+ return 0;
+ }
/* Check reverse. */
gas_assert (i.operands == 2);
{
if (t->operand_types[j].bitfield.acc
&& !match_reg_size (t, j ? 0 : 1))
- {
- match = 0;
- break;
- }
+ goto mismatch;
if (i.types[j].bitfield.mem
&& !match_mem_size (t, j ? 0 : 1))
- {
- match = 0;
- break;
- }
+ goto mismatch;
}
return match;
temp.bitfield.xmmword = 0;
temp.bitfield.ymmword = 0;
if (operand_type_all_zero (&temp))
- return 0;
+ goto mismatch;
- return (given.bitfield.baseindex == overlap.bitfield.baseindex
- && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
+ if (given.bitfield.baseindex == overlap.bitfield.baseindex
+ && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
+ return 1;
+
+mismatch:
+ i.error = operand_type_mismatch;
+ return 0;
}
/* If given types g0 and g1 are registers they must be of the same type
t1.bitfield.reg64 = 1;
}
- return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
- && !(t0.bitfield.reg16 & t1.bitfield.reg16)
- && !(t0.bitfield.reg32 & t1.bitfield.reg32)
- && !(t0.bitfield.reg64 & t1.bitfield.reg64));
+ if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
+ && !(t0.bitfield.reg16 & t1.bitfield.reg16)
+ && !(t0.bitfield.reg32 & t1.bitfield.reg32)
+ && !(t0.bitfield.reg64 & t1.bitfield.reg64))
+ return 1;
+
+ i.error = register_type_mismatch;
+
+ return 0;
}
static INLINE unsigned int
#endif
} /* fits_in_unsigned_long() */
+/* Return nonzero if NUM fits in an unsigned 4-bit immediate, i.e.
+   NUM equals its own low four bits (0..15).  */
+static INLINE int
+fits_in_imm4 (offsetT num)
+{
+  return (num & 0xf) == num;
+}
+
static i386_operand_type
smallest_imm_type (offsetT num)
{
}
+/* Switch the assembler into the 16-, 32- or 64-bit mode given by
+   VALUE.  If the current arch cannot support the requested mode, the
+   problem is reported fatally when CHECK is nonzero, otherwise with
+   as_bad.  */
static void
-set_code_flag (int value)
+update_code_flag (int value, int check)
{
+  /* Printf-like diagnostic hook: set to as_fatal or as_bad below,
+     depending on CHECK.  */
+  PRINTF_LIKE ((*as_error));
+
  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
}
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
-      as_bad (_("64bit mode not supported on this CPU."));
+      if (check)
+	as_error = as_fatal;
+      else
+	as_error = as_bad;
+      /* Name the offending arch in the message; fall back to the
+	 default arch when no .arch/-march was given.  */
+      (*as_error) (_("64bit mode not supported on `%s'."),
+		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
-      as_bad (_("32bit mode not supported on this CPU."));
+      if (check)
+	as_error = as_fatal;
+      else
+	as_error = as_bad;
+      (*as_error) (_("32bit mode not supported on `%s'."),
+		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
+/* Non-fatal wrapper around update_code_flag: unsupported modes are
+   reported with as_bad (CHECK == 0) instead of aborting assembly.  */
+static void
+set_code_flag (int value)
+{
+  update_code_flag (value, 0);
+}
+
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
{
char *string = input_line_pointer;
int e = get_symbol_end ();
- unsigned int i;
+ unsigned int j;
i386_cpu_flags flags;
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (string, cpu_arch[i].name) == 0)
+ if (strcmp (string, cpu_arch[j].name) == 0)
{
- check_cpu_arch_compatible (string, cpu_arch[i].flags);
+ check_cpu_arch_compatible (string, cpu_arch[j].flags);
if (*string != '.')
{
- cpu_arch_name = cpu_arch[i].name;
+ cpu_arch_name = cpu_arch[j].name;
cpu_sub_arch_name = NULL;
- cpu_arch_flags = cpu_arch[i].flags;
+ cpu_arch_flags = cpu_arch[j].flags;
if (flag_code == CODE_64BIT)
{
cpu_arch_flags.bitfield.cpu64 = 1;
cpu_arch_flags.bitfield.cpu64 = 0;
cpu_arch_flags.bitfield.cpuno64 = 1;
}
- cpu_arch_isa = cpu_arch[i].type;
- cpu_arch_isa_flags = cpu_arch[i].flags;
+ cpu_arch_isa = cpu_arch[j].type;
+ cpu_arch_isa_flags = cpu_arch[j].flags;
if (!cpu_arch_tune_set)
{
cpu_arch_tune = cpu_arch_isa;
break;
}
- if (strncmp (string + 1, "no", 2))
+ if (!cpu_arch[j].negated)
flags = cpu_flags_or (cpu_arch_flags,
- cpu_arch[i].flags);
+ cpu_arch[j].flags);
else
flags = cpu_flags_and_not (cpu_arch_flags,
- cpu_arch[i].flags);
+ cpu_arch[j].flags);
if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name,
- cpu_arch[i].name,
+ cpu_arch[j].name,
(const char *) NULL);
free (name);
}
else
- cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
+ cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
}
*input_line_pointer = e;
return;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
+ if (j >= ARRAY_SIZE (cpu_arch))
as_bad (_("no such architecture: `%s'"), string);
*input_line_pointer = e;
static void
pi (char *line, i386_insn *x)
{
- unsigned int i;
+ unsigned int j;
fprintf (stdout, "%s: template ", line);
pte (&x->tm);
(x->rex & REX_R) != 0,
(x->rex & REX_X) != 0,
(x->rex & REX_B) != 0);
- for (i = 0; i < x->operands; i++)
+ for (j = 0; j < x->operands; j++)
{
- fprintf (stdout, " #%d: ", i + 1);
- pt (x->types[i]);
+ fprintf (stdout, " #%d: ", j + 1);
+ pt (x->types[j]);
fprintf (stdout, "\n");
- if (x->types[i].bitfield.reg8
- || x->types[i].bitfield.reg16
- || x->types[i].bitfield.reg32
- || x->types[i].bitfield.reg64
- || x->types[i].bitfield.regmmx
- || x->types[i].bitfield.regxmm
- || x->types[i].bitfield.regymm
- || x->types[i].bitfield.sreg2
- || x->types[i].bitfield.sreg3
- || x->types[i].bitfield.control
- || x->types[i].bitfield.debug
- || x->types[i].bitfield.test)
- fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
- if (operand_type_check (x->types[i], imm))
- pe (x->op[i].imms);
- if (operand_type_check (x->types[i], disp))
- pe (x->op[i].disps);
+ if (x->types[j].bitfield.reg8
+ || x->types[j].bitfield.reg16
+ || x->types[j].bitfield.reg32
+ || x->types[j].bitfield.reg64
+ || x->types[j].bitfield.regmmx
+ || x->types[j].bitfield.regxmm
+ || x->types[j].bitfield.regymm
+ || x->types[j].bitfield.sreg2
+ || x->types[j].bitfield.sreg3
+ || x->types[j].bitfield.control
+ || x->types[j].bitfield.debug
+ || x->types[j].bitfield.test)
+ fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
+ if (operand_type_check (x->types[j], imm))
+ pe (x->op[j].imms);
+ if (operand_type_check (x->types[j], disp))
+ pe (x->op[j].disps);
}
}
static void
pte (insn_template *t)
{
- unsigned int i;
+ unsigned int j;
fprintf (stdout, " %d operands ", t->operands);
fprintf (stdout, "opcode %x ", t->base_opcode);
if (t->extension_opcode != None)
if (t->opcode_modifier.w)
fprintf (stdout, "W");
fprintf (stdout, "\n");
- for (i = 0; i < t->operands; i++)
+ for (j = 0; j < t->operands; j++)
{
- fprintf (stdout, " #%d type ", i + 1);
- pt (t->operand_types[i]);
+ fprintf (stdout, " #%d type ", j + 1);
+ pt (t->operand_types[j]);
fprintf (stdout, "\n");
}
}
{
if (other != NO_RELOC)
{
- reloc_howto_type *reloc;
+ reloc_howto_type *rel;
if (size == 8)
switch (other)
if (size == 4 && flag_code != CODE_64BIT)
sign = -1;
- reloc = bfd_reloc_type_lookup (stdoutput, other);
- if (!reloc)
+ rel = bfd_reloc_type_lookup (stdoutput, other);
+ if (!rel)
as_bad (_("unknown relocation (%u)"), other);
- else if (size != bfd_get_reloc_size (reloc))
+ else if (size != bfd_get_reloc_size (rel))
as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
- bfd_get_reloc_size (reloc),
+ bfd_get_reloc_size (rel),
size);
- else if (pcrel && !reloc->pc_relative)
+ else if (pcrel && !rel->pc_relative)
as_bad (_("non-pc-relative relocation for pc-relative field"));
- else if ((reloc->complain_on_overflow == complain_overflow_signed
+ else if ((rel->complain_on_overflow == complain_overflow_signed
&& !sign)
- || (reloc->complain_on_overflow == complain_overflow_unsigned
+ || (rel->complain_on_overflow == complain_overflow_unsigned
&& sign > 0))
as_bad (_("relocated field and relocation type differ in signedness"));
else
operand. */
if (!i.swap_operand
&& i.operands == i.reg_operands
- && i.tm.opcode_modifier.vex0f
+ && i.tm.opcode_modifier.vexopcode == VEX0F
&& i.tm.opcode_modifier.s
&& i.rex == REX_B)
{
i.tm = t[1];
}
- vector_length = i.tm.opcode_modifier.vex == 2 ? 1 : 0;
+ if (i.tm.opcode_modifier.vex == VEXScalar)
+ vector_length = avxscalar;
+ else
+ vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
switch ((i.tm.base_opcode >> 8) & 0xff)
{
}
/* Use 2-byte VEX prefix if possible. */
- if (i.tm.opcode_modifier.vex0f
+ if (i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.vexw != VEXW1
&& (i.rex & (REX_W | REX_X | REX_B)) == 0)
{
/* 2-byte VEX prefix. */
unsigned int m, w;
i.vex.length = 3;
- i.vex.bytes[0] = 0xc4;
-
- if (i.tm.opcode_modifier.vex0f)
- m = 0x1;
- else if (i.tm.opcode_modifier.vex0f38)
- m = 0x2;
- else if (i.tm.opcode_modifier.vex0f3a)
- m = 0x3;
- else if (i.tm.opcode_modifier.xop09)
+
+ switch (i.tm.opcode_modifier.vexopcode)
{
+ case VEX0F:
+ m = 0x1;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case VEX0F38:
+ m = 0x2;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case VEX0F3A:
+ m = 0x3;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case XOP08:
+ m = 0x8;
+ i.vex.bytes[0] = 0x8f;
+ break;
+ case XOP09:
m = 0x9;
i.vex.bytes[0] = 0x8f;
- }
- else if (i.tm.opcode_modifier.xop0a)
- {
+ break;
+ case XOP0A:
m = 0xa;
i.vex.bytes[0] = 0x8f;
+ break;
+ default:
+ abort ();
}
- else
- abort ();
/* The high 3 bits of the second VEX byte are 1's compliment
of RXB bits from REX. */
/* Check the REX.W bit. */
w = (i.rex & REX_W) ? 1 : 0;
- if (i.tm.opcode_modifier.vexw0 || i.tm.opcode_modifier.vexw1)
+ if (i.tm.opcode_modifier.vexw)
{
if (w)
abort ();
- if (i.tm.opcode_modifier.vexw1)
+ if (i.tm.opcode_modifier.vexw == VEXW1)
w = 1;
}
if (i.tm.opcode_modifier.vex)
build_vex_prefix (t);
- /* Handle conversion of 'int $3' --> special int3 insn. */
- if (i.tm.base_opcode == INT_OPCODE && i.op[0].imms->X_add_number == 3)
+ /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
+ instructions may define INT_OPCODE as well, so avoid this corner
+ case for those instructions that use MODRM. */
+ if (i.tm.base_opcode == INT_OPCODE
+ && !i.tm.opcode_modifier.modrm
+ && i.op[0].imms->X_add_number == 3)
{
i.tm.base_opcode = INT3_OPCODE;
i.imm_operands = 0;
{
if (i.op[op].disps->X_op == O_constant)
{
- offsetT disp = i.op[op].disps->X_add_number;
+ offsetT op_disp = i.op[op].disps->X_add_number;
if (i.types[op].bitfield.disp16
- && (disp & ~(offsetT) 0xffff) == 0)
+ && (op_disp & ~(offsetT) 0xffff) == 0)
{
/* If this operand is at most 16 bits, convert
to a signed 16 bit number and don't use 64bit
displacement. */
- disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
+ op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
i.types[op].bitfield.disp64 = 0;
}
if (i.types[op].bitfield.disp32
- && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
+ && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
{
/* If this operand is at most 32 bits, convert
to a signed 32 bit number and don't use 64bit
displacement. */
- disp &= (((offsetT) 2 << 31) - 1);
- disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
+ op_disp &= (((offsetT) 2 << 31) - 1);
+ op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
i.types[op].bitfield.disp64 = 0;
}
- if (!disp && i.types[op].bitfield.baseindex)
+ if (!op_disp && i.types[op].bitfield.baseindex)
{
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (flag_code == CODE_64BIT)
{
- if (fits_in_signed_long (disp))
+ if (fits_in_signed_long (op_disp))
{
i.types[op].bitfield.disp64 = 0;
i.types[op].bitfield.disp32s = 1;
}
if (i.prefix[ADDR_PREFIX]
- && fits_in_unsigned_long (disp))
+ && fits_in_unsigned_long (op_disp))
i.types[op].bitfield.disp32 = 1;
}
if ((i.types[op].bitfield.disp32
|| i.types[op].bitfield.disp32s
|| i.types[op].bitfield.disp16)
- && fits_in_signed_byte (disp))
+ && fits_in_signed_byte (op_disp))
i.types[op].bitfield.disp8 = 1;
}
else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
}
}
+/* Check if operands are valid for the instruction.  Update VEX
+   operand types.  NOTE the inverted return convention: returns 1
+   (after setting i.error to bad_imm4) when the template requires a
+   4-bit immediate first operand and the actual operand is not a
+   constant in 0..15; returns 0 when the template is acceptable or is
+   not a VEX template at all.  */
+
+static int
+VEX_check_operands (const insn_template *t)
+{
+  if (!t->opcode_modifier.vex)
+    return 0;
+
+  /* Only check VEX_Imm4, which must be the first operand.  */
+  if (t->operand_types[0].bitfield.vec_imm4)
+    {
+      if (i.op[0].imms->X_op != O_constant
+	  || !fits_in_imm4 (i.op[0].imms->X_add_number))
+	{
+	  i.error = bad_imm4;
+	  return 1;
+	}
+
+      /* Turn off Imm8 so that update_imm won't complain.  */
+      i.types[0] = vec_imm4;
+    }
+
+  return 0;
+}
+
static const insn_template *
match_template (void)
{
else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
suffix_check.no_ldsuf = 1;
+ /* Must have right number of operands. */
+ i.error = number_of_operands_mismatch;
+
for (t = current_templates->start; t < current_templates->end; t++)
{
addr_prefix_disp = -1;
- /* Must have right number of operands. */
if (i.operands != t->operands)
continue;
/* Check processor support. */
+ i.error = unsupported;
found_cpu_match = (cpu_flags_match (t)
== CPU_FLAGS_PERFECT_MATCH);
if (!found_cpu_match)
continue;
/* Check old gcc support. */
+ i.error = old_gcc_only;
if (!old_gcc && t->opcode_modifier.oldgcc)
continue;
/* Check AT&T mnemonic. */
+ i.error = unsupported_with_intel_mnemonic;
if (intel_mnemonic && t->opcode_modifier.attmnemonic)
continue;
- /* Check AT&T syntax Intel syntax. */
+ /* Check AT&T/Intel syntax. */
+ i.error = unsupported_syntax;
if ((intel_syntax && t->opcode_modifier.attsyntax)
|| (!intel_syntax && t->opcode_modifier.intelsyntax))
continue;
/* Check the suffix, except for some instructions in intel mode. */
+ i.error = invalid_instruction_suffix;
if ((!intel_syntax || !t->opcode_modifier.ignoresize)
&& ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
|| (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
continue;
}
+ /* Check if VEX operands are valid. */
+ if (VEX_check_operands (t))
+ continue;
+
/* We've found a match; break out of loop. */
break;
}
if (t == current_templates->end)
{
/* We found no match. */
- if (intel_syntax)
- as_bad (_("ambiguous operand size or operands invalid for `%s'"),
- current_templates->start->name);
- else
- as_bad (_("suffix or operands invalid for `%s'"),
- current_templates->start->name);
+ const char *err_msg;
+ switch (i.error)
+ {
+ default:
+ abort ();
+ case operand_size_mismatch:
+ err_msg = _("operand size mismatch");
+ break;
+ case operand_type_mismatch:
+ err_msg = _("operand type mismatch");
+ break;
+ case register_type_mismatch:
+ err_msg = _("register type mismatch");
+ break;
+ case number_of_operands_mismatch:
+ err_msg = _("number of operands mismatch");
+ break;
+ case invalid_instruction_suffix:
+ err_msg = _("invalid instruction suffix");
+ break;
+ case bad_imm4:
+ err_msg = _("Imm4 isn't the first operand");
+ break;
+ case old_gcc_only:
+ err_msg = _("only supported with old gcc");
+ break;
+ case unsupported_with_intel_mnemonic:
+ err_msg = _("unsupported with Intel mnemonic");
+ break;
+ case unsupported_syntax:
+ err_msg = _("unsupported syntax");
+ break;
+ case unsupported:
+ err_msg = _("unsupported");
+ break;
+ }
+ as_bad (_("%s for `%s'"), err_msg,
+ current_templates->start->name);
return NULL;
}
}
else if (i.suffix == BYTE_MNEM_SUFFIX)
{
- if (!check_byte_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_bsuf)
+ i.suffix = 0;
+ else if (!check_byte_reg ())
return 0;
}
else if (i.suffix == LONG_MNEM_SUFFIX)
{
- if (!check_long_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_lsuf)
+ i.suffix = 0;
+ else if (!check_long_reg ())
return 0;
}
else if (i.suffix == QWORD_MNEM_SUFFIX)
}
else if (i.suffix == WORD_MNEM_SUFFIX)
{
- if (!check_word_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_wsuf)
+ i.suffix = 0;
+ else if (!check_word_reg ())
return 0;
}
else if (i.suffix == XMMWORD_MNEM_SUFFIX
if (i.types[op].bitfield.reg8)
continue;
- /* Don't generate this warning if not needed. */
- if (intel_syntax && i.tm.opcode_modifier.byteokintel)
- continue;
-
/* crc32 doesn't generate this warning. */
if (i.tm.base_opcode == 0xf20f38f0)
continue;
+/* Report that the implicit operand slot did not hold the required
+   register: xmm0 when XMM is nonzero, ymm0 otherwise.  The message
+   names the last operand under Intel syntax and the first under
+   AT&T syntax.  Always returns 0 so callers can propagate failure.  */
static int
bad_implicit_operand (int xmm)
{
-  const char *reg = xmm ? "xmm0" : "ymm0";
+  const char *ireg = xmm ? "xmm0" : "ymm0";
+
  if (intel_syntax)
    as_bad (_("the last operand of `%s' must be `%s%s'"),
-	    i.tm.name, register_prefix, reg);
+	    i.tm.name, register_prefix, ireg);
  else
    as_bad (_("the first operand of `%s' must be `%s%s'"),
-	    i.tm.name, register_prefix, reg);
+	    i.tm.name, register_prefix, ireg);
  return 0;
}
unnecessary segment overrides. */
const seg_entry *default_seg = 0;
- if (i.tm.opcode_modifier.sse2avx
- && (i.tm.opcode_modifier.vexnds
- || i.tm.opcode_modifier.vexndd))
+ if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
{
- unsigned int dup = i.operands;
- unsigned int dest = dup - 1;
+ unsigned int dupl = i.operands;
+ unsigned int dest = dupl - 1;
unsigned int j;
/* The destination must be an xmm register. */
gas_assert (i.reg_operands
- && MAX_OPERANDS > dup
+ && MAX_OPERANDS > dupl
&& operand_type_equal (&i.types[dest], ®xmm));
if (i.tm.opcode_modifier.firstxmm0)
if (i.op[0].regs->reg_num != 0)
return bad_implicit_operand (1);
- if (i.tm.opcode_modifier.vex3sources)
+ if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
{
/* Keep xmm0 for instructions with VEX prefix and 3
sources. */
}
else if (i.tm.opcode_modifier.implicit1stxmm0)
{
- gas_assert ((MAX_OPERANDS - 1) > dup
- && i.tm.opcode_modifier.vex3sources);
+ gas_assert ((MAX_OPERANDS - 1) > dupl
+ && (i.tm.opcode_modifier.vexsources
+ == VEX3SOURCES));
/* Add the implicit xmm0 for instructions with VEX prefix
and 3 sources. */
i.reg_operands += 2;
i.tm.operands += 2;
- dup++;
+ dupl++;
dest++;
- i.op[dup] = i.op[dest];
- i.types[dup] = i.types[dest];
- i.tm.operand_types[dup] = i.tm.operand_types[dest];
+ i.op[dupl] = i.op[dest];
+ i.types[dupl] = i.types[dest];
+ i.tm.operand_types[dupl] = i.tm.operand_types[dest];
}
else
{
i.reg_operands++;
i.tm.operands++;
- i.op[dup] = i.op[dest];
- i.types[dup] = i.types[dest];
- i.tm.operand_types[dup] = i.tm.operand_types[dest];
+ i.op[dupl] = i.op[dest];
+ i.types[dupl] = i.types[dest];
+ i.tm.operand_types[dupl] = i.tm.operand_types[dest];
}
if (i.tm.opcode_modifier.immext)
/* The first operand of instructions with VEX prefix and 3 sources
must be VEX_Imm4. */
- vex_3_sources = i.tm.opcode_modifier.vex3sources;
+ vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
if (vex_3_sources)
{
- unsigned int nds, reg;
+ unsigned int nds, reg_slot;
expressionS *exp;
if (i.tm.opcode_modifier.veximmext
- && i.tm.opcode_modifier.immext)
- {
- dest = i.operands - 2;
- gas_assert (dest == 3);
- }
+ && i.tm.opcode_modifier.immext)
+ {
+ dest = i.operands - 2;
+ gas_assert (dest == 3);
+ }
else
- dest = i.operands - 1;
+ dest = i.operands - 1;
nds = dest - 1;
- /* This instruction must have 4 register operands
- or 3 register operands plus 1 memory operand.
- It must have VexNDS and VexImmExt. */
+ /* There are 2 kinds of instructions:
+ 1. 5 operands: 4 register operands or 3 register operands
+ plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
+ VexW0 or VexW1. The destination must be either XMM or YMM
+ register.
+ 2. 4 operands: 4 register operands or 3 register operands
+ plus 1 memory operand, VexXDS, and VexImmExt */
gas_assert ((i.reg_operands == 4
- || (i.reg_operands == 3 && i.mem_operands == 1))
- && i.tm.opcode_modifier.vexnds
- && i.tm.opcode_modifier.veximmext
- && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
- || operand_type_equal (&i.tm.operand_types[dest], ®ymm)));
-
- /* Generate an 8bit immediate operand to encode the register
- operand. */
- exp = &im_expressions[i.imm_operands++];
- i.op[i.operands].imms = exp;
- i.types[i.operands] = imm8;
- i.operands++;
- /* If VexW1 is set, the first operand is the source and
- the second operand is encoded in the immediate operand. */
- if (i.tm.opcode_modifier.vexw1)
- {
- source = 0;
- reg = 1;
- }
+ || (i.reg_operands == 3 && i.mem_operands == 1))
+ && i.tm.opcode_modifier.vexvvvv == VEXXDS
+ && (i.tm.opcode_modifier.veximmext
+ || (i.imm_operands == 1
+ && i.types[0].bitfield.vec_imm4
+ && (i.tm.opcode_modifier.vexw == VEXW0
+ || i.tm.opcode_modifier.vexw == VEXW1)
+ && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
+ || operand_type_equal (&i.tm.operand_types[dest], ®ymm)))));
+
+ if (i.imm_operands == 0)
+ {
+ /* When there is no immediate operand, generate an 8bit
+ immediate operand to encode the first operand. */
+ exp = &im_expressions[i.imm_operands++];
+ i.op[i.operands].imms = exp;
+ i.types[i.operands] = imm8;
+ i.operands++;
+ /* If VexW1 is set, the first operand is the source and
+ the second operand is encoded in the immediate operand. */
+ if (i.tm.opcode_modifier.vexw == VEXW1)
+ {
+ source = 0;
+ reg_slot = 1;
+ }
+ else
+ {
+ source = 1;
+ reg_slot = 0;
+ }
+
+ /* FMA swaps REG and NDS. */
+ if (i.tm.cpu_flags.bitfield.cpufma)
+ {
+ unsigned int tmp;
+ tmp = reg_slot;
+ reg_slot = nds;
+ nds = tmp;
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®xmm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®ymm));
+ exp->X_op = O_constant;
+ exp->X_add_number
+ = ((i.op[reg_slot].regs->reg_num
+ + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
+ << 4);
+ }
else
- {
- source = 1;
- reg = 0;
- }
- gas_assert ((operand_type_equal (&i.tm.operand_types[reg], ®xmm)
- || operand_type_equal (&i.tm.operand_types[reg],
- ®ymm))
- && (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
- || operand_type_equal (&i.tm.operand_types[nds],
- ®ymm)));
- exp->X_op = O_constant;
- exp->X_add_number
- = ((i.op[reg].regs->reg_num
- + ((i.op[reg].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
+ {
+ unsigned int imm_slot;
+
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ {
+ /* If VexW0 is set, the third operand is the source and
+ the second operand is encoded in the immediate
+ operand. */
+ source = 2;
+ reg_slot = 1;
+ }
+ else
+ {
+ /* VexW1 is set, the second operand is the source and
+ the third operand is encoded in the immediate
+ operand. */
+ source = 1;
+ reg_slot = 2;
+ }
+
+ if (i.tm.opcode_modifier.immext)
+ {
+ /* When ImmExt is set, the immediate byte is the last
+ operand. */
+ imm_slot = i.operands - 1;
+ source--;
+ reg_slot--;
+ }
+ else
+ {
+ imm_slot = 0;
+
+ /* Turn on Imm8 so that output_imm will generate it. */
+ i.types[imm_slot].bitfield.imm8 = 1;
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®xmm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®ymm));
+ i.op[imm_slot].imms->X_add_number
+ |= ((i.op[reg_slot].regs->reg_num
+ + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
+ << 4);
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
+ || operand_type_equal (&i.tm.operand_types[nds],
+ ®ymm));
i.vex.register_specifier = i.op[nds].regs;
}
else
a instruction with VEX prefix and 3 sources. */
if (i.mem_operands == 0
&& ((i.reg_operands == 2
- && !i.tm.opcode_modifier.vexndd
- && !i.tm.opcode_modifier.vexlwp)
+ && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
|| (i.reg_operands == 3
- && i.tm.opcode_modifier.vexnds)
+ && i.tm.opcode_modifier.vexvvvv == VEXXDS)
|| (i.reg_operands == 4 && vex_3_sources)))
{
switch (i.operands)
is an instruction with VexNDS. */
gas_assert (i.imm_operands == 1
|| (i.imm_operands == 0
- && (i.tm.opcode_modifier.vexnds
+ && (i.tm.opcode_modifier.vexvvvv == VEXXDS
|| i.types[0].bitfield.shiftcount)));
if (operand_type_check (i.types[0], imm)
|| i.types[0].bitfield.shiftcount)
gas_assert ((i.imm_operands == 2
&& i.types[0].bitfield.imm8
&& i.types[1].bitfield.imm8)
- || (i.tm.opcode_modifier.vexnds
+ || (i.tm.opcode_modifier.vexvvvv == VEXXDS
&& i.imm_operands == 1
&& (i.types[0].bitfield.imm8
|| i.types[i.operands - 1].bitfield.imm8)));
- if (i.tm.opcode_modifier.vexnds)
+ if (i.imm_operands == 2)
+ source = 2;
+ else
{
if (i.types[0].bitfield.imm8)
source = 1;
else
source = 0;
}
- else
- source = 2;
break;
case 5:
break;
{
dest = source + 1;
- if (i.tm.opcode_modifier.vexnds)
+ if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
/* For instructions with VexNDS, the register-only
source operand must be XMM or YMM register. It is
else
mem = ~0;
- if (i.tm.opcode_modifier.vexlwp)
+ if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
+ {
+ if (operand_type_check (i.types[0], imm))
+ i.vex.register_specifier = NULL;
+ else
+ {
+ /* VEX.vvvv encodes one of the sources when the first
+ operand is not an immediate. */
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ i.vex.register_specifier = i.op[0].regs;
+ else
+ i.vex.register_specifier = i.op[1].regs;
+ }
+
+ /* Destination is a XMM register encoded in the ModRM.reg
+ and VEX.R bit. */
+ i.rm.reg = i.op[2].regs->reg_num;
+ if ((i.op[2].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_R;
+
+ /* ModRM.rm and VEX.B encodes the other source. */
+ if (!i.mem_operands)
+ {
+ i.rm.mode = 3;
+
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ i.rm.regmem = i.op[1].regs->reg_num;
+ else
+ i.rm.regmem = i.op[0].regs->reg_num;
+
+ if ((i.op[1].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_B;
+ }
+ }
+ else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
{
i.vex.register_specifier = i.op[2].regs;
if (!i.mem_operands)
if (vex_3_sources)
op = dest;
- else if (i.tm.opcode_modifier.vexnds)
+ else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
/* For instructions with VexNDS, the register-only
source operand is encoded in VEX prefix. */
gas_assert (vex_reg < i.operands);
}
}
- else if (i.tm.opcode_modifier.vexndd)
+ else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
{
/* For instructions with VexNDD, there should be
no memory operand and the register destination
is non-null set it to the length of the string we removed from the
input line. Otherwise return NULL. */
static char *
-lex_got (enum bfd_reloc_code_real *reloc,
+lex_got (enum bfd_reloc_code_real *rel,
int *adjust,
i386_operand_type *types)
{
and adjust the reloc according to the real size in reloc(). */
static const struct {
const char *str;
+ int len;
const enum bfd_reloc_code_real rel[2];
const i386_operand_type types64;
} gotrel[] = {
- { "PLTOFF", { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_PLTOFF64 },
+ { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_PLTOFF64 },
OPERAND_TYPE_IMM64 },
- { "PLT", { BFD_RELOC_386_PLT32,
- BFD_RELOC_X86_64_PLT32 },
+ { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
+ BFD_RELOC_X86_64_PLT32 },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "GOTPLT", { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_GOTPLT64 },
+ { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPLT64 },
OPERAND_TYPE_IMM64_DISP64 },
- { "GOTOFF", { BFD_RELOC_386_GOTOFF,
- BFD_RELOC_X86_64_GOTOFF64 },
+ { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_X86_64_GOTOFF64 },
OPERAND_TYPE_IMM64_DISP64 },
- { "GOTPCREL", { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_GOTPCREL },
+ { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPCREL },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSGD", { BFD_RELOC_386_TLS_GD,
- BFD_RELOC_X86_64_TLSGD },
+ { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_X86_64_TLSGD },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
- _dummy_first_bfd_reloc_code_real },
+ { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "TLSLD", { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_TLSLD },
+ { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_TLSLD },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
- BFD_RELOC_X86_64_GOTTPOFF },
+ { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_X86_64_GOTTPOFF },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
- BFD_RELOC_X86_64_TPOFF32 },
+ { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_X86_64_TPOFF32 },
OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
- { "NTPOFF", { BFD_RELOC_386_TLS_LE,
- _dummy_first_bfd_reloc_code_real },
+ { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
- BFD_RELOC_X86_64_DTPOFF32 },
-
+ { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_X86_64_DTPOFF32 },
OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
- { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
- _dummy_first_bfd_reloc_code_real },
+ { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
- _dummy_first_bfd_reloc_code_real },
+ { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "GOT", { BFD_RELOC_386_GOT32,
- BFD_RELOC_X86_64_GOT32 },
+ { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
+ BFD_RELOC_X86_64_GOT32 },
OPERAND_TYPE_IMM32_32S_64_DISP32 },
- { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
- BFD_RELOC_X86_64_GOTPC32_TLSDESC },
+ { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
+ BFD_RELOC_X86_64_GOTPC32_TLSDESC },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
- BFD_RELOC_X86_64_TLSDESC_CALL },
+ { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
+ BFD_RELOC_X86_64_TLSDESC_CALL },
OPERAND_TYPE_IMM32_32S_DISP32 },
};
char *cp;
for (j = 0; j < ARRAY_SIZE (gotrel); j++)
{
- int len;
-
- len = strlen (gotrel[j].str);
+ int len = gotrel[j].len;
if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
{
if (gotrel[j].rel[object_64bit] != 0)
int first, second;
char *tmpbuf, *past_reloc;
- *reloc = gotrel[j].rel[object_64bit];
+ *rel = gotrel[j].rel[object_64bit];
if (adjust)
*adjust = len;
{
intel_syntax = -intel_syntax;
+ exp->X_md = 0;
if (size == 4 || (object_64bit && size == 8))
{
/* Handle @GOTOFF and the like in an expression. */
{
/* Size it properly later. */
i.types[this_operand].bitfield.imm64 = 1;
- /* If BFD64, sign extend val. */
- if (!use_rela_relocations
+ /* If not 64bit, sign extend val. */
+ if (flag_code != CODE_64BIT
&& (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
exp->X_add_number
= (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
goto inv_disp;
if (S_IS_LOCAL (exp->X_add_symbol)
- && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
+ && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
+ && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
exp->X_op = O_subtract;
exp->X_op_symbol = GOT_symbol;
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
+#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
struct option md_longopts[] =
{
{"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
{"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
{"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
+ {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
{NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
int
md_parse_option (int c, char *arg)
{
- unsigned int i;
+ unsigned int j;
char *arch, *next;
switch (c)
next = strchr (arch, '+');
if (next)
*next++ = '\0';
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (arch, cpu_arch [i].name) == 0)
+ if (strcmp (arch, cpu_arch [j].name) == 0)
{
/* Processor. */
- cpu_arch_name = cpu_arch[i].name;
+ if (! cpu_arch[j].flags.bitfield.cpui386)
+ continue;
+
+ cpu_arch_name = cpu_arch[j].name;
cpu_sub_arch_name = NULL;
- cpu_arch_flags = cpu_arch[i].flags;
- cpu_arch_isa = cpu_arch[i].type;
- cpu_arch_isa_flags = cpu_arch[i].flags;
+ cpu_arch_flags = cpu_arch[j].flags;
+ cpu_arch_isa = cpu_arch[j].type;
+ cpu_arch_isa_flags = cpu_arch[j].flags;
if (!cpu_arch_tune_set)
{
cpu_arch_tune = cpu_arch_isa;
}
break;
}
- else if (*cpu_arch [i].name == '.'
- && strcmp (arch, cpu_arch [i].name + 1) == 0)
+ else if (*cpu_arch [j].name == '.'
+ && strcmp (arch, cpu_arch [j].name + 1) == 0)
{
/* ISA entension. */
i386_cpu_flags flags;
- if (strncmp (arch, "no", 2))
+ if (!cpu_arch[j].negated)
flags = cpu_flags_or (cpu_arch_flags,
- cpu_arch[i].flags);
+ cpu_arch[j].flags);
else
flags = cpu_flags_and_not (cpu_arch_flags,
- cpu_arch[i].flags);
+ cpu_arch[j].flags);
if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name,
- cpu_arch[i].name,
+ cpu_arch[j].name,
(const char *) NULL);
free (name);
}
else
- cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
+ cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
}
break;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
+ if (j >= ARRAY_SIZE (cpu_arch))
as_fatal (_("Invalid -march= option: `%s'"), arg);
arch = next;
case OPTION_MTUNE:
if (*arg == '.')
as_fatal (_("Invalid -mtune= option: `%s'"), arg);
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (arg, cpu_arch [i].name) == 0)
+ if (strcmp (arg, cpu_arch [j].name) == 0)
{
cpu_arch_tune_set = 1;
- cpu_arch_tune = cpu_arch [i].type;
- cpu_arch_tune_flags = cpu_arch[i].flags;
+ cpu_arch_tune = cpu_arch [j].type;
+ cpu_arch_tune_flags = cpu_arch[j].flags;
break;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
+ if (j >= ARRAY_SIZE (cpu_arch))
as_fatal (_("Invalid -mtune= option: `%s'"), arg);
break;
as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
break;
+ case OPTION_MAVXSCALAR:
+ if (strcasecmp (arg, "128") == 0)
+ avxscalar = vex128;
+ else if (strcasecmp (arg, "256") == 0)
+ avxscalar = vex256;
+ else
+ as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
+ break;
+
default:
return 0;
}
return 1;
}
+#define MESSAGE_TEMPLATE \
+" "
+
+/* Print the names from the cpu_arch[] table to STREAM, comma-separated
+   and wrapped to the width of MESSAGE_TEMPLATE (continuation lines are
+   indented by the 27-column prefix).  If EXT is nonzero, list only ISA
+   extensions (entries whose name begins with '.', shown without the
+   dot); otherwise list only processors.  If CHECK is nonzero, omit
+   processors whose flags lack cpui386.  Entries with the `skip' field
+   set are never shown.  Used by md_show_usage.  */
+static void
+show_arch (FILE *stream, int ext, int check)
+{
+ static char message[] = MESSAGE_TEMPLATE;
+ char *start = message + 27;
+ char *p;
+ int size = sizeof (MESSAGE_TEMPLATE);
+ int left;
+ const char *name;
+ int len;
+ unsigned int j;
+
+ p = start;
+ left = size - (start - message);
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
+ {
+ /* Should it be skipped? */
+ if (cpu_arch [j].skip)
+ continue;
+
+ name = cpu_arch [j].name;
+ len = cpu_arch [j].len;
+ if (*name == '.')
+ {
+ /* It is an extension. Skip if we aren't asked to show it. */
+ if (ext)
+ {
+ name++;
+ len--;
+ }
+ else
+ continue;
+ }
+ else if (ext)
+ {
+ /* It is a processor. Skip if we only show extensions. */
+ continue;
+ }
+ else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
+ {
+ /* It is an impossible processor - skip. */
+ continue;
+ }
+
+ /* Reserve 2 spaces for ", " or ",\0" */
+ left -= len + 2;
+
+ /* Check if there is any room. */
+ if (left >= 0)
+ {
+ if (p != start)
+ {
+ *p++ = ',';
+ *p++ = ' ';
+ }
+ p = mempcpy (p, name, len);
+ }
+ else
+ {
+ /* Output the current message now and start a new one. */
+ *p++ = ',';
+ *p = '\0';
+ fprintf (stream, "%s\n", message);
+ p = start;
+ left = size - (start - message) - len - 2;
+
+ gas_assert (left >= 0);
+
+ p = mempcpy (p, name, len);
+ }
+ }
+
+ *p = '\0';
+ fprintf (stream, "%s\n", message);
+}
+
void
-md_show_usage (stream)
- FILE *stream;
+md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
fprintf (stream, _("\
#endif
fprintf (stream, _("\
-march=CPU[,+EXTENSION...]\n\
- generate code for CPU and EXTENSION, CPU is one of:\n\
- i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
- pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
- core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
- amdfam10, generic32, generic64\n\
- EXTENSION is combination of:\n\
- 8087, 287, 387, no87, mmx, nommx, sse, sse2, sse3,\n\
- ssse3, sse4.1, sse4.2, sse4, nosse, avx, noavx,\n\
- vmx, smx, xsave, movbe, ept, aes, pclmul, fma,\n\
- clflush, syscall, rdtscp, 3dnow, 3dnowa, sse4a,\n\
- svme, abm, padlock, fma4, lwp\n"));
+ generate code for CPU and EXTENSION, CPU is one of:\n"));
+ show_arch (stream, 0, 1);
+ fprintf (stream, _("\
+ EXTENSION is combination of:\n"));
+ show_arch (stream, 1, 0);
fprintf (stream, _("\
- -mtune=CPU optimize for CPU, CPU is one of:\n\
- i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
- pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
- core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
- amdfam10, generic32, generic64\n"));
+ -mtune=CPU optimize for CPU, CPU is one of:\n"));
+ show_arch (stream, 0, 0);
fprintf (stream, _("\
-msse2avx encode SSE instructions with VEX prefix\n"));
fprintf (stream, _("\
-msse-check=[none|error|warning]\n\
check SSE instructions\n"));
fprintf (stream, _("\
+ -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
+ length\n"));
+ fprintf (stream, _("\
-mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
fprintf (stream, _("\
-msyntax=[att|intel] use AT&T/Intel syntax\n"));
i386_target_format (void)
{
if (!strcmp (default_arch, "x86_64"))
- {
- set_code_flag (CODE_64BIT);
- if (cpu_flags_all_zero (&cpu_arch_isa_flags))
- {
- cpu_arch_isa_flags.bitfield.cpui186 = 1;
- cpu_arch_isa_flags.bitfield.cpui286 = 1;
- cpu_arch_isa_flags.bitfield.cpui386 = 1;
- cpu_arch_isa_flags.bitfield.cpui486 = 1;
- cpu_arch_isa_flags.bitfield.cpui586 = 1;
- cpu_arch_isa_flags.bitfield.cpui686 = 1;
- cpu_arch_isa_flags.bitfield.cpuclflush = 1;
- cpu_arch_isa_flags.bitfield.cpummx= 1;
- cpu_arch_isa_flags.bitfield.cpusse = 1;
- cpu_arch_isa_flags.bitfield.cpusse2 = 1;
- cpu_arch_isa_flags.bitfield.cpulm = 1;
- }
- if (cpu_flags_all_zero (&cpu_arch_tune_flags))
- {
- cpu_arch_tune_flags.bitfield.cpui186 = 1;
- cpu_arch_tune_flags.bitfield.cpui286 = 1;
- cpu_arch_tune_flags.bitfield.cpui386 = 1;
- cpu_arch_tune_flags.bitfield.cpui486 = 1;
- cpu_arch_tune_flags.bitfield.cpui586 = 1;
- cpu_arch_tune_flags.bitfield.cpui686 = 1;
- cpu_arch_tune_flags.bitfield.cpuclflush = 1;
- cpu_arch_tune_flags.bitfield.cpummx= 1;
- cpu_arch_tune_flags.bitfield.cpusse = 1;
- cpu_arch_tune_flags.bitfield.cpusse2 = 1;
- }
- }
+ update_code_flag (CODE_64BIT, 1);
else if (!strcmp (default_arch, "i386"))
- {
- set_code_flag (CODE_32BIT);
- if (cpu_flags_all_zero (&cpu_arch_isa_flags))
- {
- cpu_arch_isa_flags.bitfield.cpui186 = 1;
- cpu_arch_isa_flags.bitfield.cpui286 = 1;
- cpu_arch_isa_flags.bitfield.cpui386 = 1;
- }
- if (cpu_flags_all_zero (&cpu_arch_tune_flags))
- {
- cpu_arch_tune_flags.bitfield.cpui186 = 1;
- cpu_arch_tune_flags.bitfield.cpui286 = 1;
- cpu_arch_tune_flags.bitfield.cpui386 = 1;
- }
- }
+ update_code_flag (CODE_32BIT, 1);
else
as_fatal (_("Unknown architecture"));
+
+ if (cpu_flags_all_zero (&cpu_arch_isa_flags))
+ cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
+ if (cpu_flags_all_zero (&cpu_arch_tune_flags))
+ cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
+
switch (OUTPUT_FLAVOR)
{
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
+/* Emit SYMBOL as a SIZE-byte section-relative (O_secrel) expression.
+   Used for DWARF2 offsets when targeting PE, per the tc_pe_ prefix.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
- expressionS expr;
+ expressionS exp;
- expr.X_op = O_secrel;
- expr.X_add_symbol = symbol;
- expr.X_add_number = 0;
- emit_expr (&expr, size);
+ exp.X_op = O_secrel;
+ exp.X_add_symbol = symbol;
+ exp.X_add_number = 0;
+ emit_expr (&exp, size);
}
#endif
if (letter == 'l')
return SHF_X86_64_LARGE;
- *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
}
else
- *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
return -1;
}