/* tc-i386.c -- Assemble code for the Intel 80386
- Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ Copyright (C) 1989-2015 Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler.
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"
+#ifdef TE_LINUX
+/* Default to compress debug sections for Linux. */
+enum compressed_debug_section_type flag_compress_debug
+ = COMPRESS_DEBUG_GNU_ZLIB;
+#endif
+
#ifndef REGISTER_WARNINGS
#define REGISTER_WARNINGS 1
#endif
WAIT_PREFIX must be the first prefix since FWAIT is really is an
instruction, and so must come before any prefixes.
The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
- LOCKREP_PREFIX. */
+ REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
#define WAIT_PREFIX 0
#define SEG_PREFIX 1
#define ADDR_PREFIX 2
#define DATA_PREFIX 3
-#define LOCKREP_PREFIX 4
-#define REX_PREFIX 5 /* must come last. */
-#define MAX_PREFIXES 6 /* max prefixes per opcode */
+#define REP_PREFIX 4
+#define HLE_PREFIX REP_PREFIX
+#define BND_PREFIX REP_PREFIX
+#define LOCK_PREFIX 5
+#define REX_PREFIX 6 /* must come last. */
+#define MAX_PREFIXES 7 /* max prefixes per opcode */
/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define QWORD_MNEM_SUFFIX 'q'
#define XMMWORD_MNEM_SUFFIX 'x'
#define YMMWORD_MNEM_SUFFIX 'y'
+#define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax. Use a non-ascii letter since since it never appears
in instructions. */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'
*/
typedef struct
{
- const template *start;
- const template *end;
+ const insn_template *start;
+ const insn_template *end;
}
templates;
typedef struct
{
const char *name; /* arch name */
+ unsigned int len; /* arch string length */
enum processor_type type; /* arch type */
i386_cpu_flags flags; /* cpu feature flags */
+ unsigned int skip; /* show_arch should skip this. */
+ unsigned int negated; /* turn off indicated flags. */
}
arch_entry;
+static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
-static void set_sse_check (int);
+static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
static void swap_2_operands (int, int);
static void optimize_imm (void);
static void optimize_disp (void);
-static const template *match_template (void);
+static const insn_template *match_template (void);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static const char *default_arch = DEFAULT_ARCH;
+/* This struct describes rounding control and SAE ("suppress all
+   exceptions") in the instruction.  TYPE is the rounding mode parsed
+   from an {rn-sae}/{rd-sae}/{ru-sae}/{rz-sae}/{sae} qualifier (see
+   RC_NamesTable below).  */
+struct RC_Operation
+{
+ enum rc_type
+ {
+ rne = 0,
+ rd,
+ ru,
+ rz,
+ saeonly
+ } type;
+ /* The operand this rounding/SAE annotation is associated with.  */
+ int operand;
+};
+
+static struct RC_Operation rc_op;
+
+/* The struct describes masking, applied to OPERAND in the instruction.
+   MASK is a pointer to the corresponding mask register. ZEROING tells
+   whether merging or zeroing mask is used. */
+struct Mask_Operation
+{
+ const reg_entry *mask;
+ /* Nonzero when zeroing-masking (rather than merging) was requested.  */
+ unsigned int zeroing;
+ /* The operand where this operation is associated. */
+ int operand;
+};
+
+static struct Mask_Operation mask_op;
+
+/* The struct describes broadcasting, applied to OPERAND.  TYPE
+   encodes which broadcast, if any, was specified (the field comment
+   below lists the possibilities).  */
+struct Broadcast_Operation
+{
+ /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
+ int type;
+
+ /* Index of broadcasted operand. */
+ int operand;
+};
+
+static struct Broadcast_Operation broadcast_op;
+
/* VEX prefix. */
typedef struct
{
- /* VEX prefix is either 2 byte or 3 byte. */
- unsigned char bytes[3];
+ /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
+ unsigned char bytes[4];
unsigned int length;
/* Destination or source register specifier. */
const reg_entry *register_specifier;
const reg_entry *regs;
};
+/* Reasons why an instruction failed to match a template.  The most
+   relevant one is recorded in i.error (see struct _i386_insn) so a
+   specific diagnostic can be issued instead of a generic one.  */
+enum i386_error
+ {
+ operand_size_mismatch,
+ operand_type_mismatch,
+ register_type_mismatch,
+ number_of_operands_mismatch,
+ invalid_instruction_suffix,
+ bad_imm4,
+ old_gcc_only,
+ unsupported_with_intel_mnemonic,
+ unsupported_syntax,
+ unsupported,
+ invalid_vsib_address,
+ invalid_vector_register_set,
+ unsupported_vector_index_register,
+ unsupported_broadcast,
+ broadcast_not_on_src_operand,
+ broadcast_needed,
+ unsupported_masking,
+ mask_not_on_destination,
+ no_default_mask,
+ unsupported_rc_sae,
+ rc_sae_operand_not_last_imm,
+ invalid_register_operand,
+ try_vector_disp8
+ };
+
struct _i386_insn
{
/* TM holds the template for the insn were currently assembling. */
- template tm;
+ insn_template tm;
/* SUFFIX holds the instruction size suffix for byte, word, dword
or qword, if given. */
addressing modes of this insn are encoded. */
modrm_byte rm;
rex_byte rex;
+ rex_byte vrex;
sib_byte sib;
vex_prefix vex;
+ /* Masking attributes. */
+ struct Mask_Operation *mask;
+
+ /* Rounding control and SAE attributes. */
+ struct RC_Operation *rounding;
+
+ /* Broadcasting attributes. */
+ struct Broadcast_Operation *broadcast;
+
+ /* Compressed disp8*N attribute. */
+ unsigned int memshift;
+
/* Swap operand in encoding. */
- unsigned int swap_operand : 1;
+ unsigned int swap_operand;
+
+ /* Prefer 8bit or 32bit displacement in encoding. */
+ enum
+ {
+ disp_encoding_default = 0,
+ disp_encoding_8bit,
+ disp_encoding_32bit
+ } disp_encoding;
+
+ /* REP prefix. */
+ const char *rep_prefix;
+
+ /* HLE prefix. */
+ const char *hle_prefix;
+
+ /* Have BND prefix. */
+ const char *bnd_prefix;
+
+ /* Need VREX to support upper 16 registers. */
+ int need_vrex;
+
+ /* Error message. */
+ enum i386_error error;
};
typedef struct _i386_insn i386_insn;
+/* Link RC type with corresponding string, that'll be looked for in
+   asm. */
+struct RC_name
+{
+ enum rc_type type;
+ const char *name;
+ /* Length of NAME, supplied via STRING_COMMA_LEN.  */
+ unsigned int len;
+};
+
+/* Table of recognized rounding/SAE qualifier strings.  Keep the
+   entries in the same order as enum rc_type.  */
+static const struct RC_name RC_NamesTable[] =
+{
+ { rne, STRING_COMMA_LEN ("rn-sae") },
+ { rd, STRING_COMMA_LEN ("rd-sae") },
+ { ru, STRING_COMMA_LEN ("ru-sae") },
+ { rz, STRING_COMMA_LEN ("rz-sae") },
+ { saeonly, STRING_COMMA_LEN ("sae") },
+};
+
/* List of chars besides those in app.c:symbol_chars that can start an
operand. Used to prevent the scrubber eating vital white-space. */
-const char extra_symbol_chars[] = "*%-(["
+const char extra_symbol_chars[] = "*%-([{"
#ifdef LEX_AT
"@"
#endif
|| ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
&& !defined (TE_GNU) \
&& !defined (TE_LINUX) \
- && !defined (TE_NETWARE) \
+ && !defined (TE_NACL) \
+ && !defined (TE_NETWARE) \
&& !defined (TE_FreeBSD) \
+ && !defined (TE_DragonFly) \
&& !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
static enum flag_code flag_code;
static unsigned int object_64bit;
+static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
-/* The names used to print error messages. */
-static const char *flag_code_names[] =
- {
- "32",
- "16",
- "64"
- };
+#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
+ || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+
+/* The ELF ABI to use. */
+enum x86_elf_abi
+{
+ I386_ABI,
+ X86_64_ABI,
+ X86_64_X32_ABI
+};
+
+static enum x86_elf_abi x86_elf_abi = I386_ABI;
+#endif
+
+#if defined (TE_PE) || defined (TE_PEP)
+/* Use big object file format. */
+static int use_big_obj = 0;
+#endif
+
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+/* 1 if generating code for a shared library. */
+static int shared = 0;
+#endif
/* 1 for intel syntax,
0 if att syntax. */
/* 1 if register prefix % not required. */
static int allow_naked_reg = 0;
+/* 1 if the assembler should add BND prefix for all control-tranferring
+ instructions supporting it, even if this prefix wasn't specified
+ explicitly. */
+static int add_bnd_prefix = 0;
+
/* 1 if pseudo index register, eiz/riz, is allowed . */
static int allow_index_reg = 0;
-static enum
+/* 1 if the assembler should ignore LOCK prefix, even if it was
+ specified explicitly. */
+static int omit_lock_prefix = 0;
+
+static enum check_kind
{
- sse_check_none = 0,
- sse_check_warning,
- sse_check_error
+ check_none = 0,
+ check_warning,
+ check_error
}
-sse_check;
+sse_check, operand_check = check_warning;
/* Register prefix used for error message. */
static const char *register_prefix = "%";
/* Encode SSE instructions with VEX prefix. */
static unsigned int sse2avx;
+/* Encode scalar AVX instructions with specific vector length. */
+static enum
+ {
+ vex128 = 0,
+ vex256
+ } avxscalar;
+
+/* Encode scalar EVEX LIG instructions with specific vector length. */
+static enum
+ {
+ evexl128 = 0,
+ evexl256,
+ evexl512
+ } evexlig;
+
+/* Encode EVEX WIG instructions with specific evex.w. */
+static enum
+ {
+ evexw0 = 0,
+ evexw1
+ } evexwig;
+
+/* Value to encode in EVEX RC bits, for SAE-only instructions. */
+static enum rc_type evexrcig = rne;
+
/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
static symbolS *GOT_symbol;
static const arch_entry cpu_arch[] =
{
- { "generic32", PROCESSOR_GENERIC32,
- CPU_GENERIC32_FLAGS },
- { "generic64", PROCESSOR_GENERIC64,
- CPU_GENERIC64_FLAGS },
- { "i8086", PROCESSOR_UNKNOWN,
- CPU_NONE_FLAGS },
- { "i186", PROCESSOR_UNKNOWN,
- CPU_I186_FLAGS },
- { "i286", PROCESSOR_UNKNOWN,
- CPU_I286_FLAGS },
- { "i386", PROCESSOR_I386,
- CPU_I386_FLAGS },
- { "i486", PROCESSOR_I486,
- CPU_I486_FLAGS },
- { "i586", PROCESSOR_PENTIUM,
- CPU_I586_FLAGS },
- { "i686", PROCESSOR_PENTIUMPRO,
- CPU_I686_FLAGS },
- { "pentium", PROCESSOR_PENTIUM,
- CPU_I586_FLAGS },
- { "pentiumpro", PROCESSOR_PENTIUMPRO,
- CPU_I686_FLAGS },
- { "pentiumii", PROCESSOR_PENTIUMPRO,
- CPU_P2_FLAGS },
- { "pentiumiii",PROCESSOR_PENTIUMPRO,
- CPU_P3_FLAGS },
- { "pentium4", PROCESSOR_PENTIUM4,
- CPU_P4_FLAGS },
- { "prescott", PROCESSOR_NOCONA,
- CPU_CORE_FLAGS },
- { "nocona", PROCESSOR_NOCONA,
- CPU_NOCONA_FLAGS },
- { "yonah", PROCESSOR_CORE,
- CPU_CORE_FLAGS },
- { "core", PROCESSOR_CORE,
- CPU_CORE_FLAGS },
- { "merom", PROCESSOR_CORE2,
- CPU_CORE2_FLAGS },
- { "core2", PROCESSOR_CORE2,
- CPU_CORE2_FLAGS },
- { "corei7", PROCESSOR_COREI7,
- CPU_COREI7_FLAGS },
- { "k6", PROCESSOR_K6,
- CPU_K6_FLAGS },
- { "k6_2", PROCESSOR_K6,
- CPU_K6_2_FLAGS },
- { "athlon", PROCESSOR_ATHLON,
- CPU_ATHLON_FLAGS },
- { "sledgehammer", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "opteron", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "k8", PROCESSOR_K8,
- CPU_K8_FLAGS },
- { "amdfam10", PROCESSOR_AMDFAM10,
- CPU_AMDFAM10_FLAGS },
- { ".mmx", PROCESSOR_UNKNOWN,
- CPU_MMX_FLAGS },
- { ".sse", PROCESSOR_UNKNOWN,
- CPU_SSE_FLAGS },
- { ".sse2", PROCESSOR_UNKNOWN,
- CPU_SSE2_FLAGS },
- { ".sse3", PROCESSOR_UNKNOWN,
- CPU_SSE3_FLAGS },
- { ".ssse3", PROCESSOR_UNKNOWN,
- CPU_SSSE3_FLAGS },
- { ".sse4.1", PROCESSOR_UNKNOWN,
- CPU_SSE4_1_FLAGS },
- { ".sse4.2", PROCESSOR_UNKNOWN,
- CPU_SSE4_2_FLAGS },
- { ".sse4", PROCESSOR_UNKNOWN,
- CPU_SSE4_2_FLAGS },
- { ".avx", PROCESSOR_UNKNOWN,
- CPU_AVX_FLAGS },
- { ".vmx", PROCESSOR_UNKNOWN,
- CPU_VMX_FLAGS },
- { ".smx", PROCESSOR_UNKNOWN,
- CPU_SMX_FLAGS },
- { ".xsave", PROCESSOR_UNKNOWN,
- CPU_XSAVE_FLAGS },
- { ".aes", PROCESSOR_UNKNOWN,
- CPU_AES_FLAGS },
- { ".pclmul", PROCESSOR_UNKNOWN,
- CPU_PCLMUL_FLAGS },
- { ".clmul", PROCESSOR_UNKNOWN,
- CPU_PCLMUL_FLAGS },
- { ".fma", PROCESSOR_UNKNOWN,
- CPU_FMA_FLAGS },
- { ".fma4", PROCESSOR_UNKNOWN,
- CPU_FMA4_FLAGS },
- { ".movbe", PROCESSOR_UNKNOWN,
- CPU_MOVBE_FLAGS },
- { ".ept", PROCESSOR_UNKNOWN,
- CPU_EPT_FLAGS },
- { ".clflush", PROCESSOR_UNKNOWN,
- CPU_CLFLUSH_FLAGS },
- { ".syscall", PROCESSOR_UNKNOWN,
- CPU_SYSCALL_FLAGS },
- { ".rdtscp", PROCESSOR_UNKNOWN,
- CPU_RDTSCP_FLAGS },
- { ".3dnow", PROCESSOR_UNKNOWN,
- CPU_3DNOW_FLAGS },
- { ".3dnowa", PROCESSOR_UNKNOWN,
- CPU_3DNOWA_FLAGS },
- { ".padlock", PROCESSOR_UNKNOWN,
- CPU_PADLOCK_FLAGS },
- { ".pacifica", PROCESSOR_UNKNOWN,
- CPU_SVME_FLAGS },
- { ".svme", PROCESSOR_UNKNOWN,
- CPU_SVME_FLAGS },
- { ".sse4a", PROCESSOR_UNKNOWN,
- CPU_SSE4A_FLAGS },
- { ".abm", PROCESSOR_UNKNOWN,
- CPU_ABM_FLAGS },
+ /* Do not replace the first two entries - i386_target_format()
+ relies on them being there in this order. */
+ { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
+ CPU_GENERIC32_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
+ CPU_GENERIC64_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
+ CPU_NONE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
+ CPU_I186_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
+ CPU_I286_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
+ CPU_I386_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
+ CPU_I486_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
+ CPU_I586_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
+ CPU_I686_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
+ CPU_I586_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
+ CPU_PENTIUMPRO_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
+ CPU_P2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
+ CPU_P3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
+ CPU_P4_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
+ CPU_CORE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
+ CPU_NOCONA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
+ CPU_CORE_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
+ CPU_CORE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
+ CPU_CORE2_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
+ CPU_CORE2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
+ CPU_COREI7_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
+ CPU_L1OM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
+ CPU_K1OM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
+ CPU_IAMCU_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
+ CPU_K6_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
+ CPU_K6_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
+ CPU_ATHLON_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
+ CPU_K8_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
+ CPU_AMDFAM10_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
+ CPU_BDVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
+ CPU_BDVER2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
+ CPU_BDVER3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
+ CPU_BDVER4_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
+ CPU_ZNVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
+ CPU_BTVER1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
+ CPU_BTVER2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
+ CPU_8087_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
+ CPU_287_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
+ CPU_387_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
+ CPU_ANY87_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
+ CPU_MMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
+ CPU_3DNOWA_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
+ CPU_SSE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
+ CPU_SSE2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
+ CPU_SSE3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
+ CPU_SSSE3_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
+ CPU_SSE4_2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
+ CPU_ANY_SSE_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
+ CPU_AVX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
+ CPU_AVX2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
+ CPU_AVX512F_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
+ CPU_AVX512CD_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
+ CPU_AVX512ER_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
+ CPU_AVX512PF_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
+ CPU_AVX512DQ_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
+ CPU_AVX512BW_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
+ CPU_AVX512VL_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
+ CPU_ANY_AVX_FLAGS, 0, 1 },
+ { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
+ CPU_VMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
+ CPU_VMFUNC_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
+ CPU_SMX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
+ CPU_XSAVE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
+ CPU_XSAVEOPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
+ CPU_XSAVEC_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
+ CPU_XSAVES_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
+ CPU_AES_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
+ CPU_PCLMUL_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
+ CPU_PCLMUL_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
+ CPU_FSGSBASE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
+ CPU_RDRND_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
+ CPU_F16C_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
+ CPU_BMI2_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
+ CPU_FMA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
+ CPU_FMA4_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
+ CPU_XOP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
+ CPU_LWP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
+ CPU_MOVBE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
+ CPU_CX16_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
+ CPU_EPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
+ CPU_LZCNT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
+ CPU_HLE_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
+ CPU_RTM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
+ CPU_INVPCID_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
+ CPU_CLFLUSH_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
+ CPU_NOP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
+ CPU_SYSCALL_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
+ CPU_RDTSCP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
+ CPU_3DNOW_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
+ CPU_3DNOWA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
+ CPU_PADLOCK_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
+ CPU_SVME_FLAGS, 1, 0 },
+ { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
+ CPU_SVME_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
+ CPU_SSE4A_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
+ CPU_ABM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
+ CPU_BMI_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
+ CPU_TBM_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
+ CPU_ADX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
+ CPU_RDSEED_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
+ CPU_PRFCHW_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
+ CPU_SMAP_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
+ CPU_MPX_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
+ CPU_SHA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
+ CPU_CLFLUSHOPT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
+ CPU_PREFETCHWT1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
+ CPU_SE1_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
+ CPU_CLWB_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
+ CPU_PCOMMIT_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
+ CPU_AVX512IFMA_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
+ CPU_AVX512VBMI_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
+ CPU_CLZERO_FLAGS, 0, 0 },
+ { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
+ CPU_MWAITX_FLAGS, 0, 0 },
};
#ifdef I386COFF
{"att_mnemonic", set_intel_mnemonic, 0},
{"allow_index_reg", set_allow_index_reg, 1},
{"disallow_index_reg", set_allow_index_reg, 0},
- {"sse_check", set_sse_check, 0},
+ {"sse_check", set_check, 0},
+ {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{"largecomm", handle_large_common, 0},
#else
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const char alt_10[] =
{0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* data16
- nopw %cs:0L(%[re]ax,%[re]ax,1) */
- static const char alt_long_11[] =
- {0x66,
- 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* data16
- data16
- nopw %cs:0L(%[re]ax,%[re]ax,1) */
- static const char alt_long_12[] =
- {0x66,
- 0x66,
- 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* data16
- data16
- data16
- nopw %cs:0L(%[re]ax,%[re]ax,1) */
- static const char alt_long_13[] =
- {0x66,
- 0x66,
- 0x66,
- 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* data16
- data16
- data16
- data16
- nopw %cs:0L(%[re]ax,%[re]ax,1) */
- static const char alt_long_14[] =
- {0x66,
- 0x66,
- 0x66,
- 0x66,
- 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* data16
- data16
- data16
- data16
- data16
- nopw %cs:0L(%[re]ax,%[re]ax,1) */
- static const char alt_long_15[] =
- {0x66,
- 0x66,
- 0x66,
- 0x66,
- 0x66,
- 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- /* nopl 0(%[re]ax,%[re]ax,1)
- nopw 0(%[re]ax,%[re]ax,1) */
- static const char alt_short_11[] =
- {0x0f,0x1f,0x44,0x00,0x00,
- 0x66,0x0f,0x1f,0x44,0x00,0x00};
- /* nopw 0(%[re]ax,%[re]ax,1)
- nopw 0(%[re]ax,%[re]ax,1) */
- static const char alt_short_12[] =
- {0x66,0x0f,0x1f,0x44,0x00,0x00,
- 0x66,0x0f,0x1f,0x44,0x00,0x00};
- /* nopw 0(%[re]ax,%[re]ax,1)
- nopl 0L(%[re]ax) */
- static const char alt_short_13[] =
- {0x66,0x0f,0x1f,0x44,0x00,0x00,
- 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
- /* nopl 0L(%[re]ax)
- nopl 0L(%[re]ax) */
- static const char alt_short_14[] =
- {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
- 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
- /* nopl 0L(%[re]ax)
- nopl 0L(%[re]ax,%[re]ax,1) */
- static const char alt_short_15[] =
- {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
- 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
- static const char *const alt_short_patt[] = {
- f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
- alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
- alt_short_14, alt_short_15
- };
- static const char *const alt_long_patt[] = {
+ static const char *const alt_patt[] = {
f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
- alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
- alt_long_14, alt_long_15
+ alt_9, alt_10
};
/* Only align for at least a positive non-zero boundary. */
1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
PROCESSOR_GENERIC32, f32_patt will be used.
- 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
- PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
- PROCESSOR_GENERIC64, alt_long_patt will be used.
- 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
- PROCESSOR_AMDFAM10, alt_short_patt will be used.
-
- When -mtune= isn't used, alt_long_patt will be used if
- cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
+ 2. For the rest, alt_patt will be used.
+
+ When -mtune= isn't used, alt_patt will be used if
+ cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
be used.
When -march= or .arch is used, we can't use anything beyond
{
case PROCESSOR_UNKNOWN:
/* We use cpu_arch_isa_flags to check if we SHOULD
- optimize for Cpu686. */
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
- patt = alt_long_patt;
+ optimize with nops. */
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
+ patt = alt_patt;
else
patt = f32_patt;
break;
- case PROCESSOR_PENTIUMPRO:
case PROCESSOR_PENTIUM4:
case PROCESSOR_NOCONA:
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
+ case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
case PROCESSOR_GENERIC64:
- patt = alt_long_patt;
- break;
case PROCESSOR_K6:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
- patt = alt_short_patt;
+ case PROCESSOR_BD:
+ case PROCESSOR_ZNVER:
+ case PROCESSOR_BT:
+ patt = alt_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
+ case PROCESSOR_PENTIUMPRO:
+ case PROCESSOR_IAMCU:
case PROCESSOR_GENERIC32:
patt = f32_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
+ case PROCESSOR_IAMCU:
case PROCESSOR_K6:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
+ case PROCESSOR_BD:
+ case PROCESSOR_ZNVER:
+ case PROCESSOR_BT:
case PROCESSOR_GENERIC32:
/* We use cpu_arch_isa_flags to check if we CAN optimize
- for Cpu686. */
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
- patt = alt_short_patt;
+ with nops. */
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
+ patt = alt_patt;
else
patt = f32_patt;
break;
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
- if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
- patt = alt_long_patt;
+ case PROCESSOR_L1OM:
+ case PROCESSOR_K1OM:
+ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
+ patt = alt_patt;
else
patt = f32_patt;
break;
case PROCESSOR_GENERIC64:
- patt = alt_long_patt;
+ patt = alt_patt;
break;
}
}
{
/* If the padding is less than 15 bytes, we use the normal
ones. Otherwise, we use a jump instruction and adjust
- its offset. */
- if (count < 15)
+ its offset. */
+ int limit;
+
+ /* For 64bit, the limit is 3 bytes. */
+ if (flag_code == CODE_64BIT
+ && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
+ limit = 3;
+ else
+ limit = 15;
+ if (count < limit)
memcpy (fragP->fr_literal + fragP->fr_fix,
patt[count - 1], count);
else
}
else
{
- /* Maximum length of an instruction is 15 byte. If the
- padding is greater than 15 bytes and we don't use jump,
+ /* Maximum length of an instruction is 10 byte. If the
+ padding is greater than 10 bytes and we don't use jump,
we have to break it into smaller pieces. */
int padding = count;
- while (padding > 15)
+ while (padding > 10)
{
- padding -= 15;
+ padding -= 10;
memcpy (fragP->fr_literal + fragP->fr_fix + padding,
- patt [14], 15);
+ patt [9], 10);
}
if (padding)
}
}
-static INLINE void
-cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
-{
- switch (ARRAY_SIZE(x->array))
- {
- case 3:
- x->array[2] = v;
- case 2:
- x->array[1] = v;
- case 1:
- x->array[0] = v;
- break;
- default:
- abort ();
- }
-}
-
static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
const union i386_cpu_flags *y)
return x;
}
+/* Return X with every flag bit that is set in Y cleared (X & ~Y),
+   applied word-wise over however many words i386_cpu_flags holds.  */
+static INLINE i386_cpu_flags
+cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
+{
+ switch (ARRAY_SIZE (x.array))
+ {
+ case 3:
+ x.array [2] &= ~y.array [2];
+ /* Fall through.  */
+ case 2:
+ x.array [1] &= ~y.array [1];
+ /* Fall through.  */
+ case 1:
+ x.array [0] &= ~y.array [0];
+ break;
+ default:
+ abort ();
+ }
+ return x;
+}
+
+/* Return 1 if FLAGS contains only features compatible with the Intel
+   MCU (IAMCU) feature set; trivially 1 when the selected arch is not
+   IAMCU.  */
+static int
+valid_iamcu_cpu_flags (const i386_cpu_flags *flags)
+{
+ if (cpu_arch_isa == PROCESSOR_IAMCU)
+ {
+ static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS;
+ i386_cpu_flags compat_flags;
+ compat_flags = cpu_flags_and_not (*flags, iamcu_flags);
+ /* Anything left after masking out the IAMCU-compatible bits is
+    an incompatible feature.  */
+ return cpu_flags_all_zero (&compat_flags);
+ }
+ else
+ return 1;
+}
+
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
#define CPU_FLAGS_AES_MATCH 0x4
/* Return CPU flags match bits. */
static int
-cpu_flags_match (const template *t)
+cpu_flags_match (const insn_template *t)
{
i386_cpu_flags x = t->cpu_flags;
int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
= OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
+static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
+static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
+static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
enum operand_type
{
operand J for instruction template T. */
static INLINE int
-match_reg_size (const template *t, unsigned int j)
+match_reg_size (const insn_template *t, unsigned int j)
{
return !((i.types[j].bitfield.byte
&& !t->operand_types[j].bitfield.byte)
instruction template T. */
static INLINE int
-match_mem_size (const template *t, unsigned int j)
+match_mem_size (const insn_template *t, unsigned int j)
{
return (match_reg_size (t, j)
&& !((i.types[j].bitfield.unspecified
+ && !i.broadcast
&& !t->operand_types[j].bitfield.unspecified)
|| (i.types[j].bitfield.fword
&& !t->operand_types[j].bitfield.fword)
|| (i.types[j].bitfield.xmmword
&& !t->operand_types[j].bitfield.xmmword)
|| (i.types[j].bitfield.ymmword
- && !t->operand_types[j].bitfield.ymmword)));
+ && !t->operand_types[j].bitfield.ymmword)
+ || (i.types[j].bitfield.zmmword
+ && !t->operand_types[j].bitfield.zmmword)));
}
/* Return 1 if there is no size conflict on any operands for
instruction template T. */
static INLINE int
-operand_size_match (const template *t)
+operand_size_match (const insn_template *t)
{
unsigned int j;
int match = 1;
}
}
- if (match
- || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
+ if (match)
return match;
+ else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
+ {
+mismatch:
+ i.error = operand_size_mismatch;
+ return 0;
+ }
/* Check reverse. */
gas_assert (i.operands == 2);
{
if (t->operand_types[j].bitfield.acc
&& !match_reg_size (t, j ? 0 : 1))
- {
- match = 0;
- break;
- }
+ goto mismatch;
if (i.types[j].bitfield.mem
&& !match_mem_size (t, j ? 0 : 1))
- {
- match = 0;
- break;
- }
+ goto mismatch;
}
return match;
temp.bitfield.tbyte = 0;
temp.bitfield.xmmword = 0;
temp.bitfield.ymmword = 0;
+ temp.bitfield.zmmword = 0;
if (operand_type_all_zero (&temp))
- return 0;
+ goto mismatch;
- return (given.bitfield.baseindex == overlap.bitfield.baseindex
- && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
+ if (given.bitfield.baseindex == overlap.bitfield.baseindex
+ && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
+ return 1;
+
+mismatch:
+ i.error = operand_type_mismatch;
+ return 0;
}
/* If given types g0 and g1 are registers they must be of the same type
t1.bitfield.reg64 = 1;
}
- return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
- && !(t0.bitfield.reg16 & t1.bitfield.reg16)
- && !(t0.bitfield.reg32 & t1.bitfield.reg32)
- && !(t0.bitfield.reg64 & t1.bitfield.reg64));
+ if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
+ && !(t0.bitfield.reg16 & t1.bitfield.reg16)
+ && !(t0.bitfield.reg32 & t1.bitfield.reg32)
+ && !(t0.bitfield.reg64 & t1.bitfield.reg64))
+ return 1;
+
+ i.error = register_type_mismatch;
+
+ return 0;
+}
+
+/* Return the encoding number of register R, folding in the REX
+   extension (+8) for registers flagged RegRex.  */
+static INLINE unsigned int
+register_number (const reg_entry *r)
+{
+ unsigned int nr = r->reg_num;
+
+ if (r->reg_flags & RegRex)
+ nr += 8;
+
+ return nr;
+}
static INLINE unsigned int
mode_from_disp_size (i386_operand_type t)
{
- if (t.bitfield.disp8)
+ if (t.bitfield.disp8 || t.bitfield.vec_disp8)
return 1;
else if (t.bitfield.disp16
|| t.bitfield.disp32
}
static INLINE int
-fits_in_signed_byte (offsetT num)
+fits_in_signed_byte (addressT num)
{
- return (num >= -128) && (num <= 127);
+ return num + 0x80 <= 0xff;
}
static INLINE int
-fits_in_unsigned_byte (offsetT num)
+fits_in_unsigned_byte (addressT num)
{
- return (num & 0xff) == num;
+ return num <= 0xff;
}
static INLINE int
-fits_in_unsigned_word (offsetT num)
+fits_in_unsigned_word (addressT num)
{
- return (num & 0xffff) == num;
+ return num <= 0xffff;
}
static INLINE int
-fits_in_signed_word (offsetT num)
+fits_in_signed_word (addressT num)
{
- return (-32768 <= num) && (num <= 32767);
+ return num + 0x8000 <= 0xffff;
}
static INLINE int
-fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
+fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
return 1;
#else
- return (!(((offsetT) -1 << 31) & num)
- || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
+ return num + 0x80000000 <= 0xffffffff;
#endif
} /* fits_in_signed_long() */
static INLINE int
-fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
+fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
return 1;
#else
- return (num & (((offsetT) 2 << 31) - 1)) == num;
+ return num <= 0xffffffff;
#endif
} /* fits_in_unsigned_long() */
+static INLINE int
+fits_in_vec_disp8 (offsetT num)
+{
+ int shift = i.memshift;
+ unsigned int mask;
+
+ if (shift == -1)
+ abort ();
+
+ mask = (1 << shift) - 1;
+
+ /* Return 0 if NUM isn't properly aligned. */
+ if ((num & mask))
+ return 0;
+
+ /* Check if NUM will fit in 8 bits after the shift. */
+ return fits_in_signed_byte (num >> shift);
+}
+
+static INLINE int
+fits_in_imm4 (offsetT num)
+{
+ return (num & 0xf) == num;
+}
+
static i386_operand_type
smallest_imm_type (offsetT num)
{
default: abort ();
}
- /* If BFD64, sign extend val. */
- if (!use_rela_relocations)
+#ifdef BFD64
+ /* If BFD64, sign extend val for 32bit address mode. */
+ if (flag_code != CODE_64BIT
+ || i.prefix[ADDR_PREFIX])
if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
+#endif
if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
{
return val & mask;
}
-/* Returns 0 if attempting to add a prefix where one from the same
- class already exists, 1 if non rep/repne added, 2 if rep/repne
- added. */
-static int
+enum PREFIX_GROUP
+{
+ PREFIX_EXIST = 0,
+ PREFIX_LOCK,
+ PREFIX_REP,
+ PREFIX_OTHER
+};
+
+/* Returns
+ a. PREFIX_EXIST if attempting to add a prefix where one from the
+ same class already exists.
+ b. PREFIX_LOCK if lock prefix is added.
+ c. PREFIX_REP if rep/repne prefix is added.
+ d. PREFIX_OTHER if other prefix is added.
+ */
+
+static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
- int ret = 1;
+ enum PREFIX_GROUP ret = PREFIX_OTHER;
unsigned int q;
if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
if ((i.prefix[REX_PREFIX] & prefix & REX_W)
|| ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
&& (prefix & (REX_R | REX_X | REX_B))))
- ret = 0;
+ ret = PREFIX_EXIST;
q = REX_PREFIX;
}
else
case REPNE_PREFIX_OPCODE:
case REPE_PREFIX_OPCODE:
- ret = 2;
- /* fall thru */
+ q = REP_PREFIX;
+ ret = PREFIX_REP;
+ break;
+
case LOCK_PREFIX_OPCODE:
- q = LOCKREP_PREFIX;
+ q = LOCK_PREFIX;
+ ret = PREFIX_LOCK;
break;
case FWAIT_OPCODE:
break;
}
if (i.prefix[q] != 0)
- ret = 0;
+ ret = PREFIX_EXIST;
}
if (ret)
}
static void
-set_code_flag (int value)
+update_code_flag (int value, int check)
{
- flag_code = value;
+ PRINTF_LIKE ((*as_error));
+
+ flag_code = (enum flag_code) value;
if (flag_code == CODE_64BIT)
{
cpu_arch_flags.bitfield.cpu64 = 1;
}
if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
{
- as_bad (_("64bit mode not supported on this CPU."));
+ if (check)
+ as_error = as_fatal;
+ else
+ as_error = as_bad;
+ (*as_error) (_("64bit mode not supported on `%s'."),
+ cpu_arch_name ? cpu_arch_name : default_arch);
}
if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
{
- as_bad (_("32bit mode not supported on this CPU."));
+ if (check)
+ as_error = as_fatal;
+ else
+ as_error = as_bad;
+ (*as_error) (_("32bit mode not supported on `%s'."),
+ cpu_arch_name ? cpu_arch_name : default_arch);
}
stackop_size = '\0';
}
+static void
+set_code_flag (int value)
+{
+ update_code_flag (value, 0);
+}
+
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
- flag_code = new_code_flag;
+ flag_code = (enum flag_code) new_code_flag;
if (flag_code != CODE_16BIT)
abort ();
cpu_arch_flags.bitfield.cpu64 = 0;
}
static void
-set_sse_check (int dummy ATTRIBUTE_UNUSED)
+set_check (int what)
{
+ enum check_kind *kind;
+ const char *str;
+
+ if (what)
+ {
+ kind = &operand_check;
+ str = "operand";
+ }
+ else
+ {
+ kind = &sse_check;
+ str = "sse";
+ }
+
SKIP_WHITESPACE ();
if (!is_end_of_line[(unsigned char) *input_line_pointer])
int e = get_symbol_end ();
if (strcmp (string, "none") == 0)
- sse_check = sse_check_none;
+ *kind = check_none;
else if (strcmp (string, "warning") == 0)
- sse_check = sse_check_warning;
+ *kind = check_warning;
else if (strcmp (string, "error") == 0)
- sse_check = sse_check_error;
+ *kind = check_error;
else
- as_bad (_("bad argument to sse_check directive."));
+ as_bad (_("bad argument to %s_check directive."), str);
*input_line_pointer = e;
}
else
- as_bad (_("missing argument for sse_check directive"));
+ as_bad (_("missing argument for %s_check directive"), str);
demand_empty_rest_of_line ();
}
+static void
+check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
+ i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
+{
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ static const char *arch;
+
+ /* Intel LIOM is only supported on ELF. */
+ if (!IS_ELF)
+ return;
+
+ if (!arch)
+ {
+ /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
+ use default_arch. */
+ arch = cpu_arch_name;
+ if (!arch)
+ arch = default_arch;
+ }
+
+ /* If we are targeting Intel MCU, we must enable it. */
+ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
+ || new_flag.bitfield.cpuiamcu)
+ return;
+
+ /* If we are targeting Intel L1OM, we must enable it. */
+ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
+ || new_flag.bitfield.cpul1om)
+ return;
+
+ /* If we are targeting Intel K1OM, we must enable it. */
+ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
+ || new_flag.bitfield.cpuk1om)
+ return;
+
+ as_bad (_("`%s' is not supported on `%s'"), name, arch);
+#endif
+}
+
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
{
char *string = input_line_pointer;
int e = get_symbol_end ();
- unsigned int i;
+ unsigned int j;
i386_cpu_flags flags;
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (string, cpu_arch[i].name) == 0)
+ if (strcmp (string, cpu_arch[j].name) == 0)
{
+ check_cpu_arch_compatible (string, cpu_arch[j].flags);
+
if (*string != '.')
{
- cpu_arch_name = cpu_arch[i].name;
+ cpu_arch_name = cpu_arch[j].name;
cpu_sub_arch_name = NULL;
- cpu_arch_flags = cpu_arch[i].flags;
+ cpu_arch_flags = cpu_arch[j].flags;
if (flag_code == CODE_64BIT)
{
cpu_arch_flags.bitfield.cpu64 = 1;
cpu_arch_flags.bitfield.cpu64 = 0;
cpu_arch_flags.bitfield.cpuno64 = 1;
}
- cpu_arch_isa = cpu_arch[i].type;
- cpu_arch_isa_flags = cpu_arch[i].flags;
+ cpu_arch_isa = cpu_arch[j].type;
+ cpu_arch_isa_flags = cpu_arch[j].flags;
if (!cpu_arch_tune_set)
{
cpu_arch_tune = cpu_arch_isa;
break;
}
- flags = cpu_flags_or (cpu_arch_flags,
- cpu_arch[i].flags);
- if (!cpu_flags_equal (&flags, &cpu_arch_flags))
+ if (!cpu_arch[j].negated)
+ flags = cpu_flags_or (cpu_arch_flags,
+ cpu_arch[j].flags);
+ else
+ flags = cpu_flags_and_not (cpu_arch_flags,
+ cpu_arch[j].flags);
+
+ if (!valid_iamcu_cpu_flags (&flags))
+ as_fatal (_("`%s' isn't valid for Intel MCU"),
+ cpu_arch[j].name);
+ else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name,
- cpu_arch[i].name,
+ cpu_arch[j].name,
(const char *) NULL);
free (name);
}
else
- cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
+ cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
*input_line_pointer = e;
demand_empty_rest_of_line ();
return;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
+ if (j >= ARRAY_SIZE (cpu_arch))
as_bad (_("no such architecture: `%s'"), string);
*input_line_pointer = e;
demand_empty_rest_of_line ();
}
+enum bfd_architecture
+i386_arch (void)
+{
+ if (cpu_arch_isa == PROCESSOR_L1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || flag_code != CODE_64BIT)
+ as_fatal (_("Intel L1OM is 64bit ELF only"));
+ return bfd_arch_l1om;
+ }
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || flag_code != CODE_64BIT)
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_arch_k1om;
+ }
+ else if (cpu_arch_isa == PROCESSOR_IAMCU)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || flag_code == CODE_64BIT)
+ as_fatal (_("Intel MCU is 32bit ELF only"));
+ return bfd_arch_iamcu;
+ }
+ else
+ return bfd_arch_i386;
+}
+
unsigned long
-i386_mach ()
+i386_mach (void)
{
- if (!strcmp (default_arch, "x86_64"))
- return bfd_mach_x86_64;
- else if (!strcmp (default_arch, "i386"))
- return bfd_mach_i386_i386;
+ if (!strncmp (default_arch, "x86_64", 6))
+ {
+ if (cpu_arch_isa == PROCESSOR_L1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
+ as_fatal (_("Intel L1OM is 64bit ELF only"));
+ return bfd_mach_l1om;
+ }
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
+ as_fatal (_("Intel K1OM is 64bit ELF only"));
+ return bfd_mach_k1om;
+ }
+ else if (default_arch[6] == '\0')
+ return bfd_mach_x86_64;
+ else
+ return bfd_mach_x64_32;
+ }
+ else if (!strcmp (default_arch, "i386")
+ || !strcmp (default_arch, "iamcu"))
+ {
+ if (cpu_arch_isa == PROCESSOR_IAMCU)
+ {
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
+ as_fatal (_("Intel MCU is 32bit ELF only"));
+ return bfd_mach_i386_iamcu;
+ }
+ else
+ return bfd_mach_i386_i386;
+ }
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
}
\f
void
-md_begin ()
+md_begin (void)
{
const char *hash_err;
op_hash = hash_new ();
{
- const template *optab;
+ const insn_template *optab;
templates *core_optab;
/* Setup for loop. */
(void *) core_optab);
if (hash_err)
{
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("can't hash %s: %s"),
(optab - 1)->name,
hash_err);
}
{
hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
if (hash_err)
- as_fatal (_("Internal Error: Can't hash %s: %s"),
+ as_fatal (_("can't hash %s: %s"),
regtab->reg_name,
hash_err);
}
register_chars[c] = mnemonic_chars[c];
operand_chars[c] = c;
}
+ else if (c == '{' || c == '}')
+ operand_chars[c] = c;
if (ISALPHA (c) || ISDIGIT (c))
identifier_chars[c] = c;
operand_chars[(unsigned char) *p] = *p;
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF)
- {
- record_alignment (text_section, 2);
- record_alignment (data_section, 2);
- record_alignment (bss_section, 2);
- }
-#endif
-
if (flag_code == CODE_64BIT)
{
+#if defined (OBJ_COFF) && defined (TE_PE)
+ x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
+ ? 32 : 16);
+#else
x86_dwarf2_return_column = 16;
+#endif
x86_cie_data_alignment = -8;
}
else
#ifdef DEBUG386
/* Debugging routines for md_assemble. */
-static void pte (template *);
+static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);
static void
pi (char *line, i386_insn *x)
{
- unsigned int i;
+ unsigned int j;
fprintf (stdout, "%s: template ", line);
pte (&x->tm);
(x->rex & REX_R) != 0,
(x->rex & REX_X) != 0,
(x->rex & REX_B) != 0);
- for (i = 0; i < x->operands; i++)
+ for (j = 0; j < x->operands; j++)
{
- fprintf (stdout, " #%d: ", i + 1);
- pt (x->types[i]);
+ fprintf (stdout, " #%d: ", j + 1);
+ pt (x->types[j]);
fprintf (stdout, "\n");
- if (x->types[i].bitfield.reg8
- || x->types[i].bitfield.reg16
- || x->types[i].bitfield.reg32
- || x->types[i].bitfield.reg64
- || x->types[i].bitfield.regmmx
- || x->types[i].bitfield.regxmm
- || x->types[i].bitfield.regymm
- || x->types[i].bitfield.sreg2
- || x->types[i].bitfield.sreg3
- || x->types[i].bitfield.control
- || x->types[i].bitfield.debug
- || x->types[i].bitfield.test)
- fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
- if (operand_type_check (x->types[i], imm))
- pe (x->op[i].imms);
- if (operand_type_check (x->types[i], disp))
- pe (x->op[i].disps);
+ if (x->types[j].bitfield.reg8
+ || x->types[j].bitfield.reg16
+ || x->types[j].bitfield.reg32
+ || x->types[j].bitfield.reg64
+ || x->types[j].bitfield.regmmx
+ || x->types[j].bitfield.regxmm
+ || x->types[j].bitfield.regymm
+ || x->types[j].bitfield.regzmm
+ || x->types[j].bitfield.sreg2
+ || x->types[j].bitfield.sreg3
+ || x->types[j].bitfield.control
+ || x->types[j].bitfield.debug
+ || x->types[j].bitfield.test)
+ fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
+ if (operand_type_check (x->types[j], imm))
+ pe (x->op[j].imms);
+ if (operand_type_check (x->types[j], disp))
+ pe (x->op[j].disps);
}
}
static void
-pte (template *t)
+pte (insn_template *t)
{
- unsigned int i;
+ unsigned int j;
fprintf (stdout, " %d operands ", t->operands);
fprintf (stdout, "opcode %x ", t->base_opcode);
if (t->extension_opcode != None)
if (t->opcode_modifier.w)
fprintf (stdout, "W");
fprintf (stdout, "\n");
- for (i = 0; i < t->operands; i++)
+ for (j = 0; j < t->operands; j++)
{
- fprintf (stdout, " #%d type ", i + 1);
- pt (t->operand_types[i]);
+ fprintf (stdout, " #%d type ", j + 1);
+ pt (t->operand_types[j]);
fprintf (stdout, "\n");
}
}
{ OPERAND_TYPE_DISP32, "d32" },
{ OPERAND_TYPE_DISP32S, "d32s" },
{ OPERAND_TYPE_DISP64, "d64" },
+ { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
{ OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
{ OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
{ OPERAND_TYPE_CONTROL, "control reg" },
{ OPERAND_TYPE_REGMMX, "rMMX" },
{ OPERAND_TYPE_REGXMM, "rXMM" },
{ OPERAND_TYPE_REGYMM, "rYMM" },
+ { OPERAND_TYPE_REGZMM, "rZMM" },
+ { OPERAND_TYPE_REGMASK, "Mask reg" },
{ OPERAND_TYPE_ESSEG, "es" },
};
{
if (other != NO_RELOC)
{
- reloc_howto_type *reloc;
+ reloc_howto_type *rel;
if (size == 8)
switch (other)
case BFD_RELOC_X86_64_GOT32:
return BFD_RELOC_X86_64_GOT64;
break;
+ case BFD_RELOC_X86_64_GOTPLT64:
+ return BFD_RELOC_X86_64_GOTPLT64;
+ break;
case BFD_RELOC_X86_64_PLTOFF64:
return BFD_RELOC_X86_64_PLTOFF64;
break;
break;
}
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ if (other == BFD_RELOC_SIZE32)
+ {
+ if (size == 8)
+ other = BFD_RELOC_SIZE64;
+ if (pcrel)
+ {
+ as_bad (_("there are no pc-relative size relocations"));
+ return NO_RELOC;
+ }
+ }
+#endif
+
/* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
- if (size == 4 && flag_code != CODE_64BIT)
+ if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
sign = -1;
- reloc = bfd_reloc_type_lookup (stdoutput, other);
- if (!reloc)
+ rel = bfd_reloc_type_lookup (stdoutput, other);
+ if (!rel)
as_bad (_("unknown relocation (%u)"), other);
- else if (size != bfd_get_reloc_size (reloc))
+ else if (size != bfd_get_reloc_size (rel))
as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
- bfd_get_reloc_size (reloc),
+ bfd_get_reloc_size (rel),
size);
- else if (pcrel && !reloc->pc_relative)
+ else if (pcrel && !rel->pc_relative)
as_bad (_("non-pc-relative relocation for pc-relative field"));
- else if ((reloc->complain_on_overflow == complain_overflow_signed
+ else if ((rel->complain_on_overflow == complain_overflow_signed
&& !sign)
- || (reloc->complain_on_overflow == complain_overflow_unsigned
+ || (rel->complain_on_overflow == complain_overflow_unsigned
&& sign > 0))
as_bad (_("relocated field and relocation type differ in signedness"));
else
&& fixP->fx_r_type == BFD_RELOC_32_PCREL)
return 0;
- /* adjust_reloc_syms doesn't know about the GOT. */
- if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
+ /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
+ for size relocations. */
+ if (fixP->fx_r_type == BFD_RELOC_SIZE32
+ || fixP->fx_r_type == BFD_RELOC_SIZE64
+ || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
|| fixP->fx_r_type == BFD_RELOC_386_PLT32
|| fixP->fx_r_type == BFD_RELOC_386_GOT32
|| fixP->fx_r_type == BFD_RELOC_386_TLS_GD
|| fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
|| fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
return 0;
-
- if (fixP->fx_addsy != NULL
- && symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_GNU_INDIRECT_FUNCTION)
- return 0;
#endif
return 1;
}
/* Build the VEX prefix. */
static void
-build_vex_prefix (const template *t)
+build_vex_prefix (const insn_template *t)
{
unsigned int register_specifier;
unsigned int implied_prefix;
/* Check register specifier. */
if (i.vex.register_specifier)
{
- register_specifier = i.vex.register_specifier->reg_num;
- if ((i.vex.register_specifier->reg_flags & RegRex))
- register_specifier += 8;
- register_specifier = ~register_specifier & 0xf;
+ register_specifier =
+ ~register_number (i.vex.register_specifier) & 0xf;
+ gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
}
else
register_specifier = 0xf;
operand. */
if (!i.swap_operand
&& i.operands == i.reg_operands
- && i.tm.opcode_modifier.vex0f
+ && i.tm.opcode_modifier.vexopcode == VEX0F
&& i.tm.opcode_modifier.s
&& i.rex == REX_B)
{
i.tm = t[1];
}
- vector_length = i.tm.opcode_modifier.vex256 ? 1 : 0;
+ if (i.tm.opcode_modifier.vex == VEXScalar)
+ vector_length = avxscalar;
+ else
+ vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
switch ((i.tm.base_opcode >> 8) & 0xff)
{
}
/* Use 2-byte VEX prefix if possible. */
- if (i.tm.opcode_modifier.vex0f
+ if (i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.vexw != VEXW1
&& (i.rex & (REX_W | REX_X | REX_B)) == 0)
{
/* 2-byte VEX prefix. */
/* 3-byte VEX prefix. */
unsigned int m, w;
- if (i.tm.opcode_modifier.vex0f)
- m = 0x1;
- else if (i.tm.opcode_modifier.vex0f38)
- m = 0x2;
- else if (i.tm.opcode_modifier.vex0f3a)
- m = 0x3;
- else
- abort ();
-
i.vex.length = 3;
- i.vex.bytes[0] = 0xc4;
- /* The high 3 bits of the second VEX byte are 1's compliment
- of RXB bits from REX. */
+ switch (i.tm.opcode_modifier.vexopcode)
+ {
+ case VEX0F:
+ m = 0x1;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case VEX0F38:
+ m = 0x2;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case VEX0F3A:
+ m = 0x3;
+ i.vex.bytes[0] = 0xc4;
+ break;
+ case XOP08:
+ m = 0x8;
+ i.vex.bytes[0] = 0x8f;
+ break;
+ case XOP09:
+ m = 0x9;
+ i.vex.bytes[0] = 0x8f;
+ break;
+ case XOP0A:
+ m = 0xa;
+ i.vex.bytes[0] = 0x8f;
+ break;
+ default:
+ abort ();
+ }
+
+ /* The high 3 bits of the second VEX byte are 1's complement
+ of RXB bits from REX. */
i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
/* Check the REX.W bit. */
w = (i.rex & REX_W) ? 1 : 0;
- if (i.tm.opcode_modifier.vexw0 || i.tm.opcode_modifier.vexw1)
- {
- if (w)
- abort ();
-
- if (i.tm.opcode_modifier.vexw1)
- w = 1;
- }
+ if (i.tm.opcode_modifier.vexw == VEXW1)
+ w = 1;
i.vex.bytes[2] = (w << 7
| register_specifier << 3
}
}
+/* Build the EVEX prefix. */
+
+static void
+build_evex_prefix (void)
+{
+ unsigned int register_specifier;
+ unsigned int implied_prefix;
+ unsigned int m, w;
+ rex_byte vrex_used = 0;
+
+ /* Check register specifier. */
+ if (i.vex.register_specifier)
+ {
+ gas_assert ((i.vrex & REX_X) == 0);
+
+ register_specifier = i.vex.register_specifier->reg_num;
+ if ((i.vex.register_specifier->reg_flags & RegRex))
+ register_specifier += 8;
+ /* The upper 16 registers are encoded in the fourth byte of the
+ EVEX prefix. */
+ if (!(i.vex.register_specifier->reg_flags & RegVRex))
+ i.vex.bytes[3] = 0x8;
+ register_specifier = ~register_specifier & 0xf;
+ }
+ else
+ {
+ register_specifier = 0xf;
+
+ /* Encode upper 16 vector index register in the fourth byte of
+ the EVEX prefix. */
+ if (!(i.vrex & REX_X))
+ i.vex.bytes[3] = 0x8;
+ else
+ vrex_used |= REX_X;
+ }
+
+ switch ((i.tm.base_opcode >> 8) & 0xff)
+ {
+ case 0:
+ implied_prefix = 0;
+ break;
+ case DATA_PREFIX_OPCODE:
+ implied_prefix = 1;
+ break;
+ case REPE_PREFIX_OPCODE:
+ implied_prefix = 2;
+ break;
+ case REPNE_PREFIX_OPCODE:
+ implied_prefix = 3;
+ break;
+ default:
+ abort ();
+ }
+
+ /* 4 byte EVEX prefix. */
+ i.vex.length = 4;
+ i.vex.bytes[0] = 0x62;
+
+ /* mmmm bits. */
+ switch (i.tm.opcode_modifier.vexopcode)
+ {
+ case VEX0F:
+ m = 1;
+ break;
+ case VEX0F38:
+ m = 2;
+ break;
+ case VEX0F3A:
+ m = 3;
+ break;
+ default:
+ abort ();
+ break;
+ }
+
+ /* The high 3 bits of the second EVEX byte are 1's complement of RXB
+ bits from REX. */
+ i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
+
+ /* The fifth bit of the second EVEX byte is 1's complement of the
+ REX_R bit in VREX. */
+ if (!(i.vrex & REX_R))
+ i.vex.bytes[1] |= 0x10;
+ else
+ vrex_used |= REX_R;
+
+ if ((i.reg_operands + i.imm_operands) == i.operands)
+ {
+ /* When all operands are registers, the REX_X bit in REX is not
+ used. We reuse it to encode the upper 16 registers, which is
+ indicated by the REX_B bit in VREX. The REX_X bit is encoded
+ as 1's complement. */
+ if ((i.vrex & REX_B))
+ {
+ vrex_used |= REX_B;
+ i.vex.bytes[1] &= ~0x40;
+ }
+ }
+
+ /* EVEX instructions shouldn't need the REX prefix. */
+ i.vrex &= ~vrex_used;
+ gas_assert (i.vrex == 0);
+
+ /* Check the REX.W bit. */
+ w = (i.rex & REX_W) ? 1 : 0;
+ if (i.tm.opcode_modifier.vexw)
+ {
+ if (i.tm.opcode_modifier.vexw == VEXW1)
+ w = 1;
+ }
+ /* If w is not set it means we are dealing with a WIG instruction. */
+ else if (!w)
+ {
+ if (evexwig == evexw1)
+ w = 1;
+ }
+
+ /* Encode the U bit. */
+ implied_prefix |= 0x4;
+
+ /* The third byte of the EVEX prefix. */
+ i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
+
+ /* The fourth byte of the EVEX prefix. */
+ /* The zeroing-masking bit. */
+ if (i.mask && i.mask->zeroing)
+ i.vex.bytes[3] |= 0x80;
+
+ /* Don't always set the broadcast bit if there is no RC. */
+ if (!i.rounding)
+ {
+ /* Encode the vector length. */
+ unsigned int vec_length;
+
+ switch (i.tm.opcode_modifier.evex)
+ {
+ case EVEXLIG: /* LL' is ignored */
+ vec_length = evexlig << 5;
+ break;
+ case EVEX128:
+ vec_length = 0 << 5;
+ break;
+ case EVEX256:
+ vec_length = 1 << 5;
+ break;
+ case EVEX512:
+ vec_length = 2 << 5;
+ break;
+ default:
+ abort ();
+ break;
+ }
+ i.vex.bytes[3] |= vec_length;
+ /* Encode the broadcast bit. */
+ if (i.broadcast)
+ i.vex.bytes[3] |= 0x10;
+ }
+ else
+ {
+ if (i.rounding->type != saeonly)
+ i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
+ else
+ i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
+ }
+
+ if (i.mask && i.mask->mask)
+ i.vex.bytes[3] |= i.mask->mask->reg_num;
+}
+
static void
process_immext (void)
{
expressionS *exp;
- if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
+ if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
+ && i.operands > 0)
{
- /* SSE3 Instructions have the fixed operands with an opcode
- suffix which is coded in the same place as an 8-bit immediate
- field would be. Here we check those operands and remove them
- afterwards. */
+ /* MONITOR/MWAIT as well as SVME instructions have fixed operands
+ with an opcode suffix which is coded in the same place as an
+ 8-bit immediate field would be.
+ Here we check those operands and remove them afterwards. */
unsigned int x;
for (x = 0; x < i.operands; x++)
- if (i.op[x].regs->reg_num != x)
+ if (register_number (i.op[x].regs) != x)
as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
register_prefix, i.op[x].regs->reg_name, x + 1,
i.tm.name);
i.operands = 0;
}
+ if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
+ {
+ /* MONITORX/MWAITX instructions have fixed operands with an opcode
+ suffix which is coded in the same place as an 8-bit immediate
+ field would be.
+ Here we check those operands and remove them afterwards. */
+ unsigned int x;
+
+ if (i.operands != 3)
+ abort();
+
+ for (x = 0; x < 2; x++)
+ if (register_number (i.op[x].regs) != x)
+ goto bad_register_operand;
+
+ /* Check for third operand for mwaitx/monitorx insn. */
+ if (register_number (i.op[x].regs)
+ != (x + (i.tm.extension_opcode == 0xfb)))
+ {
+bad_register_operand:
+ as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
+ register_prefix, i.op[x].regs->reg_name, x+1,
+ i.tm.name);
+ }
+
+ i.operands = 0;
+ }
+
/* These AMD 3DNow! and SSE2 instructions have an opcode suffix
which is coded in the same place as an 8-bit immediate field
would be. Here we fake an 8-bit immediate operand from the
AVX instructions also use this encoding, for some of
3 argument instructions. */
- gas_assert (i.imm_operands == 0
+ gas_assert (i.imm_operands <= 1
&& (i.operands <= 2
- || (i.tm.opcode_modifier.vex
+ || ((i.tm.opcode_modifier.vex
+ || i.tm.opcode_modifier.evex)
&& i.operands <= 4)));
exp = &im_expressions[i.imm_operands++];
i.tm.extension_opcode = None;
}
+
+static int
+check_hle (void)
+{
+ switch (i.tm.opcode_modifier.hleprefixok)
+ {
+ default:
+ abort ();
+ case HLEPrefixNone:
+ as_bad (_("invalid instruction `%s' after `%s'"),
+ i.tm.name, i.hle_prefix);
+ return 0;
+ case HLEPrefixLock:
+ if (i.prefix[LOCK_PREFIX])
+ return 1;
+ as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
+ return 0;
+ case HLEPrefixAny:
+ return 1;
+ case HLEPrefixRelease:
+ if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
+ {
+ as_bad (_("instruction `%s' after `xacquire' not allowed"),
+ i.tm.name);
+ return 0;
+ }
+ if (i.mem_operands == 0
+ || !operand_type_check (i.types[i.operands - 1], anymem))
+ {
+ as_bad (_("memory destination needed for instruction `%s'"
+ " after `xrelease'"), i.tm.name);
+ return 0;
+ }
+ return 1;
+ }
+}
+
/* This is the guts of the machine-dependent assembler. LINE points to a
machine dependent instruction. This function is supposed to emit
the frags/bytes it assembles to. */
{
unsigned int j;
char mnemonic[MAX_MNEM_SIZE];
- const template *t;
+ const insn_template *t;
/* Initialize globals. */
memset (&i, '\0', sizeof (i));
/* Don't optimize displacement for movabs since it only takes 64bit
displacement. */
if (i.disp_operands
+ && i.disp_encoding != disp_encoding_32bit
&& (flag_code != CODE_64BIT
|| strcmp (mnemonic, "movabs") != 0))
optimize_disp ();
if (!(t = match_template ()))
return;
- if (sse_check != sse_check_none
+ if (sse_check != check_none
&& !i.tm.opcode_modifier.noavx
&& (i.tm.cpu_flags.bitfield.cpusse
|| i.tm.cpu_flags.bitfield.cpusse2
|| i.tm.cpu_flags.bitfield.cpusse4_1
|| i.tm.cpu_flags.bitfield.cpusse4_2))
{
- (sse_check == sse_check_warning
+ (sse_check == check_warning
? as_warn
: as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
}
if (!add_prefix (FWAIT_OPCODE))
return;
+ /* Check if REP prefix is OK. */
+ if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
+ {
+ as_bad (_("invalid instruction `%s' after `%s'"),
+ i.tm.name, i.rep_prefix);
+ return;
+ }
+
+ /* Check for lock without a lockable instruction. Destination operand
+ must be memory unless it is xchg (0x86). */
+ if (i.prefix[LOCK_PREFIX]
+ && (!i.tm.opcode_modifier.islockable
+ || i.mem_operands == 0
+ || (i.tm.base_opcode != 0x86
+ && !operand_type_check (i.types[i.operands - 1], anymem))))
+ {
+ as_bad (_("expecting lockable instruction after `lock'"));
+ return;
+ }
+
+ /* Check if HLE prefix is OK. */
+ if (i.hle_prefix && !check_hle ())
+ return;
+
+ /* Check BND prefix. */
+ if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
+ as_bad (_("expecting valid branch instruction after `bnd'"));
+
+ if (i.tm.cpu_flags.bitfield.cpumpx
+ && flag_code == CODE_64BIT
+ && i.prefix[ADDR_PREFIX])
+ as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
+
+ /* Insert BND prefix. */
+ if (add_bnd_prefix
+ && i.tm.opcode_modifier.bndprefixok
+ && !i.prefix[BND_PREFIX])
+ add_prefix (BND_PREFIX_OPCODE);
+
/* Check string instruction segment overrides. */
if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
{
if (!process_suffix ())
return;
+ /* Update operand types. */
+ for (j = 0; j < i.operands; j++)
+ i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
+
/* Make still unresolved immediate matches conform to size of immediate
given in i.suffix. */
if (!finalize_imm ())
if (i.types[0].bitfield.imm1)
i.imm_operands = 0; /* kludge for shift insns. */
- for (j = 0; j < 3; j++)
- if (i.types[j].bitfield.inoutportreg
- || i.types[j].bitfield.shiftcount
- || i.types[j].bitfield.acc
- || i.types[j].bitfield.floatacc)
- i.reg_operands--;
+ /* We only need to check those implicit registers for instructions
+ with 3 operands or less. */
+ if (i.operands <= 3)
+ for (j = 0; j < i.operands; j++)
+ if (i.types[j].bitfield.inoutportreg
+ || i.types[j].bitfield.shiftcount
+ || i.types[j].bitfield.acc
+ || i.types[j].bitfield.floatacc)
+ i.reg_operands--;
/* ImmExt should be processed after SSE2AVX. */
if (!i.tm.opcode_modifier.sse2avx
as_warn (_("translating to `%sp'"), i.tm.name);
}
- if (i.tm.opcode_modifier.vex)
- build_vex_prefix (t);
+ if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
+ {
+ if (flag_code == CODE_16BIT)
+ {
+ as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
+ i.tm.name);
+ return;
+ }
+
+ if (i.tm.opcode_modifier.vex)
+ build_vex_prefix (t);
+ else
+ build_evex_prefix ();
+ }
- /* Handle conversion of 'int $3' --> special int3 insn. */
- if (i.tm.base_opcode == INT_OPCODE && i.op[0].imms->X_add_number == 3)
+ /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
+ instructions may define INT_OPCODE as well, so avoid this corner
+ case for those instructions that use MODRM. */
+ if (i.tm.base_opcode == INT_OPCODE
+ && !i.tm.opcode_modifier.modrm
+ && i.op[0].imms->X_add_number == 3)
{
i.tm.base_opcode = INT3_OPCODE;
i.imm_operands = 0;
char *token_start = l;
char *mnem_p;
int supported;
- const template *t;
+ const insn_template *t;
char *dot_p = NULL;
- /* Non-zero if we found a prefix only acceptable with string insns. */
- const char *expecting_string_instruction = NULL;
-
while (1)
{
mnem_p = mnemonic;
}
/* Look up instruction (or prefix) via hash table. */
- current_templates = hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) hash_find (op_hash, mnemonic);
if (*l != END_OF_INSN
&& (!is_space_char (*l) || l[1] != END_OF_INSN)
/* Add prefix, checking for repeated prefixes. */
switch (add_prefix (current_templates->start->base_opcode))
{
- case 0:
+ case PREFIX_EXIST:
return NULL;
- case 2:
- expecting_string_instruction = current_templates->start->name;
+ case PREFIX_REP:
+ if (current_templates->start->cpu_flags.bitfield.cpuhle)
+ i.hle_prefix = current_templates->start->name;
+ else if (current_templates->start->cpu_flags.bitfield.cpumpx)
+ i.bnd_prefix = current_templates->start->name;
+ else
+ i.rep_prefix = current_templates->start->name;
+ break;
+ default:
break;
}
/* Skip past PREFIX_SEPARATOR and reset token_start. */
if (!current_templates)
{
- /* Check if we should swap operand in encoding. */
+ /* Check if we should swap operand or force 32bit displacement in
+ encoding. */
if (mnem_p - 2 == dot_p && dot_p[1] == 's')
i.swap_operand = 1;
+ else if (mnem_p - 3 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '8')
+ i.disp_encoding = disp_encoding_8bit;
+ else if (mnem_p - 4 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '3'
+ && dot_p[3] == '2')
+ i.disp_encoding = disp_encoding_32bit;
else
goto check_suffix;
mnem_p = dot_p;
*dot_p = '\0';
- current_templates = hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) hash_find (op_hash, mnemonic);
}
if (!current_templates)
case QWORD_MNEM_SUFFIX:
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) hash_find (op_hash,
+ mnemonic);
break;
case SHORT_MNEM_SUFFIX:
case LONG_MNEM_SUFFIX:
{
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) hash_find (op_hash,
+ mnemonic);
}
break;
else
i.suffix = LONG_MNEM_SUFFIX;
mnem_p[-1] = '\0';
- current_templates = hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) hash_find (op_hash,
+ mnemonic);
}
break;
}
as_warn (_("use .code16 to ensure correct addressing mode"));
}
- /* Check for rep/repne without a string instruction. */
- if (expecting_string_instruction)
- {
- static templates override;
-
- for (t = current_templates->start; t < current_templates->end; ++t)
- if (t->opcode_modifier.isstring)
- break;
- if (t >= current_templates->end)
- {
- as_bad (_("expecting string instruction after `%s'"),
- expecting_string_instruction);
- return NULL;
- }
- for (override.start = t; t < current_templates->end; ++t)
- if (!t->opcode_modifier.isstring)
- break;
- override.end = t;
- current_templates = &override;
- }
-
return l;
}
temp_reloc = i.reloc[xchg2];
i.reloc[xchg2] = i.reloc[xchg1];
i.reloc[xchg1] = temp_reloc;
+
+ if (i.mask)
+ {
+ if (i.mask->operand == xchg1)
+ i.mask->operand = xchg2;
+ else if (i.mask->operand == xchg2)
+ i.mask->operand = xchg1;
+ }
+ if (i.broadcast)
+ {
+ if (i.broadcast->operand == xchg1)
+ i.broadcast->operand = xchg2;
+ else if (i.broadcast->operand == xchg2)
+ i.broadcast->operand = xchg1;
+ }
+ if (i.rounding)
+ {
+ if (i.rounding->operand == xchg1)
+ i.rounding->operand = xchg2;
+ else if (i.rounding->operand == xchg2)
+ i.rounding->operand = xchg1;
+ }
}
static void
than those matching the insn suffix. */
{
i386_operand_type mask, allowed;
- const template *t;
+ const insn_template *t;
operand_type_set (&mask, 0);
operand_type_set (&allowed, 0);
{
if (i.op[op].disps->X_op == O_constant)
{
- offsetT disp = i.op[op].disps->X_add_number;
+ offsetT op_disp = i.op[op].disps->X_add_number;
if (i.types[op].bitfield.disp16
- && (disp & ~(offsetT) 0xffff) == 0)
+ && (op_disp & ~(offsetT) 0xffff) == 0)
{
/* If this operand is at most 16 bits, convert
to a signed 16 bit number and don't use 64bit
displacement. */
- disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
+ op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
i.types[op].bitfield.disp64 = 0;
}
if (i.types[op].bitfield.disp32
- && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
+ && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
{
/* If this operand is at most 32 bits, convert
to a signed 32 bit number and don't use 64bit
displacement. */
- disp &= (((offsetT) 2 << 31) - 1);
- disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
+ op_disp &= (((offsetT) 2 << 31) - 1);
+ op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
i.types[op].bitfield.disp64 = 0;
}
- if (!disp && i.types[op].bitfield.baseindex)
+ if (!op_disp && i.types[op].bitfield.baseindex)
{
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (flag_code == CODE_64BIT)
{
- if (fits_in_signed_long (disp))
+ if (fits_in_signed_long (op_disp))
{
i.types[op].bitfield.disp64 = 0;
i.types[op].bitfield.disp32s = 1;
}
- if (fits_in_unsigned_long (disp))
+ if (i.prefix[ADDR_PREFIX]
+ && fits_in_unsigned_long (op_disp))
i.types[op].bitfield.disp32 = 1;
}
if ((i.types[op].bitfield.disp32
|| i.types[op].bitfield.disp32s
|| i.types[op].bitfield.disp16)
- && fits_in_signed_byte (disp))
+ && fits_in_signed_byte (op_disp))
i.types[op].bitfield.disp8 = 1;
}
else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
}
}
-static const template *
+/* Check if operands are valid for the instruction. */
+/* AVX512/VSIB-specific operand validation: VSIB index registers,
+   write-masking, embedded broadcast, static rounding / SAE, and EVEX
+   compressed (vector) Disp8.  On the first failed check the reason is
+   stored in i.error and 1 is returned; 0 means all checks passed.  As a
+   side effect i.memshift and per-operand disp8/vec_disp8 type bits may
+   be updated for compressed-displacement handling.  */
+
+static int
+check_VecOperands (const insn_template *t)
+{
+ unsigned int op;
+
+ /* Without VSIB byte, we can't have a vector register for index. */
+ if (!t->opcode_modifier.vecsib
+ && i.index_reg
+ && (i.index_reg->reg_type.bitfield.regxmm
+ || i.index_reg->reg_type.bitfield.regymm
+ || i.index_reg->reg_type.bitfield.regzmm))
+ {
+ i.error = unsupported_vector_index_register;
+ return 1;
+ }
+
+ /* Check if default mask is allowed. */
+ /* NoDefMask templates reject %k0 (reg_num 0) and require an explicit
+    mask operand.  */
+ if (t->opcode_modifier.nodefmask
+ && (!i.mask || i.mask->mask->reg_num == 0))
+ {
+ i.error = no_default_mask;
+ return 1;
+ }
+
+ /* For VSIB byte, we need a vector register for index, and all vector
+ registers must be distinct. */
+ if (t->opcode_modifier.vecsib)
+ {
+ if (!i.index_reg
+ || !((t->opcode_modifier.vecsib == VecSIB128
+ && i.index_reg->reg_type.bitfield.regxmm)
+ || (t->opcode_modifier.vecsib == VecSIB256
+ && i.index_reg->reg_type.bitfield.regymm)
+ || (t->opcode_modifier.vecsib == VecSIB512
+ && i.index_reg->reg_type.bitfield.regzmm)))
+ {
+ i.error = invalid_vsib_address;
+ return 1;
+ }
+
+ /* Gather/scatter forms: either mask-register masking (i.mask) or the
+    legacy 2-register + mask-in-vector form.  */
+ gas_assert (i.reg_operands == 2 || i.mask);
+ if (i.reg_operands == 2 && !i.mask)
+ {
+ gas_assert (i.types[0].bitfield.regxmm
+ || i.types[0].bitfield.regymm);
+ gas_assert (i.types[2].bitfield.regxmm
+ || i.types[2].bitfield.regymm);
+ if (operand_check == check_none)
+ return 0;
+ if (register_number (i.op[0].regs)
+ != register_number (i.index_reg)
+ && register_number (i.op[2].regs)
+ != register_number (i.index_reg)
+ && register_number (i.op[0].regs)
+ != register_number (i.op[2].regs))
+ return 0;
+ if (operand_check == check_error)
+ {
+ i.error = invalid_vector_register_set;
+ return 1;
+ }
+ as_warn (_("mask, index, and destination registers should be distinct"));
+ }
+ else if (i.reg_operands == 1 && i.mask)
+ {
+ if ((i.types[1].bitfield.regymm
+ || i.types[1].bitfield.regzmm)
+ && (register_number (i.op[1].regs)
+ == register_number (i.index_reg)))
+ {
+ if (operand_check == check_error)
+ {
+ i.error = invalid_vector_register_set;
+ return 1;
+ }
+ if (operand_check != check_none)
+ as_warn (_("index and destination registers should be distinct"));
+ }
+ }
+ }
+
+ /* Check if broadcast is supported by the instruction and is applied
+ to the memory operand. */
+ if (i.broadcast)
+ {
+ int broadcasted_opnd_size;
+
+ /* Check if specified broadcast is supported in this instruction,
+ and it's applied to memory operand of DWORD or QWORD type,
+ depending on VecESize. */
+ if (i.broadcast->type != t->opcode_modifier.broadcast
+ || !i.types[i.broadcast->operand].bitfield.mem
+ || (t->opcode_modifier.vecesize == 0
+ && !i.types[i.broadcast->operand].bitfield.dword
+ && !i.types[i.broadcast->operand].bitfield.unspecified)
+ || (t->opcode_modifier.vecesize == 1
+ && !i.types[i.broadcast->operand].bitfield.qword
+ && !i.types[i.broadcast->operand].bitfield.unspecified))
+ goto bad_broadcast;
+
+ /* Element size in bits (VecESize 0 -> 32, 1 -> 64), scaled by the
+    broadcast factor to get the effective operand size.  */
+ broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
+ if (i.broadcast->type == BROADCAST_1TO16)
+ broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */
+ else if (i.broadcast->type == BROADCAST_1TO8)
+ broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */
+ else if (i.broadcast->type == BROADCAST_1TO4)
+ broadcasted_opnd_size <<= 2; /* Broadcast 1to4. */
+ else if (i.broadcast->type == BROADCAST_1TO2)
+ broadcasted_opnd_size <<= 1; /* Broadcast 1to2. */
+ else
+ goto bad_broadcast;
+
+ /* The broadcast must fill the template's vector width exactly.  */
+ if ((broadcasted_opnd_size == 256
+ && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
+ || (broadcasted_opnd_size == 512
+ && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
+ {
+ bad_broadcast:
+ i.error = unsupported_broadcast;
+ return 1;
+ }
+ }
+ /* If broadcast is supported in this instruction, we need to check if
+ operand of one-element size isn't specified without broadcast. */
+ else if (t->opcode_modifier.broadcast && i.mem_operands)
+ {
+ /* Find memory operand. */
+ for (op = 0; op < i.operands; op++)
+ if (operand_type_check (i.types[op], anymem))
+ break;
+ gas_assert (op < i.operands)
;
+ /* Check size of the memory operand. */
+ if ((t->opcode_modifier.vecesize == 0
+ && i.types[op].bitfield.dword)
+ || (t->opcode_modifier.vecesize == 1
+ && i.types[op].bitfield.qword))
+ {
+ i.error = broadcast_needed;
+ return 1;
+ }
+ }
+
+ /* Check if requested masking is supported. */
+ /* Zeroing-masking ({z}) needs more than merging-only support.  */
+ if (i.mask
+ && (!t->opcode_modifier.masking
+ || (i.mask->zeroing
+ && t->opcode_modifier.masking == MERGING_MASKING)))
+ {
+ i.error = unsupported_masking;
+ return 1;
+ }
+
+ /* Check if masking is applied to dest operand. */
+ if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
+ {
+ i.error = mask_not_on_destination;
+ return 1;
+ }
+
+ /* Check RC/SAE. */
+ if (i.rounding)
+ {
+ if ((i.rounding->type != saeonly
+ && !t->opcode_modifier.staticrounding)
+ || (i.rounding->type == saeonly
+ && (t->opcode_modifier.staticrounding
+ || !t->opcode_modifier.sae)))
+ {
+ i.error = unsupported_rc_sae;
+ return 1;
+ }
+ /* If the instruction has several immediate operands and one of
+ them is rounding, the rounding operand should be the last
+ immediate operand. */
+ if (i.imm_operands > 1
+ && i.rounding->operand != (int) (i.imm_operands - 1))
+ {
+ i.error = rc_sae_operand_not_last_imm;
+ return 1;
+ }
+ }
+
+ /* Check vector Disp8 operand. */
+ if (t->opcode_modifier.disp8memshift)
+ {
+ /* With broadcast the shift comes from the element size (2 for
+    dword, 3 for qword), not from the template.  */
+ if (i.broadcast)
+ i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
+ else
+ i.memshift = t->opcode_modifier.disp8memshift;
+
+ for (op = 0; op < i.operands; op++)
+ if (operand_type_check (i.types[op], disp)
+ && i.op[op].disps->X_op == O_constant)
+ {
+ offsetT value = i.op[op].disps->X_add_number;
+ int vec_disp8_ok = fits_in_vec_disp8 (value);
+ if (t->operand_types [op].bitfield.vec_disp8)
+ {
+ if (vec_disp8_ok)
+ i.types[op].bitfield.vec_disp8 = 1;
+ else
+ {
+ /* Vector insn can only have Vec_Disp8/Disp32 in
+ 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
+ mode. */
+ i.types[op].bitfield.disp8 = 0;
+ if (flag_code != CODE_16BIT)
+ i.types[op].bitfield.disp16 = 0;
+ }
+ }
+ else if (flag_code != CODE_16BIT)
+ {
+ /* One form of this instruction supports vector Disp8.
+ Try vector Disp8 if we need to use Disp32. */
+ if (vec_disp8_ok && !fits_in_signed_byte (value))
+ {
+ i.error = try_vector_disp8;
+ return 1;
+ }
+ }
+ }
+ }
+ else
+ /* No Disp8MemShift: no compressed Disp8 scaling for this template. */
+ i.memshift = -1;
+
+ return 0;
+}
+
+/* Check if operands are valid for the instruction. Update VEX
+ operand types. */
+/* Returns 1 (with i.error set) when VREX is used without EVEX or when
+   a Vec_Imm4 first operand is not a constant fitting in 4 bits;
+   otherwise returns 0.  May rewrite i.types[0] to vec_imm4.  */
+
+static int
+VEX_check_operands (const insn_template *t)
+{
+ /* VREX is only valid with EVEX prefix. */
+ if (i.need_vrex && !t->opcode_modifier.evex)
+ {
+ i.error = invalid_register_operand;
+ return 1;
+ }
+
+ /* Non-VEX templates need no further checking here.  */
+ if (!t->opcode_modifier.vex)
+ return 0;
+
+ /* Only check VEX_Imm4, which must be the first operand. */
+ if (t->operand_types[0].bitfield.vec_imm4)
+ {
+ if (i.op[0].imms->X_op != O_constant
+ || !fits_in_imm4 (i.op[0].imms->X_add_number))
+ {
+ i.error = bad_imm4;
+ return 1;
+ }
+
+ /* Turn off Imm8 so that update_imm won't complain. */
+ i.types[0] = vec_imm4;
+ }
+
+ return 0;
+}
+
+static const insn_template *
match_template (void)
{
/* Points to template once we've found it. */
- const template *t;
+ const insn_template *t;
i386_operand_type overlap0, overlap1, overlap2, overlap3;
i386_operand_type overlap4;
unsigned int found_reverse_match;
unsigned int j;
unsigned int found_cpu_match;
unsigned int check_register;
+ enum i386_error specific_error = 0;
#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
suffix_check.no_ldsuf = 1;
+ /* Must have right number of operands. */
+ i.error = number_of_operands_mismatch;
+
for (t = current_templates->start; t < current_templates->end; t++)
{
addr_prefix_disp = -1;
- /* Must have right number of operands. */
if (i.operands != t->operands)
continue;
/* Check processor support. */
+ i.error = unsupported;
found_cpu_match = (cpu_flags_match (t)
== CPU_FLAGS_PERFECT_MATCH);
if (!found_cpu_match)
continue;
/* Check old gcc support. */
+ i.error = old_gcc_only;
if (!old_gcc && t->opcode_modifier.oldgcc)
continue;
/* Check AT&T mnemonic. */
+ i.error = unsupported_with_intel_mnemonic;
if (intel_mnemonic && t->opcode_modifier.attmnemonic)
continue;
- /* Check AT&T syntax Intel syntax. */
+ /* Check AT&T/Intel syntax. */
+ i.error = unsupported_syntax;
if ((intel_syntax && t->opcode_modifier.attsyntax)
|| (!intel_syntax && t->opcode_modifier.intelsyntax))
continue;
/* Check the suffix, except for some instructions in intel mode. */
+ i.error = invalid_instruction_suffix;
if ((!intel_syntax || !t->opcode_modifier.ignoresize)
&& ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
|| (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
: intel_float_operand (t->name) != 2)
&& ((!operand_types[0].bitfield.regmmx
&& !operand_types[0].bitfield.regxmm
- && !operand_types[0].bitfield.regymm)
+ && !operand_types[0].bitfield.regymm
+ && !operand_types[0].bitfield.regzmm)
|| (!operand_types[t->operands > 1].bitfield.regmmx
- && !!operand_types[t->operands > 1].bitfield.regxmm
- && !!operand_types[t->operands > 1].bitfield.regymm))
+ && operand_types[t->operands > 1].bitfield.regxmm
+ && operand_types[t->operands > 1].bitfield.regymm
+ && operand_types[t->operands > 1].bitfield.regzmm))
&& (t->base_opcode != 0x0fc7
|| t->extension_opcode != 1 /* cmpxchg8b */))
continue;
&& ((!operand_types[0].bitfield.regmmx
&& !operand_types[0].bitfield.regxmm)
|| (!operand_types[t->operands > 1].bitfield.regmmx
- && !!operand_types[t->operands > 1].bitfield.regxmm)))
+ && operand_types[t->operands > 1].bitfield.regxmm)))
continue;
/* Do not verify operands when there are none. */
}
}
- /* We check register size only if size of operands can be
- encoded the canonical way. */
- check_register = t->opcode_modifier.w;
+ /* We check register size if needed. */
+ check_register = t->opcode_modifier.checkregsize;
overlap0 = operand_type_and (i.types[0], operand_types[0]);
switch (t->operands)
{
continue;
}
+ /* Check if vector and VEX operands are valid. */
+ if (check_VecOperands (t) || VEX_check_operands (t))
+ {
+ specific_error = i.error;
+ continue;
+ }
+
/* We've found a match; break out of loop. */
break;
}
if (t == current_templates->end)
{
/* We found no match. */
- if (intel_syntax)
- as_bad (_("ambiguous operand size or operands invalid for `%s'"),
- current_templates->start->name);
- else
- as_bad (_("suffix or operands invalid for `%s'"),
- current_templates->start->name);
+ const char *err_msg;
+ switch (specific_error ? specific_error : i.error)
+ {
+ default:
+ abort ();
+ case operand_size_mismatch:
+ err_msg = _("operand size mismatch");
+ break;
+ case operand_type_mismatch:
+ err_msg = _("operand type mismatch");
+ break;
+ case register_type_mismatch:
+ err_msg = _("register type mismatch");
+ break;
+ case number_of_operands_mismatch:
+ err_msg = _("number of operands mismatch");
+ break;
+ case invalid_instruction_suffix:
+ err_msg = _("invalid instruction suffix");
+ break;
+ case bad_imm4:
+ err_msg = _("constant doesn't fit in 4 bits");
+ break;
+ case old_gcc_only:
+ err_msg = _("only supported with old gcc");
+ break;
+ case unsupported_with_intel_mnemonic:
+ err_msg = _("unsupported with Intel mnemonic");
+ break;
+ case unsupported_syntax:
+ err_msg = _("unsupported syntax");
+ break;
+ case unsupported:
+ as_bad (_("unsupported instruction `%s'"),
+ current_templates->start->name);
+ return NULL;
+ case invalid_vsib_address:
+ err_msg = _("invalid VSIB address");
+ break;
+ case invalid_vector_register_set:
+ err_msg = _("mask, index, and destination registers must be distinct");
+ break;
+ case unsupported_vector_index_register:
+ err_msg = _("unsupported vector index register");
+ break;
+ case unsupported_broadcast:
+ err_msg = _("unsupported broadcast");
+ break;
+ case broadcast_not_on_src_operand:
+ err_msg = _("broadcast not on source memory operand");
+ break;
+ case broadcast_needed:
+ err_msg = _("broadcast is needed for operand of such type");
+ break;
+ case unsupported_masking:
+ err_msg = _("unsupported masking");
+ break;
+ case mask_not_on_destination:
+ err_msg = _("mask not on destination operand");
+ break;
+ case no_default_mask:
+ err_msg = _("default mask isn't allowed");
+ break;
+ case unsupported_rc_sae:
+ err_msg = _("unsupported static rounding/sae");
+ break;
+ case rc_sae_operand_not_last_imm:
+ if (intel_syntax)
+ err_msg = _("RC/SAE operand must precede immediate operands");
+ else
+ err_msg = _("RC/SAE operand must follow immediate operands");
+ break;
+ case invalid_register_operand:
+ err_msg = _("invalid register operand");
+ break;
+ }
+ as_bad (_("%s for `%s'"), err_msg,
+ current_templates->start->name);
return NULL;
}
}
else if (i.suffix == BYTE_MNEM_SUFFIX)
{
- if (!check_byte_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_bsuf)
+ i.suffix = 0;
+ else if (!check_byte_reg ())
return 0;
}
else if (i.suffix == LONG_MNEM_SUFFIX)
{
- if (!check_long_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_lsuf)
+ i.suffix = 0;
+ else if (!check_long_reg ())
return 0;
}
else if (i.suffix == QWORD_MNEM_SUFFIX)
}
else if (i.suffix == WORD_MNEM_SUFFIX)
{
- if (!check_word_reg ())
+ if (intel_syntax
+ && i.tm.opcode_modifier.ignoresize
+ && i.tm.opcode_modifier.no_wsuf)
+ i.suffix = 0;
+ else if (!check_word_reg ())
return 0;
}
else if (i.suffix == XMMWORD_MNEM_SUFFIX
- || i.suffix == YMMWORD_MNEM_SUFFIX)
+ || i.suffix == YMMWORD_MNEM_SUFFIX
+ || i.suffix == ZMMWORD_MNEM_SUFFIX)
{
- /* Skip if the instruction has x/y suffix. match_template
+ /* Skip if the instruction has x/y/z suffix. match_template
should check if it is a valid suffix. */
}
else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
if (i.suffix
&& i.suffix != BYTE_MNEM_SUFFIX
&& i.suffix != XMMWORD_MNEM_SUFFIX
- && i.suffix != YMMWORD_MNEM_SUFFIX)
+ && i.suffix != YMMWORD_MNEM_SUFFIX
+ && i.suffix != ZMMWORD_MNEM_SUFFIX)
{
/* It's not a byte, select word/dword operation. */
if (i.tm.opcode_modifier.w)
if (i.types[op].bitfield.reg8)
continue;
- /* Don't generate this warning if not needed. */
- if (intel_syntax && i.tm.opcode_modifier.byteokintel)
+ /* I/O port address operands are OK too. */
+ if (i.tm.operand_types[op].bitfield.inoutportreg)
continue;
/* crc32 doesn't generate this warning. */
if ((i.types[op].bitfield.reg16
|| i.types[op].bitfield.reg32
|| i.types[op].bitfield.reg64)
- && i.op[op].regs->reg_num < 4)
+ && i.op[op].regs->reg_num < 4
+ /* Prohibit these changes in 64bit mode, since the lowering
+ would be more complicated. */
+ && flag_code != CODE_64BIT)
{
- /* Prohibit these changes in the 64bit mode, since the
- lowering is more complicated. */
- if (flag_code == CODE_64BIT
- && !i.tm.operand_types[op].bitfield.inoutportreg)
- {
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
- register_prefix, i.op[op].regs->reg_name,
- i.suffix);
- return 0;
- }
#if REGISTER_WARNINGS
- if (!quiet_warnings
- && !i.tm.operand_types[op].bitfield.inoutportreg)
+ if (!quiet_warnings)
as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
register_prefix,
(i.op[op].regs + (i.types[op].bitfield.reg16
|| i.types[op].bitfield.regmmx
|| i.types[op].bitfield.regxmm
|| i.types[op].bitfield.regymm
+ || i.types[op].bitfield.regzmm
|| i.types[op].bitfield.sreg2
|| i.types[op].bitfield.sreg3
|| i.types[op].bitfield.control
i.suffix);
return 0;
}
- /* Warn if the e prefix on a general reg is missing. */
+ /* Warn if the e prefix on a general reg is missing. */
else if ((!quiet_warnings || flag_code == CODE_64BIT)
&& i.types[op].bitfield.reg16
&& (i.tm.operand_types[op].bitfield.reg32
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
#if REGISTER_WARNINGS
- else
- as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
- register_prefix,
- (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
- register_prefix,
- i.op[op].regs->reg_name,
- i.suffix);
+ as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
+ register_prefix,
+ (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
+ register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
}
- /* Warn if the r prefix on a general reg is missing. */
+ /* Warn if the r prefix on a general reg is present. */
else if (i.types[op].bitfield.reg64
&& (i.tm.operand_types[op].bitfield.reg32
|| i.tm.operand_types[op].bitfield.acc))
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
i.suffix);
return 0;
}
- /* Warn if the e prefix on a general reg is missing. */
+ /* Warn if the r prefix on a general reg is missing. */
else if ((i.types[op].bitfield.reg16
|| i.types[op].bitfield.reg32)
&& (i.tm.operand_types[op].bitfield.reg32
}
else
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
i.suffix);
return 0;
}
- /* Warn if the e prefix on a general reg is present. */
+ /* Warn if the e or r prefix on a general reg is present. */
else if ((!quiet_warnings || flag_code == CODE_64BIT)
- && i.types[op].bitfield.reg32
+ && (i.types[op].bitfield.reg32
+ || i.types[op].bitfield.reg64)
&& (i.tm.operand_types[op].bitfield.reg16
|| i.tm.operand_types[op].bitfield.acc))
{
lowering is more complicated. */
if (flag_code == CODE_64BIT)
{
- as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
register_prefix, i.op[op].regs->reg_name,
i.suffix);
return 0;
}
- else
#if REGISTER_WARNINGS
- as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
- register_prefix,
- (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
- register_prefix,
- i.op[op].regs->reg_name,
- i.suffix);
+ as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
+ register_prefix,
+ (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
+ register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
}
return 1;
static int
update_imm (unsigned int j)
{
- i386_operand_type overlap;
-
- overlap = operand_type_and (i.types[j], i.tm.operand_types[j]);
+ i386_operand_type overlap = i.types[j];
if ((overlap.bitfield.imm8
|| overlap.bitfield.imm8s
|| overlap.bitfield.imm16
+/* Make still-unresolved immediate operand sizes conform via update_imm
+   for the first (at most 2) operands.  Returns 0 on failure, 1 on
+   success.  The 3rd operand is asserted never to be an immediate.  */
static int
finalize_imm (void)
{
- unsigned int j;
+ unsigned int j, n;
- for (j = 0; j < 2; j++)
- if (update_imm (j) == 0)
- return 0;
+ /* Update the first 2 immediate operands. */
+ n = i.operands > 2 ? 2 : i.operands;
+ if (n)
+ {
+ for (j = 0; j < n; j++)
+ if (update_imm (j) == 0)
+ return 0;
- i.types[2] = operand_type_and (i.types[2], i.tm.operand_types[2]);
- gas_assert (operand_type_check (i.types[2], imm) == 0);
+ /* The 3rd operand can't be immediate operand. */
+ gas_assert (operand_type_check (i.types[2], imm) == 0);
+ }
return 1;
}
+/* Complain that the implicit operand of the current insn must be
+   %xmm0 (XMM non-zero) or %ymm0 -- the "last" operand in Intel syntax,
+   the "first" in AT&T syntax.  Always returns 0 (failure).  */
static int
bad_implicit_operand (int xmm)
{
- const char *reg = xmm ? "xmm0" : "ymm0";
+ const char *ireg = xmm ? "xmm0" : "ymm0";
+
if (intel_syntax)
as_bad (_("the last operand of `%s' must be `%s%s'"),
- i.tm.name, register_prefix, reg);
+ i.tm.name, register_prefix, ireg);
else
as_bad (_("the first operand of `%s' must be `%s%s'"),
- i.tm.name, register_prefix, reg);
return 0;
}
unnecessary segment overrides. */
const seg_entry *default_seg = 0;
- if (i.tm.opcode_modifier.sse2avx
- && (i.tm.opcode_modifier.vexnds
- || i.tm.opcode_modifier.vexndd))
+ if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
{
- unsigned int dup = i.operands;
- unsigned int dest = dup - 1;
+ unsigned int dupl = i.operands;
+ unsigned int dest = dupl - 1;
unsigned int j;
/* The destination must be an xmm register. */
gas_assert (i.reg_operands
- && MAX_OPERANDS > dup
+ && MAX_OPERANDS > dupl
&& operand_type_equal (&i.types[dest], ®xmm));
if (i.tm.opcode_modifier.firstxmm0)
{
/* The first operand is implicit and must be xmm0. */
gas_assert (operand_type_equal (&i.types[0], ®xmm));
- if (i.op[0].regs->reg_num != 0)
+ if (register_number (i.op[0].regs) != 0)
return bad_implicit_operand (1);
- if (i.tm.opcode_modifier.vex3sources)
+ if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
{
/* Keep xmm0 for instructions with VEX prefix and 3
sources. */
}
else if (i.tm.opcode_modifier.implicit1stxmm0)
{
- gas_assert ((MAX_OPERANDS - 1) > dup
- && i.tm.opcode_modifier.vex3sources);
+ gas_assert ((MAX_OPERANDS - 1) > dupl
+ && (i.tm.opcode_modifier.vexsources
+ == VEX3SOURCES));
/* Add the implicit xmm0 for instructions with VEX prefix
and 3 sources. */
i.reg_operands += 2;
i.tm.operands += 2;
- dup++;
+ dupl++;
dest++;
- i.op[dup] = i.op[dest];
- i.types[dup] = i.types[dest];
- i.tm.operand_types[dup] = i.tm.operand_types[dest];
+ i.op[dupl] = i.op[dest];
+ i.types[dupl] = i.types[dest];
+ i.tm.operand_types[dupl] = i.tm.operand_types[dest];
}
else
{
i.reg_operands++;
i.tm.operands++;
- i.op[dup] = i.op[dest];
- i.types[dup] = i.types[dest];
- i.tm.operand_types[dup] = i.tm.operand_types[dest];
+ i.op[dupl] = i.op[dest];
+ i.types[dupl] = i.types[dest];
+ i.tm.operand_types[dupl] = i.tm.operand_types[dest];
}
if (i.tm.opcode_modifier.immext)
{
unsigned int j;
- /* The first operand is implicit and must be xmm0/ymm0. */
+ /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
gas_assert (i.reg_operands
&& (operand_type_equal (&i.types[0], ®xmm)
- || operand_type_equal (&i.types[0], ®ymm)));
- if (i.op[0].regs->reg_num != 0)
+ || operand_type_equal (&i.types[0], ®ymm)
+ || operand_type_equal (&i.types[0], ®zmm)));
+ if (register_number (i.op[0].regs) != 0)
return bad_implicit_operand (i.types[0].bitfield.regxmm);
for (j = 1; j < i.operands; j++)
/* The first operand of instructions with VEX prefix and 3 sources
must be VEX_Imm4. */
- vex_3_sources = i.tm.opcode_modifier.vex3sources;
+ vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
if (vex_3_sources)
{
- unsigned int nds, reg;
+ unsigned int nds, reg_slot;
+ expressionS *exp;
if (i.tm.opcode_modifier.veximmext
- && i.tm.opcode_modifier.immext)
- {
- dest = i.operands - 2;
- gas_assert (dest == 3);
- }
+ && i.tm.opcode_modifier.immext)
+ {
+ dest = i.operands - 2;
+ gas_assert (dest == 3);
+ }
else
- dest = i.operands - 1;
+ dest = i.operands - 1;
nds = dest - 1;
- /* This instruction must have 4 register operands
- or 3 register operands plus 1 memory operand.
- It must have VexNDS and VexImmExt. */
+ /* There are 2 kinds of instructions:
+ 1. 5 operands: 4 register operands or 3 register operands
+ plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
+ VexW0 or VexW1. The destination must be either XMM, YMM or
+ ZMM register.
+ 2. 4 operands: 4 register operands or 3 register operands
+ plus 1 memory operand, VexXDS, and VexImmExt */
gas_assert ((i.reg_operands == 4
- || (i.reg_operands == 3 && i.mem_operands == 1))
- && i.tm.opcode_modifier.vexnds
- && i.tm.opcode_modifier.veximmext
- && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
- || operand_type_equal (&i.tm.operand_types[dest], ®ymm)));
-
- /* Generate an 8bit immediate operand to encode the register
- operand. */
- expressionS *exp = &im_expressions[i.imm_operands++];
- i.op[i.operands].imms = exp;
- i.types[i.operands] = imm8;
- i.operands++;
- /* If VexW1 is set, the first operand is the source and
- the second operand is encoded in the immediate operand. */
- if (i.tm.opcode_modifier.vexw1)
- {
- source = 0;
- reg = 1;
+ || (i.reg_operands == 3 && i.mem_operands == 1))
+ && i.tm.opcode_modifier.vexvvvv == VEXXDS
+ && (i.tm.opcode_modifier.veximmext
+ || (i.imm_operands == 1
+ && i.types[0].bitfield.vec_imm4
+ && (i.tm.opcode_modifier.vexw == VEXW0
+ || i.tm.opcode_modifier.vexw == VEXW1)
+ && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
+ || operand_type_equal (&i.tm.operand_types[dest], ®ymm)
+ || operand_type_equal (&i.tm.operand_types[dest], ®zmm)))));
+
+ if (i.imm_operands == 0)
+ {
+ /* When there is no immediate operand, generate an 8bit
+ immediate operand to encode the first operand. */
+ exp = &im_expressions[i.imm_operands++];
+ i.op[i.operands].imms = exp;
+ i.types[i.operands] = imm8;
+ i.operands++;
+ /* If VexW1 is set, the first operand is the source and
+ the second operand is encoded in the immediate operand. */
+ if (i.tm.opcode_modifier.vexw == VEXW1)
+ {
+ source = 0;
+ reg_slot = 1;
+ }
+ else
+ {
+ source = 1;
+ reg_slot = 0;
+ }
+
+ /* FMA swaps REG and NDS. */
+ if (i.tm.cpu_flags.bitfield.cpufma)
+ {
+ unsigned int tmp;
+ tmp = reg_slot;
+ reg_slot = nds;
+ nds = tmp;
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®xmm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®ymm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®zmm));
+ exp->X_op = O_constant;
+ exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
+ gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
}
else
- {
- source = 1;
- reg = 0;
- }
- /* FMA4 swaps REG and NDS. */
- if (i.tm.cpu_flags.bitfield.cpufma4)
- {
- unsigned int tmp;
- tmp = reg;
- reg = nds;
- nds = tmp;
- }
- gas_assert ((operand_type_equal (&i.tm.operand_types[reg], ®xmm)
- || operand_type_equal (&i.tm.operand_types[reg],
- ®ymm))
- && (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
- || operand_type_equal (&i.tm.operand_types[nds],
- ®ymm)));
- exp->X_op = O_constant;
- exp->X_add_number
- = ((i.op[reg].regs->reg_num
- + ((i.op[reg].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
+ {
+ unsigned int imm_slot;
+
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ {
+ /* If VexW0 is set, the third operand is the source and
+ the second operand is encoded in the immediate
+ operand. */
+ source = 2;
+ reg_slot = 1;
+ }
+ else
+ {
+ /* VexW1 is set, the second operand is the source and
+ the third operand is encoded in the immediate
+ operand. */
+ source = 1;
+ reg_slot = 2;
+ }
+
+ if (i.tm.opcode_modifier.immext)
+ {
+ /* When ImmExt is set, the immediate byte is the last
+ operand. */
+ imm_slot = i.operands - 1;
+ source--;
+ reg_slot--;
+ }
+ else
+ {
+ imm_slot = 0;
+
+ /* Turn on Imm8 so that output_imm will generate it. */
+ i.types[imm_slot].bitfield.imm8 = 1;
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®xmm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®ymm)
+ || operand_type_equal (&i.tm.operand_types[reg_slot],
+ ®zmm));
+ i.op[imm_slot].imms->X_add_number
+ |= register_number (i.op[reg_slot].regs) << 4;
+ gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
+ }
+
+ gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
+ || operand_type_equal (&i.tm.operand_types[nds],
+ ®ymm)
+ || operand_type_equal (&i.tm.operand_types[nds],
+ ®zmm));
i.vex.register_specifier = i.op[nds].regs;
}
else
a instruction with VEX prefix and 3 sources. */
if (i.mem_operands == 0
&& ((i.reg_operands == 2
- && !i.tm.opcode_modifier.vexndd)
+ && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
|| (i.reg_operands == 3
- && i.tm.opcode_modifier.vexnds)
+ && i.tm.opcode_modifier.vexvvvv == VEXXDS)
|| (i.reg_operands == 4 && vex_3_sources)))
{
switch (i.operands)
is an instruction with VexNDS. */
gas_assert (i.imm_operands == 1
|| (i.imm_operands == 0
- && (i.tm.opcode_modifier.vexnds
+ && (i.tm.opcode_modifier.vexvvvv == VEXXDS
|| i.types[0].bitfield.shiftcount)));
if (operand_type_check (i.types[0], imm)
|| i.types[0].bitfield.shiftcount)
gas_assert ((i.imm_operands == 2
&& i.types[0].bitfield.imm8
&& i.types[1].bitfield.imm8)
- || (i.tm.opcode_modifier.vexnds
+ || (i.tm.opcode_modifier.vexvvvv == VEXXDS
&& i.imm_operands == 1
&& (i.types[0].bitfield.imm8
- || i.types[i.operands - 1].bitfield.imm8)));
- if (i.tm.opcode_modifier.vexnds)
+ || i.types[i.operands - 1].bitfield.imm8
+ || i.rounding)));
+ if (i.imm_operands == 2)
+ source = 2;
+ else
{
if (i.types[0].bitfield.imm8)
source = 1;
else
source = 0;
}
- else
- source = 2;
break;
case 5:
+ if (i.tm.opcode_modifier.evex)
+ {
+ /* For EVEX instructions, when there are 5 operands, the
+ first one must be immediate operand. If the second one
+ is immediate operand, the source operand is the 3rd
+ one. If the last one is immediate operand, the source
+ operand is the 2nd one. */
+ gas_assert (i.imm_operands == 2
+ && i.tm.opcode_modifier.sae
+ && operand_type_check (i.types[0], imm));
+ if (operand_type_check (i.types[1], imm))
+ source = 2;
+ else if (operand_type_check (i.types[4], imm))
+ source = 1;
+ else
+ abort ();
+ }
break;
default:
abort ();
{
dest = source + 1;
- if (i.tm.opcode_modifier.vexnds)
+ /* RC/SAE operand could be between DEST and SRC. That happens
+ when one operand is GPR and the other one is XMM/YMM/ZMM
+ register. */
+ if (i.rounding && i.rounding->operand == (int) dest)
+ dest++;
+
+ if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
- /* For instructions with VexNDS, the register-only
- source operand must be XMM or YMM register. It is
- encoded in VEX prefix. We need to clear RegMem bit
- before calling operand_type_equal. */
- i386_operand_type op = i.tm.operand_types[dest];
+ /* For instructions with VexNDS, the register-only source
+ operand must be 32/64bit integer, XMM, YMM or ZMM
+ register. It is encoded in VEX prefix. We need to
+ clear RegMem bit before calling operand_type_equal. */
+
+ i386_operand_type op;
+ unsigned int vvvv;
+
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[source].bitfield.baseindex
+ && i.tm.operand_types[dest].bitfield.baseindex)
+ {
+ vvvv = source;
+ source = dest;
+ }
+ else
+ vvvv = dest;
+
+ op = i.tm.operand_types[vvvv];
op.bitfield.regmem = 0;
if ((dest + 1) >= i.operands
- || (!operand_type_equal (&op, ®xmm)
- && !operand_type_equal (&op, ®ymm)))
+ || (!op.bitfield.reg32
+ && op.bitfield.reg64
+ && !operand_type_equal (&op, ®xmm)
+ && !operand_type_equal (&op, ®ymm)
+ && !operand_type_equal (&op, ®zmm)
+ && !operand_type_equal (&op, ®mask)))
abort ();
- i.vex.register_specifier = i.op[dest].regs;
+ i.vex.register_specifier = i.op[vvvv].regs;
dest++;
}
}
i.rm.regmem = i.op[source].regs->reg_num;
if ((i.op[dest].regs->reg_flags & RegRex) != 0)
i.rex |= REX_R;
+ if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_R;
if ((i.op[source].regs->reg_flags & RegRex) != 0)
i.rex |= REX_B;
+ if ((i.op[source].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_B;
}
else
{
i.rm.regmem = i.op[dest].regs->reg_num;
if ((i.op[dest].regs->reg_flags & RegRex) != 0)
i.rex |= REX_B;
+ if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_B;
if ((i.op[source].regs->reg_flags & RegRex) != 0)
i.rex |= REX_R;
+ if ((i.op[source].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_R;
}
if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
{
break;
gas_assert (op < i.operands);
+ if (i.tm.opcode_modifier.vecsib)
+ {
+ if (i.index_reg->reg_num == RegEiz
+ || i.index_reg->reg_num == RegRiz)
+ abort ();
+
+ i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
+ if (!i.base_reg)
+ {
+ i.sib.base = NO_BASE_REGISTER;
+ i.sib.scale = i.log2_scale_factor;
+ /* No Vec_Disp8 if there is no base. */
+ i.types[op].bitfield.vec_disp8 = 0;
+ i.types[op].bitfield.disp8 = 0;
+ i.types[op].bitfield.disp16 = 0;
+ i.types[op].bitfield.disp64 = 0;
+ if (flag_code != CODE_64BIT)
+ {
+ /* Must be 32 bit */
+ i.types[op].bitfield.disp32 = 1;
+ i.types[op].bitfield.disp32s = 0;
+ }
+ else
+ {
+ i.types[op].bitfield.disp32 = 0;
+ i.types[op].bitfield.disp32s = 1;
+ }
+ }
+ i.sib.index = i.index_reg->reg_num;
+ if ((i.index_reg->reg_flags & RegRex) != 0)
+ i.rex |= REX_X;
+ if ((i.index_reg->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_X;
+ }
+
default_seg = &ds;
if (i.base_reg == 0)
{
i.rm.mode = 0;
if (!i.disp_operands)
- fake_zero_displacement = 1;
+ {
+ fake_zero_displacement = 1;
+ /* Instructions with VSIB byte need 32bit displacement
+ if there is no base register. */
+ if (i.tm.opcode_modifier.vecsib)
+ i.types[op].bitfield.disp32 = 1;
+ }
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.types[op] = disp32;
}
}
- else /* !i.base_reg && i.index_reg */
+ else if (!i.tm.opcode_modifier.vecsib)
{
+ /* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
i.sib.index = NO_INDEX_REGISTER;
i.sib.base = NO_BASE_REGISTER;
i.sib.scale = i.log2_scale_factor;
i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
+ /* No Vec_Disp8 if there is no base. */
+ i.types[op].bitfield.vec_disp8 = 0;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
i.types[op].bitfield.disp64 = 0;
else if (i.base_reg->reg_num == RegRip ||
i.base_reg->reg_num == RegEip)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
i.types[op].bitfield.disp32 = 0;
i.types[op].bitfield.disp32s = 1;
i.types[op].bitfield.disp64 = 0;
+ i.types[op].bitfield.vec_disp8 = 0;
i.flags[op] |= Operand_PCrel;
if (! i.disp_operands)
fake_zero_displacement = 1;
}
else if (i.base_reg->reg_type.bitfield.reg16)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
if (operand_type_check (i.types[op], disp) == 0)
{
/* fake (%bp) into 0(%bp) */
- i.types[op].bitfield.disp8 = 1;
+ if (i.tm.operand_types[op].bitfield.vec_disp8)
+ i.types[op].bitfield.vec_disp8 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
fake_zero_displacement = 1;
}
}
i386_operand_type temp;
operand_type_set (&temp, 0);
temp.bitfield.disp8 = i.types[op].bitfield.disp8;
+ temp.bitfield.vec_disp8
+ = i.types[op].bitfield.vec_disp8;
i.types[op] = temp;
if (i.prefix[ADDR_PREFIX] == 0)
i.types[op].bitfield.disp32s = 1;
i.types[op].bitfield.disp32 = 1;
}
- i.rm.regmem = i.base_reg->reg_num;
+ if (!i.tm.opcode_modifier.vecsib)
+ i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
i.sib.base = i.base_reg->reg_num;
/* x86-64 ignores REX prefix bit here to avoid decoder
complications. */
- if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
- {
+ if (!(i.base_reg->reg_flags & RegRex)
+ && (i.base_reg->reg_num == EBP_REG_NUM
+ || i.base_reg->reg_num == ESP_REG_NUM))
default_seg = &ss;
- if (i.disp_operands == 0)
- {
- fake_zero_displacement = 1;
- i.types[op].bitfield.disp8 = 1;
- }
- }
- else if (i.base_reg->reg_num == ESP_REG_NUM)
+ if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
{
- default_seg = &ss;
+ fake_zero_displacement = 1;
+ if (i.tm.operand_types [op].bitfield.vec_disp8)
+ i.types[op].bitfield.vec_disp8 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
}
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
+ gas_assert (!i.tm.opcode_modifier.vecsib);
/* <disp>(%esp) becomes two byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else
+ else if (!i.tm.opcode_modifier.vecsib)
{
if (i.index_reg->reg_num == RegEiz
|| i.index_reg->reg_num == RegRiz)
|| i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
i.rm.mode = 0;
else
- i.rm.mode = mode_from_disp_size (i.types[op]);
+ {
+ if (!fake_zero_displacement
+ && !i.disp_operands
+ && i.disp_encoding)
+ {
+ fake_zero_displacement = 1;
+ if (i.disp_encoding == disp_encoding_8bit)
+ i.types[op].bitfield.disp8 = 1;
+ else
+ i.types[op].bitfield.disp32 = 1;
+ }
+ i.rm.mode = mode_from_disp_size (i.types[op]);
+ }
}
if (fake_zero_displacement)
else
mem = ~0;
+ if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
+ {
+ if (operand_type_check (i.types[0], imm))
+ i.vex.register_specifier = NULL;
+ else
+ {
+ /* VEX.vvvv encodes one of the sources when the first
+ operand is not an immediate. */
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ i.vex.register_specifier = i.op[0].regs;
+ else
+ i.vex.register_specifier = i.op[1].regs;
+ }
+
+ /* Destination is a XMM register encoded in the ModRM.reg
+ and VEX.R bit. */
+ i.rm.reg = i.op[2].regs->reg_num;
+ if ((i.op[2].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_R;
+
+ /* ModRM.rm and VEX.B encodes the other source. */
+ if (!i.mem_operands)
+ {
+ i.rm.mode = 3;
+
+ if (i.tm.opcode_modifier.vexw == VEXW0)
+ i.rm.regmem = i.op[1].regs->reg_num;
+ else
+ i.rm.regmem = i.op[0].regs->reg_num;
+
+ if ((i.op[1].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_B;
+ }
+ }
+ else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
+ {
+ i.vex.register_specifier = i.op[2].regs;
+ if (!i.mem_operands)
+ {
+ i.rm.mode = 3;
+ i.rm.regmem = i.op[1].regs->reg_num;
+ if ((i.op[1].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_B;
+ }
+ }
/* Fill in i.rm.reg or i.rm.regmem field with register operand
(if any) based on i.tm.extension_opcode. Again, we must be
careful to make sure that segment/control/debug/test/MMX
registers are coded into the i.rm.reg field. */
- if (i.reg_operands)
+ else if (i.reg_operands)
{
unsigned int op;
unsigned int vex_reg = ~0;
|| i.types[op].bitfield.regmmx
|| i.types[op].bitfield.regxmm
|| i.types[op].bitfield.regymm
+ || i.types[op].bitfield.regbnd
+ || i.types[op].bitfield.regzmm
+ || i.types[op].bitfield.regmask
|| i.types[op].bitfield.sreg2
|| i.types[op].bitfield.sreg3
|| i.types[op].bitfield.control
if (vex_3_sources)
op = dest;
- else if (i.tm.opcode_modifier.vexnds)
+ else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
{
/* For instructions with VexNDS, the register-only
source operand is encoded in VEX prefix. */
}
else
{
- vex_reg = op + 1;
- gas_assert (vex_reg < i.operands);
+ /* Check register-only source operand when two source
+ operands are swapped. */
+ if (!i.tm.operand_types[op].bitfield.baseindex
+ && i.tm.operand_types[op + 1].bitfield.baseindex)
+ {
+ vex_reg = op;
+ op += 2;
+ gas_assert (mem == (vex_reg + 1)
+ && op < i.operands);
+ }
+ else
+ {
+ vex_reg = op + 1;
+ gas_assert (vex_reg < i.operands);
+ }
}
}
- else if (i.tm.opcode_modifier.vexndd)
+ else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
{
- /* For instructions with VexNDD, there should be
- no memory operand and the register destination
+ /* For instructions with VexNDD, the register destination
is encoded in VEX prefix. */
- gas_assert (i.mem_operands == 0
- && (op + 2) == i.operands);
- vex_reg = op + 1;
+ if (i.mem_operands == 0)
+ {
+ /* There is no memory operand. */
+ gas_assert ((op + 2) == i.operands);
+ vex_reg = op + 1;
+ }
+ else
+ {
+ /* There are only 2 operands. */
+ gas_assert (op < 2 && i.operands == 2);
+ vex_reg = 1;
+ }
}
else
gas_assert (op < i.operands);
if (vex_reg != (unsigned int) ~0)
{
- gas_assert (i.reg_operands == 2);
-
- if (!operand_type_equal (&i.tm.operand_types[vex_reg],
- & regxmm)
- && !operand_type_equal (&i.tm.operand_types[vex_reg],
- ®ymm))
+ i386_operand_type *type = &i.tm.operand_types[vex_reg];
+
+ if (type->bitfield.reg32 != 1
+ && type->bitfield.reg64 != 1
+ && !operand_type_equal (type, ®xmm)
+ && !operand_type_equal (type, ®ymm)
+ && !operand_type_equal (type, ®zmm)
+ && !operand_type_equal (type, ®mask))
abort ();
+
i.vex.register_specifier = i.op[vex_reg].regs;
}
- /* If there is an extension opcode to put here, the
- register number must be put into the regmem field. */
- if (i.tm.extension_opcode != None)
+ /* Don't set OP operand twice. */
+ if (vex_reg != op)
{
- i.rm.regmem = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- }
- else
- {
- i.rm.reg = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
+ /* If there is an extension opcode to put here, the
+ register number must be put into the regmem field. */
+ if (i.tm.extension_opcode != None)
+ {
+ i.rm.regmem = i.op[op].regs->reg_num;
+ if ((i.op[op].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_B;
+ if ((i.op[op].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_B;
+ }
+ else
+ {
+ i.rm.reg = i.op[op].regs->reg_num;
+ if ((i.op[op].regs->reg_flags & RegRex) != 0)
+ i.rex |= REX_R;
+ if ((i.op[op].regs->reg_flags & RegVRex) != 0)
+ i.vrex |= REX_R;
+ }
}
/* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
output_branch (void)
{
char *p;
+ int size;
int code16;
int prefix;
relax_substateT subtype;
symbolS *sym;
offsetT off;
- code16 = 0;
- if (flag_code == CODE_16BIT)
- code16 = CODE16;
+ code16 = flag_code == CODE_16BIT ? CODE16 : 0;
+ size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
prefix = 0;
if (i.prefix[DATA_PREFIX] != 0)
i.prefixes--;
}
+ /* BND prefixed jump. */
+ if (i.prefix[BND_PREFIX] != 0)
+ {
+ FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
+ i.prefixes -= 1;
+ }
+
if (i.prefixes != 0 && !intel_syntax)
as_warn (_("skipping prefixes on this instruction"));
*p = i.tm.base_opcode;
if ((unsigned char) *p == JUMP_PC_RELATIVE)
- subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
else if (cpu_arch_flags.bitfield.cpui386)
- subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
else
- subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
subtype |= code16;
sym = i.op[0].disps->X_add_symbol;
i.prefixes -= 1;
}
+ /* BND prefixed jump. */
+ if (i.prefix[BND_PREFIX] != 0)
+ {
+ FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
+ i.prefixes -= 1;
+ }
+
if (i.prefixes != 0 && !intel_syntax)
as_warn (_("skipping prefixes on this instruction"));
- p = frag_more (1 + size);
- *p++ = i.tm.base_opcode;
+ p = frag_more (i.tm.opcode_length + size);
+ switch (i.tm.opcode_length)
+ {
+ case 2:
+ *p++ = i.tm.base_opcode >> 8;
+ case 1:
+ *p++ = i.tm.base_opcode;
+ break;
+ default:
+ abort ();
+ }
fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
unsigned int j;
unsigned int prefix;
- /* Since the VEX prefix contains the implicit prefix, we don't
- need the explicit prefix. */
- if (!i.tm.opcode_modifier.vex)
+ /* Some processors fail on LOCK prefix. This option makes
+ assembler ignore LOCK prefix and serves as a workaround. */
+ if (omit_lock_prefix)
+ {
+ if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
+ return;
+ i.prefix[LOCK_PREFIX] = 0;
+ }
+
+ /* Since the VEX/EVEX prefix contains the implicit prefix, we
+ don't need the explicit prefix. */
+ if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
{
switch (i.tm.opcode_length)
{
{
check_prefix:
if (prefix != REPE_PREFIX_OPCODE
- || (i.prefix[LOCKREP_PREFIX]
+ || (i.prefix[REP_PREFIX]
!= REPE_PREFIX_OPCODE))
add_prefix (prefix);
}
abort ();
}
+#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+ /* For x32, add a dummy REX_OPCODE prefix for mov/add with
+ R_X86_64_GOTTPOFF relocation so that linker can safely
+ perform IE->LE optimization. */
+ if (x86_elf_abi == X86_64_X32_ABI
+ && i.operands == 2
+ && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
+ && i.prefix[REX_PREFIX] == 0)
+ add_prefix (REX_OPCODE);
+#endif
+
/* The prefix bytes. */
for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
if (*q)
FRAG_APPEND_1_CHAR (*q);
}
-
- if (i.tm.opcode_modifier.vex)
+ else
{
for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
if (*q)
abort ();
}
+ /* For EVEX instructions i.vrex should become 0 after
+ build_evex_prefix. For VEX instructions upper 16 registers
+ aren't available, so VREX should be 0. */
+ if (i.vrex)
+ abort ();
/* Now the VEX prefix. */
p = frag_more (i.vex.length);
for (j = 0; j < i.vex.length; j++)
{
switch (i.tm.opcode_length)
{
+ case 4:
+ p = frag_more (4);
+ *p++ = (i.tm.base_opcode >> 24) & 0xff;
+ *p++ = (i.tm.base_opcode >> 16) & 0xff;
+ break;
case 3:
p = frag_more (3);
*p++ = (i.tm.base_opcode >> 16) & 0xff;
disp_size (unsigned int n)
{
int size = 4;
- if (i.types[n].bitfield.disp64)
+
+ /* Vec_Disp8 has to be 8bit. */
+ if (i.types[n].bitfield.vec_disp8)
+ size = 1;
+ else if (i.types[n].bitfield.disp64)
size = 8;
else if (i.types[n].bitfield.disp8)
size = 1;
for (n = 0; n < i.operands; n++)
{
- if (operand_type_check (i.types[n], disp))
+ if (i.types[n].bitfield.vec_disp8
+ || operand_type_check (i.types[n], disp))
{
if (i.op[n].disps->X_op == O_constant)
{
int size = disp_size (n);
- offsetT val;
+ offsetT val = i.op[n].disps->X_add_number;
- val = offset_in_range (i.op[n].disps->X_add_number,
- size);
+ if (i.types[n].bitfield.vec_disp8)
+ val >>= i.memshift;
+ val = offset_in_range (val, size);
p = frag_more (size);
md_number_to_chars (p, val, size);
}
for (n = 0; n < i.operands; n++)
{
+ /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
+ if (i.rounding && (int) n == i.rounding->operand)
+ continue;
+
if (operand_type_check (i.types[n], imm))
{
if (i.op[n].imms->X_op == O_constant)
\f
/* x86_cons_fix_new is called via the expression parsing code when a
reloc is needed. We use this hook to get the correct .got reloc. */
-static enum bfd_reloc_code_real got_reloc = NO_RELOC;
static int cons_sign = -1;
void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
- expressionS *exp)
+ expressionS *exp, bfd_reloc_code_real_type r)
{
- enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
-
- got_reloc = NO_RELOC;
+ r = reloc (len, 0, cons_sign, r);
#ifdef TE_PE
if (exp->X_op == O_secrel)
fix_new_exp (frag, off, len, exp, 0, r);
}
-#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
+/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
+ purpose of the `.dc.a' internal pseudo-op. */
+
+int
+x86_address_bytes (void)
+{
+ if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
+ return 4;
+ return stdoutput->arch_info->bits_per_address / 8;
+}
+
+#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
+ || defined (LEX_AT)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
is non-null set it to the length of the string we removed from the
input line. Otherwise return NULL. */
static char *
-lex_got (enum bfd_reloc_code_real *reloc,
+lex_got (enum bfd_reloc_code_real *rel,
int *adjust,
i386_operand_type *types)
{
and adjust the reloc according to the real size in reloc(). */
static const struct {
const char *str;
+ int len;
const enum bfd_reloc_code_real rel[2];
const i386_operand_type types64;
} gotrel[] = {
- { "PLTOFF", { 0,
- BFD_RELOC_X86_64_PLTOFF64 },
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
+ BFD_RELOC_SIZE32 },
+ OPERAND_TYPE_IMM32_64 },
+#endif
+ { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_PLTOFF64 },
OPERAND_TYPE_IMM64 },
- { "PLT", { BFD_RELOC_386_PLT32,
- BFD_RELOC_X86_64_PLT32 },
+ { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
+ BFD_RELOC_X86_64_PLT32 },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "GOTPLT", { 0,
- BFD_RELOC_X86_64_GOTPLT64 },
+ { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPLT64 },
OPERAND_TYPE_IMM64_DISP64 },
- { "GOTOFF", { BFD_RELOC_386_GOTOFF,
- BFD_RELOC_X86_64_GOTOFF64 },
+ { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_X86_64_GOTOFF64 },
OPERAND_TYPE_IMM64_DISP64 },
- { "GOTPCREL", { 0,
- BFD_RELOC_X86_64_GOTPCREL },
+ { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPCREL },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSGD", { BFD_RELOC_386_TLS_GD,
- BFD_RELOC_X86_64_TLSGD },
+ { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_X86_64_TLSGD },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
- 0 },
+ { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "TLSLD", { 0,
- BFD_RELOC_X86_64_TLSLD },
+ { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_TLSLD },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
- BFD_RELOC_X86_64_GOTTPOFF },
+ { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_X86_64_GOTTPOFF },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
- BFD_RELOC_X86_64_TPOFF32 },
+ { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_X86_64_TPOFF32 },
OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
- { "NTPOFF", { BFD_RELOC_386_TLS_LE,
- 0 },
+ { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
- BFD_RELOC_X86_64_DTPOFF32 },
-
+ { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_X86_64_DTPOFF32 },
OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
- { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
- 0 },
+ { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
- 0 },
+ { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
+ _dummy_first_bfd_reloc_code_real },
OPERAND_TYPE_NONE },
- { "GOT", { BFD_RELOC_386_GOT32,
- BFD_RELOC_X86_64_GOT32 },
+ { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
+ BFD_RELOC_X86_64_GOT32 },
OPERAND_TYPE_IMM32_32S_64_DISP32 },
- { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
- BFD_RELOC_X86_64_GOTPC32_TLSDESC },
+ { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
+ BFD_RELOC_X86_64_GOTPC32_TLSDESC },
OPERAND_TYPE_IMM32_32S_DISP32 },
- { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
- BFD_RELOC_X86_64_TLSDESC_CALL },
+ { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
+ BFD_RELOC_X86_64_TLSDESC_CALL },
OPERAND_TYPE_IMM32_32S_DISP32 },
};
char *cp;
unsigned int j;
+#if defined (OBJ_MAYBE_ELF)
if (!IS_ELF)
return NULL;
+#endif
for (cp = input_line_pointer; *cp != '@'; cp++)
if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
for (j = 0; j < ARRAY_SIZE (gotrel); j++)
{
- int len;
-
- len = strlen (gotrel[j].str);
+ int len = gotrel[j].len;
if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
{
if (gotrel[j].rel[object_64bit] != 0)
int first, second;
char *tmpbuf, *past_reloc;
- *reloc = gotrel[j].rel[object_64bit];
- if (adjust)
- *adjust = len;
+ *rel = gotrel[j].rel[object_64bit];
if (types)
{
*types = gotrel[j].types64;
}
- if (GOT_symbol == NULL)
+ if (j != 0 && GOT_symbol == NULL)
GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
/* The length of the first part of our input line. */
/* Allocate and copy string. The trailing NUL shouldn't
be necessary, but be safe. */
- tmpbuf = xmalloc (first + second + 2);
+ tmpbuf = (char *) xmalloc (first + second + 2);
memcpy (tmpbuf, input_line_pointer, first);
if (second != 0 && *past_reloc != ' ')
/* Replace the relocation token with ' ', so that
errors like foo@GOTOFF1 will be detected. */
tmpbuf[first++] = ' ';
+ else
+ /* Increment length by 1 if the relocation token is
+ removed. */
+ len++;
+ if (adjust)
+ *adjust = len;
memcpy (tmpbuf + first, past_reloc, second);
tmpbuf[first + second] = '\0';
return tmpbuf;
/* Might be a symbol version string. Don't as_bad here. */
return NULL;
}
+#endif
-void
+#ifdef TE_PE
+#ifdef lex_got
+#undef lex_got
+#endif
+/* Parse operands of the form
+ <symbol>@SECREL32+<nnn>
+
+ If we find one, set up the correct relocation in RELOC and copy the
+ input string, minus the `@SECREL32' into a malloc'd buffer for
+ parsing by the calling routine. Return this buffer, and if ADJUST
+ is non-null set it to the length of the string we removed from the
+ input line. Otherwise return NULL.
+
+ This function is copied from the ELF version above adjusted for PE targets. */
+
+static char *
+lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
+ int *adjust ATTRIBUTE_UNUSED,
+ i386_operand_type *types)
+{
+ static const struct
+ {
+ const char *str;
+ int len;
+ const enum bfd_reloc_code_real rel[2];
+ const i386_operand_type types64;
+ }
+ gotrel[] =
+ {
+ { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
+ BFD_RELOC_32_SECREL },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
+ };
+
+ char *cp;
+ unsigned j;
+
+ for (cp = input_line_pointer; *cp != '@'; cp++)
+ if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
+ return NULL;
+
+ for (j = 0; j < ARRAY_SIZE (gotrel); j++)
+ {
+ int len = gotrel[j].len;
+
+ if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
+ {
+ if (gotrel[j].rel[object_64bit] != 0)
+ {
+ int first, second;
+ char *tmpbuf, *past_reloc;
+
+ *rel = gotrel[j].rel[object_64bit];
+ if (adjust)
+ *adjust = len;
+
+ if (types)
+ {
+ if (flag_code != CODE_64BIT)
+ {
+ types->bitfield.imm32 = 1;
+ types->bitfield.disp32 = 1;
+ }
+ else
+ *types = gotrel[j].types64;
+ }
+
+ /* The length of the first part of our input line. */
+ first = cp - input_line_pointer;
+
+ /* The second part goes from after the reloc token until
+ (and including) an end_of_line char or comma. */
+ past_reloc = cp + 1 + len;
+ cp = past_reloc;
+ while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
+ ++cp;
+ second = cp + 1 - past_reloc;
+
+ /* Allocate and copy string. The trailing NUL shouldn't
+ be necessary, but be safe. */
+ tmpbuf = (char *) xmalloc (first + second + 2);
+ memcpy (tmpbuf, input_line_pointer, first);
+ if (second != 0 && *past_reloc != ' ')
+ /* Replace the relocation token with ' ', so that
+ errors like foo@SECREL321 will be detected. */
+ tmpbuf[first++] = ' ';
+ memcpy (tmpbuf + first, past_reloc, second);
+ tmpbuf[first + second] = '\0';
+ return tmpbuf;
+ }
+
+ as_bad (_("@%s reloc is not supported with %d-bit output format"),
+ gotrel[j].str, 1 << (5 + object_64bit));
+ return NULL;
+ }
+ }
+
+ /* Might be a symbol version string. Don't as_bad here. */
+ return NULL;
+}
+
+#endif /* TE_PE */
+
+bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
+ bfd_reloc_code_real_type got_reloc = NO_RELOC;
+
intel_syntax = -intel_syntax;
+ exp->X_md = 0;
if (size == 4 || (object_64bit && size == 8))
{
/* Handle @GOTOFF and the like in an expression. */
char *save;
char *gotfree_input_line;
- int adjust;
+ int adjust = 0;
save = input_line_pointer;
gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
if (intel_syntax)
i386_intel_simplify (exp);
+
+ return got_reloc;
}
-#endif
-static void signed_cons (int size)
+static void
+signed_cons (int size)
{
if (flag_code == CODE_64BIT)
cons_sign = 1;
#ifdef TE_PE
static void
-pe_directive_secrel (dummy)
- int dummy ATTRIBUTE_UNUSED;
+pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
expressionS exp;
}
#endif
+/* Handle Vector operations: parse the AVX-512 "{...}" decorations that
+ may trail an operand -- "{1toN}" memory broadcast (N one of 2, 4, 8
+ or 16), "{%kN}" write mask (k0 is rejected) and "{z}" zeroing-masking.
+ OP_STRING points at the first '{'; scanning stops at OP_END, or at
+ the terminating NUL when OP_END is NULL.  Results are recorded in
+ i.broadcast and i.mask, which are made to point at the file-scope
+ broadcast_op / mask_op buffers.  Returns the position just past the
+ last '}', or NULL after issuing a diagnostic on error.  */
+
+static char *
+check_VecOperations (char *op_string, char *op_end)
+{
+ const reg_entry *mask;
+ const char *saved;
+ char *end_op;
+
+ while (*op_string
+ && (op_end == NULL || op_string < op_end))
+ {
+ /* Remember the start of this "{...}" group for diagnostics. */
+ saved = op_string;
+ if (*op_string == '{')
+ {
+ op_string++;
+
+ /* Check broadcasts. */
+ if (strncmp (op_string, "1to", 3) == 0)
+ {
+ int bcst_type;
+
+ if (i.broadcast)
+ goto duplicated_vec_op;
+
+ op_string += 3;
+ if (*op_string == '8')
+ bcst_type = BROADCAST_1TO8;
+ else if (*op_string == '4')
+ bcst_type = BROADCAST_1TO4;
+ else if (*op_string == '2')
+ bcst_type = BROADCAST_1TO2;
+ else if (*op_string == '1'
+ && *(op_string+1) == '6')
+ {
+ bcst_type = BROADCAST_1TO16;
+ op_string++;
+ }
+ else
+ {
+ as_bad (_("Unsupported broadcast: `%s'"), saved);
+ return NULL;
+ }
+ op_string++;
+
+ broadcast_op.type = bcst_type;
+ broadcast_op.operand = this_operand;
+ i.broadcast = &broadcast_op;
+ }
+ /* Check masking operation. */
+ else if ((mask = parse_register (op_string, &end_op)) != NULL)
+ {
+ /* k0 can't be used for write mask. */
+ if (mask->reg_num == 0)
+ {
+ as_bad (_("`%s' can't be used for write mask"),
+ op_string);
+ return NULL;
+ }
+
+ if (!i.mask)
+ {
+ mask_op.mask = mask;
+ mask_op.zeroing = 0;
+ mask_op.operand = this_operand;
+ i.mask = &mask_op;
+ }
+ else
+ {
+ if (i.mask->mask)
+ goto duplicated_vec_op;
+
+ i.mask->mask = mask;
+
+ /* Only "{z}" is allowed here. No need to check
+ zeroing mask explicitly. */
+ if (i.mask->operand != this_operand)
+ {
+ as_bad (_("invalid write mask `%s'"), saved);
+ return NULL;
+ }
+ }
+
+ op_string = end_op;
+ }
+ /* Check zeroing-flag for masking operation. */
+ else if (*op_string == 'z')
+ {
+ if (!i.mask)
+ {
+ mask_op.mask = NULL;
+ mask_op.zeroing = 1;
+ mask_op.operand = this_operand;
+ i.mask = &mask_op;
+ }
+ else
+ {
+ if (i.mask->zeroing)
+ {
+ duplicated_vec_op:
+ as_bad (_("duplicated `%s'"), saved);
+ return NULL;
+ }
+
+ i.mask->zeroing = 1;
+
+ /* Only "{%k}" is allowed here. No need to check mask
+ register explicitly. */
+ if (i.mask->operand != this_operand)
+ {
+ as_bad (_("invalid zeroing-masking `%s'"),
+ saved);
+ return NULL;
+ }
+ }
+
+ op_string++;
+ }
+ else
+ goto unknown_vec_op;
+
+ /* Every group must be closed before the next one starts. */
+ if (*op_string != '}')
+ {
+ as_bad (_("missing `}' in `%s'"), saved);
+ return NULL;
+ }
+ op_string++;
+ continue;
+ }
+ unknown_vec_op:
+ /* We don't know this one. */
+ as_bad (_("unknown vector operation: `%s'"), saved);
+ return NULL;
+ }
+
+ return op_string;
+}
+
static int
i386_immediate (char *imm_start)
{
exp_seg = expression (exp);
SKIP_WHITESPACE ();
+
+ /* Handle vector operations. */
+ if (*input_line_pointer == '{')
+ {
+ input_line_pointer = check_VecOperations (input_line_pointer,
+ NULL);
+ if (input_line_pointer == NULL)
+ return 0;
+ }
+
if (*input_line_pointer)
as_bad (_("junk `%s' after expression"), input_line_pointer);
{
if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
{
- as_bad (_("missing or invalid immediate expression `%s'"),
- imm_start);
+ if (imm_start)
+ as_bad (_("missing or invalid immediate expression `%s'"),
+ imm_start);
return 0;
}
else if (exp->X_op == O_constant)
{
/* Size it properly later. */
i.types[this_operand].bitfield.imm64 = 1;
- /* If BFD64, sign extend val. */
- if (!use_rela_relocations
+ /* If not 64bit, sign extend val. */
+ if (flag_code != CODE_64BIT
&& (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
exp->X_add_number
= (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
return 0;
}
#endif
- else if (!intel_syntax && exp->X_op == O_register)
+ else if (!intel_syntax && exp_seg == reg_section)
{
- as_bad (_("illegal immediate register operand %s"), imm_start);
+ if (imm_start)
+ as_bad (_("illegal immediate register operand %s"), imm_start);
return 0;
}
else
goto inv_disp;
if (S_IS_LOCAL (exp->X_add_symbol)
- && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
+ && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
+ && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
exp->X_op = O_subtract;
exp->X_op_symbol = GOT_symbol;
ret = 0;
}
+ else if (flag_code == CODE_64BIT
+ && !i.prefix[ADDR_PREFIX]
+ && exp->X_op == O_constant)
+ {
+ /* Since displacement is signed extended to 64bit, don't allow
+ disp32 and turn off disp32s if they are out of range. */
+ i.types[this_operand].bitfield.disp32 = 0;
+ if (!fits_in_signed_long (exp->X_add_number))
+ {
+ i.types[this_operand].bitfield.disp32s = 0;
+ if (i.types[this_operand].bitfield.baseindex)
+ {
+ as_bad (_("0x%lx out range of signed 32bit displacement"),
+ (long) exp->X_add_number);
+ ret = 0;
+ }
+ }
+ }
+
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
else if (exp->X_op != O_constant
&& OUTPUT_FLAVOR == bfd_target_aout_flavour
static int
i386_index_check (const char *operand_string)
{
- int ok;
const char *kind = "base/index";
+ enum flag_code addr_mode;
+
+ if (i.prefix[ADDR_PREFIX])
+ addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
+ else
+ {
+ addr_mode = flag_code;
+
#if INFER_ADDR_PREFIX
- int fudged = 0;
+ if (i.mem_operands == 0)
+ {
+ /* Infer address prefix from the first memory operand. */
+ const reg_entry *addr_reg = i.base_reg;
+
+ if (addr_reg == NULL)
+ addr_reg = i.index_reg;
- tryprefix:
+ if (addr_reg)
+ {
+ if (addr_reg->reg_num == RegEip
+ || addr_reg->reg_num == RegEiz
+ || addr_reg->reg_type.bitfield.reg32)
+ addr_mode = CODE_32BIT;
+ else if (flag_code != CODE_64BIT
+ && addr_reg->reg_type.bitfield.reg16)
+ addr_mode = CODE_16BIT;
+
+ if (addr_mode != flag_code)
+ {
+ i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
+ i.prefixes += 1;
+ /* Change the size of any displacement too. At most one
+ of Disp16 or Disp32 is set.
+ FIXME. There doesn't seem to be any real need for
+ separate Disp16 and Disp32 flags. The same goes for
+ Imm16 and Imm32. Removing them would probably clean
+ up the code quite a lot. */
+ if (flag_code != CODE_64BIT
+ && (i.types[this_operand].bitfield.disp16
+ || i.types[this_operand].bitfield.disp32))
+ i.types[this_operand]
+ = operand_type_xor (i.types[this_operand], disp16_32);
+ }
+ }
+ }
#endif
- ok = 1;
+ }
+
if (current_templates->start->opcode_modifier.isstring
&& !current_templates->start->opcode_modifier.immext
&& (current_templates->end[-1].opcode_modifier.isstring
{
/* Memory operands of string insns are special in that they only allow
a single register (rDI, rSI, or rBX) as their memory address. */
- unsigned int expected;
+ const reg_entry *expected_reg;
+ static const char *di_si[][2] =
+ {
+ { "esi", "edi" },
+ { "si", "di" },
+ { "rsi", "rdi" }
+ };
+ static const char *bx[] = { "ebx", "bx", "rbx" };
kind = "string address";
&& current_templates->end[-1].operand_types[1]
.bitfield.baseindex))
type = current_templates->end[-1].operand_types[1];
- expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
+ expected_reg = hash_find (reg_hash,
+ di_si[addr_mode][type.bitfield.esseg]);
+
}
else
- expected = 3 /* rBX */;
+ expected_reg = hash_find (reg_hash, bx[addr_mode]);
- if (!i.base_reg || i.index_reg
+ if (i.base_reg != expected_reg
+ || i.index_reg
|| operand_type_check (i.types[this_operand], disp))
- ok = -1;
- else if (!(flag_code == CODE_64BIT
- ? i.prefix[ADDR_PREFIX]
- ? i.base_reg->reg_type.bitfield.reg32
- : i.base_reg->reg_type.bitfield.reg64
- : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
- ? i.base_reg->reg_type.bitfield.reg32
- : i.base_reg->reg_type.bitfield.reg16))
- ok = 0;
- else if (i.base_reg->reg_num != expected)
- ok = -1;
-
- if (ok < 0)
- {
- unsigned int j;
-
- for (j = 0; j < i386_regtab_size; ++j)
- if ((flag_code == CODE_64BIT
- ? i.prefix[ADDR_PREFIX]
- ? i386_regtab[j].reg_type.bitfield.reg32
- : i386_regtab[j].reg_type.bitfield.reg64
- : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
- ? i386_regtab[j].reg_type.bitfield.reg32
- : i386_regtab[j].reg_type.bitfield.reg16)
- && i386_regtab[j].reg_num == expected)
- break;
- gas_assert (j < i386_regtab_size);
+ {
+ /* The second memory operand must have the same size as
+ the first one. */
+ if (i.mem_operands
+ && i.base_reg
+ && !((addr_mode == CODE_64BIT
+ && i.base_reg->reg_type.bitfield.reg64)
+ || (addr_mode == CODE_32BIT
+ ? i.base_reg->reg_type.bitfield.reg32
+ : i.base_reg->reg_type.bitfield.reg16)))
+ goto bad_address;
+
as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
operand_string,
intel_syntax ? '[' : '(',
register_prefix,
- i386_regtab[j].reg_name,
+ expected_reg->reg_name,
intel_syntax ? ']' : ')');
- ok = 1;
- }
- }
- else if (flag_code == CODE_64BIT)
- {
- if ((i.base_reg
- && ((i.prefix[ADDR_PREFIX] == 0
- && !i.base_reg->reg_type.bitfield.reg64)
- || (i.prefix[ADDR_PREFIX]
- && !i.base_reg->reg_type.bitfield.reg32))
- && (i.index_reg
- || i.base_reg->reg_num !=
- (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
- || (i.index_reg
- && (!i.index_reg->reg_type.bitfield.baseindex
- || (i.prefix[ADDR_PREFIX] == 0
- && i.index_reg->reg_num != RegRiz
- && !i.index_reg->reg_type.bitfield.reg64
- )
- || (i.prefix[ADDR_PREFIX]
- && i.index_reg->reg_num != RegEiz
- && !i.index_reg->reg_type.bitfield.reg32))))
- ok = 0;
+ return 1;
+ }
+ else
+ return 1;
+
+bad_address:
+ as_bad (_("`%s' is not a valid %s expression"),
+ operand_string, kind);
+ return 0;
}
else
{
- if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
+ if (addr_mode != CODE_16BIT)
+ {
+ /* 32-bit/64-bit checks. */
+ if ((i.base_reg
+ && (addr_mode == CODE_64BIT
+ ? !i.base_reg->reg_type.bitfield.reg64
+ : !i.base_reg->reg_type.bitfield.reg32)
+ && (i.index_reg
+ || (i.base_reg->reg_num
+ != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
+ || (i.index_reg
+ && !i.index_reg->reg_type.bitfield.regxmm
+ && !i.index_reg->reg_type.bitfield.regymm
+ && !i.index_reg->reg_type.bitfield.regzmm
+ && ((addr_mode == CODE_64BIT
+ ? !(i.index_reg->reg_type.bitfield.reg64
+ || i.index_reg->reg_num == RegRiz)
+ : !(i.index_reg->reg_type.bitfield.reg32
+ || i.index_reg->reg_num == RegEiz))
+ || !i.index_reg->reg_type.bitfield.baseindex)))
+ goto bad_address;
+ }
+ else
{
- /* 16bit checks. */
+ /* 16-bit checks. */
if ((i.base_reg
&& (!i.base_reg->reg_type.bitfield.reg16
|| !i.base_reg->reg_type.bitfield.baseindex))
&& i.base_reg->reg_num < 6
&& i.index_reg->reg_num >= 6
&& i.log2_scale_factor == 0))))
- ok = 0;
+ goto bad_address;
}
- else
+ }
+ return 1;
+}
+
+/* Handle vector immediates: parse a rounding-control / SAE specifier
+ taken from RC_NamesTable, written as "{name}" with nothing allowed
+ after the closing brace (see the -mevexrcig handling for the SAE-only
+ forms).  IMM_START must point at the opening '{'.  On a match the
+ choice is recorded in i.rounding (duplicates rejected) and a dummy
+ imm8 constant operand is synthesized.  Returns 1 if IMM_START was
+ consumed as such an immediate, 0 otherwise.  */
+
+static int
+RC_SAE_immediate (const char *imm_start)
+{
+ unsigned int match_found, j;
+ const char *pstr = imm_start;
+ expressionS *exp;
+
+ if (*pstr != '{')
+ return 0;
+
+ pstr++;
+ match_found = 0;
+ for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
+ {
+ if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
{
- /* 32bit checks. */
- if ((i.base_reg
- && !i.base_reg->reg_type.bitfield.reg32)
- || (i.index_reg
- && ((!i.index_reg->reg_type.bitfield.reg32
- && i.index_reg->reg_num != RegEiz)
- || !i.index_reg->reg_type.bitfield.baseindex)))
- ok = 0;
+ if (!i.rounding)
+ {
+ rc_op.type = RC_NamesTable[j].type;
+ rc_op.operand = this_operand;
+ i.rounding = &rc_op;
+ }
+ else
+ {
+ as_bad (_("duplicated `%s'"), imm_start);
+ return 0;
+ }
+ pstr += RC_NamesTable[j].len;
+ match_found = 1;
+ break;
}
}
- if (!ok)
+ if (!match_found)
+ return 0;
+
+ if (*pstr++ != '}')
{
-#if INFER_ADDR_PREFIX
- if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
- {
- i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
- i.prefixes += 1;
- /* Change the size of any displacement too. At most one of
- Disp16 or Disp32 is set.
- FIXME. There doesn't seem to be any real need for separate
- Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
- Removing them would probably clean up the code quite a lot. */
- if (flag_code != CODE_64BIT
- && (i.types[this_operand].bitfield.disp16
- || i.types[this_operand].bitfield.disp32))
- i.types[this_operand]
- = operand_type_xor (i.types[this_operand], disp16_32);
- fudged = 1;
- goto tryprefix;
- }
- if (fudged)
- as_bad (_("`%s' is not a valid %s expression"),
- operand_string,
- kind);
- else
-#endif
- as_bad (_("`%s' is not a valid %s-bit %s expression"),
- operand_string,
- flag_code_names[i.prefix[ADDR_PREFIX]
- ? flag_code == CODE_32BIT
- ? CODE_16BIT
- : CODE_32BIT
- : flag_code],
- kind);
+ as_bad (_("Missing '}': '%s'"), imm_start);
+ return 0;
+ }
+ /* RC/SAE immediate string should contain nothing more. */;
+ if (*pstr != 0)
+ {
+ as_bad (_("Junk after '}': '%s'"), imm_start);
+ return 0;
+ }
- return ok;
+
+ /* Represent the specifier internally as an extra imm8 operand
+ with constant value 0. */
+ exp = &im_expressions[i.imm_operands++];
+ i.op[this_operand].imms = exp;
+
+ exp->X_op = O_constant;
+ exp->X_add_number = 0;
+ exp->X_add_symbol = (symbolS *) 0;
+ exp->X_op_symbol = (symbolS *) 0;
+
+ i.types[this_operand].bitfield.imm8 = 1;
+ return 1;
+}
/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
}
goto do_memory_reference;
}
+
+ /* Handle vector operations. */
+ if (*op_string == '{')
+ {
+ op_string = check_VecOperations (op_string, NULL);
+ if (op_string == NULL)
+ return 0;
+ }
+
if (*op_string)
{
as_bad (_("junk `%s' after register"), op_string);
if (!i386_immediate (op_string))
return 0;
}
+ else if (RC_SAE_immediate (operand_string))
+ {
+ /* If it is a RC or SAE immediate, do nothing. */
+ ;
+ }
else if (is_digit_char (*op_string)
|| is_identifier_char (*op_string)
|| *op_string == '(')
/* Start and end of displacement string expression (if found). */
char *displacement_string_start;
char *displacement_string_end;
+ char *vop_start;
do_memory_reference:
if ((i.mem_operands == 1
after the '('. */
base_string = op_string + strlen (op_string);
+ /* Handle vector operations. */
+ vop_start = strchr (op_string, '{');
+ if (vop_start && vop_start < base_string)
+ {
+ if (check_VecOperations (vop_start, base_string) == NULL)
+ return 0;
+ base_string = vop_start;
+ }
+
--base_string;
if (is_space_char (*base_string))
--base_string;
}
else if (*base_string == REGISTER_PREFIX)
{
+ end_op = strchr (base_string, ',');
+ if (end_op)
+ *end_op = '\0';
as_bad (_("bad register name `%s'"), base_string);
return 0;
}
}
else if (*base_string == REGISTER_PREFIX)
{
+ end_op = strchr (base_string, ',');
+ if (end_op)
+ *end_op = '\0';
as_bad (_("bad register name `%s'"), base_string);
return 0;
}
return 1; /* Normal return. */
}
\f
+/* Calculate the maximum variable size (i.e., excluding fr_fix)
+ that an rs_machine_dependent frag may reach.  FRAG must be an
+ rs_machine_dependent frag (asserted below). */
+
+unsigned int
+i386_frag_max_var (fragS *frag)
+{
+ /* The only relaxable frags are for jumps.
+ Unconditional jumps can grow by 4 bytes and others by 5 bytes.
+ TYPE_FROM_RELAX_STATE extracts the jump type that was encoded
+ into fr_subtype when the frag was created. */
+ gas_assert (frag->fr_type == rs_machine_dependent);
+ return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
+}
+
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+/* Return non-zero if FR_SYMBOL, the target of a relaxable jump frag
+ carrying relocation FR_VAR (a bfd_reloc_code_real value, or NO_RELOC),
+ is known to resolve within the current output, i.e. cannot be
+ preempted by another definition at link/run time.  Used by
+ md_estimate_size_before_relax to decide whether a jump is
+ relaxable. */
+
+static int
+elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
+{
+ /* STT_GNU_IFUNC symbol must go through PLT. */
+ if ((symbol_get_bfdsym (fr_symbol)->flags
+ & BSF_GNU_INDIRECT_FUNCTION) != 0)
+ return 0;
+
+ if (!S_IS_EXTERNAL (fr_symbol))
+ /* Symbol may be weak or local. */
+ return !S_IS_WEAK (fr_symbol);
+
+ /* Global symbols with non-default visibility can't be preempted. */
+ if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
+ return 1;
+
+ if (fr_var != NO_RELOC)
+ switch ((enum bfd_reloc_code_real) fr_var)
+ {
+ case BFD_RELOC_386_PLT32:
+ case BFD_RELOC_X86_64_PLT32:
+ /* Symbol with PLT relocation may be preempted. */
+ return 0;
+ default:
+ abort ();
+ }
+
+ /* Global symbols with default visibility in a shared library may be
+ preempted by another definition.  ("shared" is set by -mshared.) */
+ return !shared;
+}
+#endif
+
/* md_estimate_size_before_relax()
Called just before relax() for rs_machine_dependent frags. The x86
returned value. */
int
-md_estimate_size_before_relax (fragP, segment)
- fragS *fragP;
- segT segment;
+md_estimate_size_before_relax (fragS *fragP, segT segment)
{
/* We've already got fragP->fr_subtype right; all we have to do is
check for un-relaxable symbols. On an ELF system, we can't relax
if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
|| (IS_ELF
- && (S_IS_EXTERNAL (fragP->fr_symbol)
- || S_IS_WEAK (fragP->fr_symbol)))
+ && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
+ fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
|| (OUTPUT_FLAVOR == bfd_target_coff_flavour
int old_fr_fix;
if (fragP->fr_var != NO_RELOC)
- reloc_type = fragP->fr_var;
+ reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
else if (size == 2)
reloc_type = BFD_RELOC_16_PCREL;
else
Caller will turn frag into a ".space 0". */
void
-md_convert_frag (abfd, sec, fragP)
- bfd *abfd ATTRIBUTE_UNUSED;
- segT sec ATTRIBUTE_UNUSED;
- fragS *fragP;
+md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
+ fragS *fragP)
{
unsigned char *opcode;
unsigned char *where_to_put_displacement = NULL;
fragP->fr_fix += extension;
}
\f
-/* Apply a fixup (fixS) to segment data, once it has been determined
+/* Apply a fixup (fixP) to segment data, once it has been determined
by our caller that we have all the info we need to fix it up.
+ Parameter valP is the pointer to the value of the bits.
+
On the 386, immediates, displacements, and data pointers are all in
the same (little-endian) format, so we don't need to care about which
we are handling. */
void
-md_apply_fix (fixP, valP, seg)
- /* The fix we're to put in. */
- fixS *fixP;
- /* Pointer to the value of the bits. */
- valueT *valP;
- /* Segment fix is from. */
- segT seg ATTRIBUTE_UNUSED;
+md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
valueT value = *valP;
if ((sym_seg == seg
|| (symbol_section_p (fixP->fx_addsy)
&& sym_seg != absolute_section))
- && !TC_FORCE_RELOCATION (fixP))
+ && !generic_force_reloc (fixP))
{
/* Yes, we add the values in twice. This is because
bfd_install_relocation subtracts them out again. I think
#endif
}
#if defined (OBJ_COFF) && defined (TE_PE)
- if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
- {
+ if (fixP->fx_addsy != NULL
+ && S_IS_WEAK (fixP->fx_addsy)
+ /* PR 16858: Do not modify weak function references. */
+ && ! fixP->fx_pcrel)
+ {
+#if !defined (TE_PEP)
+ /* For x86 PE weak function symbols are neither PC-relative
+ nor do they set S_IS_FUNCTION. So the only reliable way
+ to detect them is to check the flags of their containing
+ section. */
+ if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
+ && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
+ ;
+ else
+#endif
value -= S_GET_VALUE (fixP->fx_addsy);
}
#endif
if (*s == ')')
{
*end_op = s + 1;
- r = hash_find (reg_hash, "st(0)");
+ r = (const reg_entry *) hash_find (reg_hash, "st(0)");
know (r);
return r + fpr;
}
&& !cpu_arch_flags.bitfield.cpui386)
return (const reg_entry *) NULL;
+ if (r->reg_type.bitfield.floatreg
+ && !cpu_arch_flags.bitfield.cpu8087
+ && !cpu_arch_flags.bitfield.cpu287
+ && !cpu_arch_flags.bitfield.cpu387)
+ return (const reg_entry *) NULL;
+
if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
return (const reg_entry *) NULL;
if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
return (const reg_entry *) NULL;
+ if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
+ && !cpu_arch_flags.bitfield.cpuavx512f)
+ return (const reg_entry *) NULL;
+
/* Don't allow fake index register unless allow_index_reg isn't 0. */
if (!allow_index_reg
&& (r->reg_num == RegEiz || r->reg_num == RegRiz))
return (const reg_entry *) NULL;
+ /* Upper 16 vector register is only available with VREX in 64bit
+ mode. */
+ if ((r->reg_flags & RegVRex))
+ {
+ if (!cpu_arch_flags.bitfield.cpuvrex
+ || flag_code != CODE_64BIT)
+ return (const reg_entry *) NULL;
+
+ i.need_vrex = 1;
+ }
+
if (((r->reg_flags & (RegRex64 | RegRex))
|| r->reg_type.bitfield.reg64)
&& (!cpu_arch_flags.bitfield.cpulm
know (e->X_add_number >= 0
&& (valueT) e->X_add_number < i386_regtab_size);
r = i386_regtab + e->X_add_number;
+ if ((r->reg_flags & RegVRex))
+ i.need_vrex = 1;
*end_op = input_line_pointer;
}
*input_line_pointer = c;
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
+#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
+#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
+#define OPTION_X32 (OPTION_MD_BASE + 14)
+#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
+#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
+#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
+#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
+#define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
+#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
+#define OPTION_MSHARED (OPTION_MD_BASE + 21)
+#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
+#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
struct option md_longopts[] =
{
{"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP))
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
{"64", no_argument, NULL, OPTION_64},
+#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ {"x32", no_argument, NULL, OPTION_X32},
+ {"mshared", no_argument, NULL, OPTION_MSHARED},
#endif
{"divide", no_argument, NULL, OPTION_DIVIDE},
{"march", required_argument, NULL, OPTION_MARCH},
{"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
{"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
{"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
+ {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
+ {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
+ {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
+ {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
+ {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
+# if defined (TE_PE) || defined (TE_PEP)
+ {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
+#endif
+ {"momit-lock-prefix", required_argument, NULL, OPTION_OMIT_LOCK_PREFIX},
+ {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
+ {"mamd64", no_argument, NULL, OPTION_MAMD64},
+ {"mintel64", no_argument, NULL, OPTION_MINTEL64},
{NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
int
md_parse_option (int c, char *arg)
{
- unsigned int i;
+ unsigned int j;
char *arch, *next;
switch (c)
/* -s: On i386 Solaris, this tells the native assembler to use
.stab instead of .stab.excl. We always use .stab anyhow. */
break;
+
+ case OPTION_MSHARED:
+ shared = 1;
+ break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP))
+ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
case OPTION_64:
{
const char **list, **l;
if (CONST_STRNEQ (*l, "elf64-x86-64")
|| strcmp (*l, "coff-x86-64") == 0
|| strcmp (*l, "pe-x86-64") == 0
- || strcmp (*l, "pei-x86-64") == 0)
+ || strcmp (*l, "pei-x86-64") == 0
+ || strcmp (*l, "mach-o-x86-64") == 0)
{
default_arch = "x86_64";
break;
}
if (*l == NULL)
- as_fatal (_("No compiled in support for x86_64"));
+ as_fatal (_("no compiled in support for x86_64"));
free (list);
}
break;
#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case OPTION_X32:
+ if (IS_ELF)
+ {
+ const char **list, **l;
+
+ list = bfd_target_list ();
+ for (l = list; *l != NULL; l++)
+ if (CONST_STRNEQ (*l, "elf32-x86-64"))
+ {
+ default_arch = "x86_64:32";
+ break;
+ }
+ if (*l == NULL)
+ as_fatal (_("no compiled in support for 32bit x86_64"));
+ free (list);
+ }
+ else
+ as_fatal (_("32bit x86_64 is only supported for ELF"));
+ break;
+#endif
+
case OPTION_32:
default_arch = "i386";
break;
do
{
if (*arch == '.')
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ as_fatal (_("invalid -march= option: `%s'"), arg);
next = strchr (arch, '+');
if (next)
*next++ = '\0';
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (arch, cpu_arch [i].name) == 0)
+ if (strcmp (arch, cpu_arch [j].name) == 0)
{
/* Processor. */
- cpu_arch_name = cpu_arch[i].name;
+ if (! cpu_arch[j].flags.bitfield.cpui386)
+ continue;
+
+ cpu_arch_name = cpu_arch[j].name;
cpu_sub_arch_name = NULL;
- cpu_arch_flags = cpu_arch[i].flags;
- cpu_arch_isa = cpu_arch[i].type;
- cpu_arch_isa_flags = cpu_arch[i].flags;
+ cpu_arch_flags = cpu_arch[j].flags;
+ cpu_arch_isa = cpu_arch[j].type;
+ cpu_arch_isa_flags = cpu_arch[j].flags;
if (!cpu_arch_tune_set)
{
cpu_arch_tune = cpu_arch_isa;
}
break;
}
- else if (*cpu_arch [i].name == '.'
- && strcmp (arch, cpu_arch [i].name + 1) == 0)
+ else if (*cpu_arch [j].name == '.'
+ && strcmp (arch, cpu_arch [j].name + 1) == 0)
{
/* ISA entension. */
i386_cpu_flags flags;
- flags = cpu_flags_or (cpu_arch_flags,
- cpu_arch[i].flags);
- if (!cpu_flags_equal (&flags, &cpu_arch_flags))
+
+ if (!cpu_arch[j].negated)
+ flags = cpu_flags_or (cpu_arch_flags,
+ cpu_arch[j].flags);
+ else
+ flags = cpu_flags_and_not (cpu_arch_flags,
+ cpu_arch[j].flags);
+
+ if (!valid_iamcu_cpu_flags (&flags))
+ as_fatal (_("`%s' isn't valid for Intel MCU"), arch);
+ else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name,
- cpu_arch[i].name,
+ cpu_arch[j].name,
(const char *) NULL);
free (name);
}
else
- cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
+ cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
+ cpu_arch_isa_flags = flags;
}
break;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -march= option: `%s'"), arg);
+ if (j >= ARRAY_SIZE (cpu_arch))
+ as_fatal (_("invalid -march= option: `%s'"), arg);
arch = next;
}
case OPTION_MTUNE:
if (*arg == '.')
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
- for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
- if (strcmp (arg, cpu_arch [i].name) == 0)
+ if (strcmp (arg, cpu_arch [j].name) == 0)
{
cpu_arch_tune_set = 1;
- cpu_arch_tune = cpu_arch [i].type;
- cpu_arch_tune_flags = cpu_arch[i].flags;
+ cpu_arch_tune = cpu_arch [j].type;
+ cpu_arch_tune_flags = cpu_arch[j].flags;
break;
}
}
- if (i >= ARRAY_SIZE (cpu_arch))
- as_fatal (_("Invalid -mtune= option: `%s'"), arg);
+ if (j >= ARRAY_SIZE (cpu_arch))
+ as_fatal (_("invalid -mtune= option: `%s'"), arg);
break;
case OPTION_MMNEMONIC:
else if (strcasecmp (arg, "intel") == 0)
intel_mnemonic = 1;
else
- as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
+ as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
break;
case OPTION_MSYNTAX:
else if (strcasecmp (arg, "intel") == 0)
intel_syntax = 1;
else
- as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
+ as_fatal (_("invalid -msyntax= option: `%s'"), arg);
break;
case OPTION_MINDEX_REG:
case OPTION_MSSE_CHECK:
if (strcasecmp (arg, "error") == 0)
- sse_check = sse_check_error;
+ sse_check = check_error;
+ else if (strcasecmp (arg, "warning") == 0)
+ sse_check = check_warning;
+ else if (strcasecmp (arg, "none") == 0)
+ sse_check = check_none;
+ else
+ as_fatal (_("invalid -msse-check= option: `%s'"), arg);
+ break;
+
+ case OPTION_MOPERAND_CHECK:
+ if (strcasecmp (arg, "error") == 0)
+ operand_check = check_error;
else if (strcasecmp (arg, "warning") == 0)
- sse_check = sse_check_warning;
+ operand_check = check_warning;
else if (strcasecmp (arg, "none") == 0)
- sse_check = sse_check_none;
+ operand_check = check_none;
+ else
+ as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
+ break;
+
+ case OPTION_MAVXSCALAR:
+ if (strcasecmp (arg, "128") == 0)
+ avxscalar = vex128;
+ else if (strcasecmp (arg, "256") == 0)
+ avxscalar = vex256;
+ else
+ as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
+ break;
+
+ case OPTION_MADD_BND_PREFIX:
+ add_bnd_prefix = 1;
+ break;
+
+ case OPTION_MEVEXLIG:
+ if (strcmp (arg, "128") == 0)
+ evexlig = evexl128;
+ else if (strcmp (arg, "256") == 0)
+ evexlig = evexl256;
+ else if (strcmp (arg, "512") == 0)
+ evexlig = evexl512;
+ else
+ as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
+ break;
+
+ case OPTION_MEVEXRCIG:
+ if (strcmp (arg, "rne") == 0)
+ evexrcig = rne;
+ else if (strcmp (arg, "rd") == 0)
+ evexrcig = rd;
+ else if (strcmp (arg, "ru") == 0)
+ evexrcig = ru;
+ else if (strcmp (arg, "rz") == 0)
+ evexrcig = rz;
+ else
+ as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
+ break;
+
+ case OPTION_MEVEXWIG:
+ if (strcmp (arg, "0") == 0)
+ evexwig = evexw0;
+ else if (strcmp (arg, "1") == 0)
+ evexwig = evexw1;
+ else
+ as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
+ break;
+
+# if defined (TE_PE) || defined (TE_PEP)
+ case OPTION_MBIG_OBJ:
+ use_big_obj = 1;
+ break;
+#endif
+
+ case OPTION_OMIT_LOCK_PREFIX:
+ if (strcasecmp (arg, "yes") == 0)
+ omit_lock_prefix = 1;
+ else if (strcasecmp (arg, "no") == 0)
+ omit_lock_prefix = 0;
else
- as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
+ as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
+ break;
+
+ case OPTION_MAMD64:
+ cpu_arch_flags.bitfield.cpuamd64 = 1;
+ cpu_arch_flags.bitfield.cpuintel64 = 0;
+ cpu_arch_isa_flags.bitfield.cpuamd64 = 1;
+ cpu_arch_isa_flags.bitfield.cpuintel64 = 0;
+ break;
+
+ case OPTION_MINTEL64:
+ cpu_arch_flags.bitfield.cpuamd64 = 0;
+ cpu_arch_flags.bitfield.cpuintel64 = 1;
+ cpu_arch_isa_flags.bitfield.cpuamd64 = 0;
+ cpu_arch_isa_flags.bitfield.cpuintel64 = 1;
break;
default:
return 1;
}
+#define MESSAGE_TEMPLATE \
+" "
+
+/* Print to STREAM a comma-separated, line-wrapped list of the entries
+ in cpu_arch: processor names when EXT is zero, ISA extension names
+ (leading '.' stripped) when EXT is non-zero.  With CHECK non-zero,
+ processors whose flags lack the cpui386 bit are omitted.  Entries
+ flagged "skip" are never shown.  Each output line is at most
+ sizeof (MESSAGE_TEMPLATE) long. */
+
+static void
+show_arch (FILE *stream, int ext, int check)
+{
+ static char message[] = MESSAGE_TEMPLATE;
+ /* Skip the first 27 blanks so entries line up under the usage text. */
+ char *start = message + 27;
+ char *p;
+ int size = sizeof (MESSAGE_TEMPLATE);
+ int left;
+ const char *name;
+ int len;
+ unsigned int j;
+
+ p = start;
+ left = size - (start - message);
+ for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
+ {
+ /* Should it be skipped? */
+ if (cpu_arch [j].skip)
+ continue;
+
+ name = cpu_arch [j].name;
+ len = cpu_arch [j].len;
+ if (*name == '.')
+ {
+ /* It is an extension. Skip if we aren't asked to show it. */
+ if (ext)
+ {
+ name++;
+ len--;
+ }
+ else
+ continue;
+ }
+ else if (ext)
+ {
+ /* It is a processor. Skip if we show only extensions. */
+ continue;
+ }
+ else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
+ {
+ /* It is an impossible processor - skip. */
+ continue;
+ }
+
+ /* Reserve 2 spaces for ", " or ",\0" */
+ left -= len + 2;
+
+ /* Check if there is any room. */
+ if (left >= 0)
+ {
+ if (p != start)
+ {
+ *p++ = ',';
+ *p++ = ' ';
+ }
+ p = mempcpy (p, name, len);
+ }
+ else
+ {
+ /* Output the current message now and start a new one. */
+ *p++ = ',';
+ *p = '\0';
+ fprintf (stream, "%s\n", message);
+ p = start;
+ left = size - (start - message) - len - 2;
+
+ gas_assert (left >= 0);
+
+ p = mempcpy (p, name, len);
+ }
+ }
+
+ *p = '\0';
+ fprintf (stream, "%s\n", message);
+}
+
void
-md_show_usage (stream)
- FILE *stream;
+md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
fprintf (stream, _("\
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
fprintf (stream, _("\
- --32/--64 generate 32bit/64bit code\n"));
+ --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
fprintf (stream, _("\
#endif
fprintf (stream, _("\
-march=CPU[,+EXTENSION...]\n\
- generate code for CPU and EXTENSION, CPU is one of:\n\
- i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
- pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
- core, core2, corei7, k6, k6_2, athlon, k8, amdfam10,\n\
- generic32, generic64\n\
- EXTENSION is combination of:\n\
- mmx, sse, sse2, sse3, ssse3, sse4.1, sse4.2, sse4,\n\
- avx, vmx, smx, xsave, movbe, ept, aes, pclmul, fma,\n\
- clflush, syscall, rdtscp, 3dnow, 3dnowa, sse4a,\n\
- svme, abm, padlock, fma4\n"));
+ generate code for CPU and EXTENSION, CPU is one of:\n"));
+ show_arch (stream, 0, 1);
fprintf (stream, _("\
- -mtune=CPU optimize for CPU, CPU is one of:\n\
- i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
- pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
- core, core2, corei7, k6, k6_2, athlon, k8, amdfam10,\n\
- generic32, generic64\n"));
+ EXTENSION is combination of:\n"));
+ show_arch (stream, 1, 0);
+ fprintf (stream, _("\
+ -mtune=CPU optimize for CPU, CPU is one of:\n"));
+ show_arch (stream, 0, 0);
fprintf (stream, _("\
-msse2avx encode SSE instructions with VEX prefix\n"));
fprintf (stream, _("\
-msse-check=[none|error|warning]\n\
check SSE instructions\n"));
fprintf (stream, _("\
+ -moperand-check=[none|error|warning]\n\
+ check operand combinations for validity\n"));
+ fprintf (stream, _("\
+ -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
+ length\n"));
+ fprintf (stream, _("\
+ -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
+ length\n"));
+ fprintf (stream, _("\
+ -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
+ for EVEX.W bit ignored instructions\n"));
+ fprintf (stream, _("\
+ -mevexrcig=[rne|rd|ru|rz]\n\
+ encode EVEX instructions with specific EVEX.RC value\n\
+ for SAE-only ignored instructions\n"));
+ fprintf (stream, _("\
-mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
fprintf (stream, _("\
-msyntax=[att|intel] use AT&T/Intel syntax\n"));
-mnaked-reg don't require `%%' prefix for registers\n"));
fprintf (stream, _("\
-mold-gcc support old (<= 2.8.1) versions of gcc\n"));
+ fprintf (stream, _("\
+ -madd-bnd-prefix add BND prefix for all valid branches\n"));
+ fprintf (stream, _("\
+ -mshared disable branch optimization for shared code\n"));
+# if defined (TE_PE) || defined (TE_PEP)
+ fprintf (stream, _("\
+ -mbig-obj generate big object files\n"));
+#endif
+ fprintf (stream, _("\
+ -momit-lock-prefix=[no|yes]\n\
+ strip all lock prefixes\n"));
+ fprintf (stream, _("\
+ -mamd64 accept only AMD64 ISA\n"));
+ fprintf (stream, _("\
+ -mintel64 accept only Intel64 ISA\n"));
}
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
const char *
i386_target_format (void)
{
- if (!strcmp (default_arch, "x86_64"))
+ if (!strncmp (default_arch, "x86_64", 6))
{
- set_code_flag (CODE_64BIT);
- if (cpu_flags_all_zero (&cpu_arch_isa_flags))
- {
- cpu_arch_isa_flags.bitfield.cpui186 = 1;
- cpu_arch_isa_flags.bitfield.cpui286 = 1;
- cpu_arch_isa_flags.bitfield.cpui386 = 1;
- cpu_arch_isa_flags.bitfield.cpui486 = 1;
- cpu_arch_isa_flags.bitfield.cpui586 = 1;
- cpu_arch_isa_flags.bitfield.cpui686 = 1;
- cpu_arch_isa_flags.bitfield.cpuclflush = 1;
- cpu_arch_isa_flags.bitfield.cpummx= 1;
- cpu_arch_isa_flags.bitfield.cpusse = 1;
- cpu_arch_isa_flags.bitfield.cpusse2 = 1;
- }
- if (cpu_flags_all_zero (&cpu_arch_tune_flags))
- {
- cpu_arch_tune_flags.bitfield.cpui186 = 1;
- cpu_arch_tune_flags.bitfield.cpui286 = 1;
- cpu_arch_tune_flags.bitfield.cpui386 = 1;
- cpu_arch_tune_flags.bitfield.cpui486 = 1;
- cpu_arch_tune_flags.bitfield.cpui586 = 1;
- cpu_arch_tune_flags.bitfield.cpui686 = 1;
- cpu_arch_tune_flags.bitfield.cpuclflush = 1;
- cpu_arch_tune_flags.bitfield.cpummx= 1;
- cpu_arch_tune_flags.bitfield.cpusse = 1;
- cpu_arch_tune_flags.bitfield.cpusse2 = 1;
- }
+ update_code_flag (CODE_64BIT, 1);
+ if (default_arch[6] == '\0')
+ x86_elf_abi = X86_64_ABI;
+ else
+ x86_elf_abi = X86_64_X32_ABI;
}
else if (!strcmp (default_arch, "i386"))
+ update_code_flag (CODE_32BIT, 1);
+ else if (!strcmp (default_arch, "iamcu"))
{
- set_code_flag (CODE_32BIT);
- if (cpu_flags_all_zero (&cpu_arch_isa_flags))
- {
- cpu_arch_isa_flags.bitfield.cpui186 = 1;
- cpu_arch_isa_flags.bitfield.cpui286 = 1;
- cpu_arch_isa_flags.bitfield.cpui386 = 1;
- }
- if (cpu_flags_all_zero (&cpu_arch_tune_flags))
+ update_code_flag (CODE_32BIT, 1);
+ if (cpu_arch_isa == PROCESSOR_UNKNOWN)
{
- cpu_arch_tune_flags.bitfield.cpui186 = 1;
- cpu_arch_tune_flags.bitfield.cpui286 = 1;
- cpu_arch_tune_flags.bitfield.cpui386 = 1;
+ static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
+ cpu_arch_name = "iamcu";
+ cpu_sub_arch_name = NULL;
+ cpu_arch_flags = iamcu_flags;
+ cpu_arch_isa = PROCESSOR_IAMCU;
+ cpu_arch_isa_flags = iamcu_flags;
+ if (!cpu_arch_tune_set)
+ {
+ cpu_arch_tune = cpu_arch_isa;
+ cpu_arch_tune_flags = cpu_arch_isa_flags;
+ }
}
+ else
+ as_fatal (_("Intel MCU doesn't support `%s' architecture"),
+ cpu_arch_name);
}
else
- as_fatal (_("Unknown architecture"));
+ as_fatal (_("unknown architecture"));
+
+ if (cpu_flags_all_zero (&cpu_arch_isa_flags))
+ cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
+ if (cpu_flags_all_zero (&cpu_arch_tune_flags))
+ cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
+
switch (OUTPUT_FLAVOR)
{
-#if defined (TE_PE) || defined (TE_PEP)
- case bfd_target_coff_flavour:
- return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
-#endif
-#ifdef OBJ_MAYBE_AOUT
+#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
case bfd_target_aout_flavour:
return AOUT_TARGET_FORMAT;
#endif
-#ifdef OBJ_MAYBE_COFF
+#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
+# if defined (TE_PE) || defined (TE_PEP)
+ case bfd_target_coff_flavour:
+ if (flag_code == CODE_64BIT)
+ return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
+ else
+ return "pe-i386";
+# elif defined (TE_GO32)
+ case bfd_target_coff_flavour:
+ return "coff-go32";
+# else
case bfd_target_coff_flavour:
return "coff-i386";
+# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
case bfd_target_elf_flavour:
{
- if (flag_code == CODE_64BIT)
+ const char *format;
+
+ switch (x86_elf_abi)
{
+ default:
+ format = ELF_TARGET_FORMAT;
+ break;
+ case X86_64_ABI:
+ use_rela_relocations = 1;
object_64bit = 1;
+ format = ELF_TARGET_FORMAT64;
+ break;
+ case X86_64_X32_ABI:
use_rela_relocations = 1;
+ object_64bit = 1;
+ disallow_64bit_reloc = 1;
+ format = ELF_TARGET_FORMAT32;
+ break;
}
- return flag_code == CODE_64BIT ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT;
+ if (cpu_arch_isa == PROCESSOR_L1OM)
+ {
+ if (x86_elf_abi != X86_64_ABI)
+ as_fatal (_("Intel L1OM is 64bit only"));
+ return ELF_TARGET_L1OM_FORMAT;
+ }
+ else if (cpu_arch_isa == PROCESSOR_K1OM)
+ {
+ if (x86_elf_abi != X86_64_ABI)
+ as_fatal (_("Intel K1OM is 64bit only"));
+ return ELF_TARGET_K1OM_FORMAT;
+ }
+ else if (cpu_arch_isa == PROCESSOR_IAMCU)
+ {
+ if (x86_elf_abi != I386_ABI)
+ as_fatal (_("Intel MCU is 32bit only"));
+ return ELF_TARGET_IAMCU_FORMAT;
+ }
+ else
+ return format;
}
#endif
#if defined (OBJ_MACH_O)
case bfd_target_mach_o_flavour:
- return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
+ if (flag_code == CODE_64BIT)
+ {
+ use_rela_relocations = 1;
+ object_64bit = 1;
+ return "mach-o-x86-64";
+ }
+ else
+ return "mach-o-i386";
#endif
default:
abort ();
}
#endif /* OBJ_MAYBE_ more than one */
-
-#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
-void
-i386_elf_emit_arch_note (void)
-{
- if (IS_ELF && cpu_arch_name != NULL)
- {
- char *p;
- asection *seg = now_seg;
- subsegT subseg = now_subseg;
- Elf_Internal_Note i_note;
- Elf_External_Note e_note;
- asection *note_secp;
- int len;
-
- /* Create the .note section. */
- note_secp = subseg_new (".note", 0);
- bfd_set_section_flags (stdoutput,
- note_secp,
- SEC_HAS_CONTENTS | SEC_READONLY);
-
- /* Process the arch string. */
- len = strlen (cpu_arch_name);
-
- i_note.namesz = len + 1;
- i_note.descsz = 0;
- i_note.type = NT_ARCH;
- p = frag_more (sizeof (e_note.namesz));
- md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
- p = frag_more (sizeof (e_note.descsz));
- md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
- p = frag_more (sizeof (e_note.type));
- md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
- p = frag_more (len + 1);
- strcpy (p, cpu_arch_name);
-
- frag_align (2, 0, 0);
-
- subseg_set (seg, subseg);
- }
-}
-#endif
\f
symbolS *
-md_undefined_symbol (name)
- char *name;
+md_undefined_symbol (char *name)
{
if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
&& name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
/* Round up a section size to the appropriate boundary. */
valueT
-md_section_align (segment, size)
- segT segment ATTRIBUTE_UNUSED;
- valueT size;
+md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
}
arelent *
-tc_gen_reloc (section, fixp)
- asection *section ATTRIBUTE_UNUSED;
- fixS *fixp;
+tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
arelent *rel;
bfd_reloc_code_real_type code;
switch (fixp->fx_r_type)
{
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case BFD_RELOC_SIZE32:
+ case BFD_RELOC_SIZE64:
+ if (S_IS_DEFINED (fixp->fx_addsy)
+ && !S_IS_EXTERNAL (fixp->fx_addsy))
+ {
+ /* Resolve size relocation against local symbol to size of
+ the symbol plus addend. */
+ valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
+ if (fixp->fx_r_type == BFD_RELOC_SIZE32
+ && !fits_in_unsigned_long (value))
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("symbol size computation overflow"));
+ fixp->fx_addsy = NULL;
+ fixp->fx_subsy = NULL;
+ md_apply_fix (fixp, (valueT *) &value, NULL);
+ return NULL;
+ }
+#endif
+
case BFD_RELOC_X86_64_PLT32:
case BFD_RELOC_X86_64_GOT32:
case BFD_RELOC_X86_64_GOTPCREL:
/* Use the rela in 64bit mode. */
else
{
+ if (disallow_64bit_reloc)
+ switch (code)
+ {
+ case BFD_RELOC_X86_64_DTPOFF64:
+ case BFD_RELOC_X86_64_TPOFF64:
+ case BFD_RELOC_64_PCREL:
+ case BFD_RELOC_X86_64_GOTOFF64:
+ case BFD_RELOC_X86_64_GOT64:
+ case BFD_RELOC_X86_64_GOTPCREL64:
+ case BFD_RELOC_X86_64_GOTPC64:
+ case BFD_RELOC_X86_64_GOTPLT64:
+ case BFD_RELOC_X86_64_PLTOFF64:
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _("cannot represent relocation type %s in x32 mode"),
+ bfd_get_reloc_code_name (code));
+ break;
+ default:
+ break;
+ }
+
if (!fixp->fx_pcrel)
rel->addend = fixp->fx_offset;
else
cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
+int
+x86_dwarf2_addr_size (void)
+{
+#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+ if (x86_elf_abi == X86_64_X32_ABI)
+ return 4;
+#endif
+ return bfd_arch_bits_per_address (stdoutput) / 8;
+}
+
int
i386_elf_section_type (const char *str, size_t len)
{
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
- expressionS expr;
+ expressionS exp;
- expr.X_op = O_secrel;
- expr.X_add_symbol = symbol;
- expr.X_add_number = 0;
- emit_expr (&expr, size);
+ exp.X_op = O_secrel;
+ exp.X_add_symbol = symbol;
+ exp.X_add_number = 0;
+ emit_expr (&exp, size);
}
#endif
if (letter == 'l')
return SHF_X86_64_LARGE;
- *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
}
else
- *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
return -1;
}