#endif
#endif
-#ifndef REGISTER_WARNINGS
-#define REGISTER_WARNINGS 1
-#endif
-
#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif
CPU_SSE2_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
CPU_SSE3_FLAGS, 0 },
+ { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
+ CPU_SSE4A_FLAGS, 0 },
{ STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
CPU_SSSE3_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
{ STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
{ STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
{ STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS },
+ { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS },
{ STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS },
{ STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS },
{ STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS },
return x;
}
+static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
+
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
if (fixP->fx_r_type == BFD_RELOC_SIZE32
|| fixP->fx_r_type == BFD_RELOC_SIZE64
|| fixP->fx_r_type == BFD_RELOC_386_GOTOFF
- || fixP->fx_r_type == BFD_RELOC_386_PLT32
|| fixP->fx_r_type == BFD_RELOC_386_GOT32
|| fixP->fx_r_type == BFD_RELOC_386_GOT32X
|| fixP->fx_r_type == BFD_RELOC_386_TLS_GD
|| fixP->fx_r_type == BFD_RELOC_386_TLS_LE
|| fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
|| fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
- || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
|| fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
|| fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
|| fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
/* Now we've parsed the mnemonic into a set of templates, and have the
operands at hand. */
- /* All intel opcodes have reversed operands except for "bound" and
- "enter". We also don't reverse intersegment "jmp" and "call"
- instructions with 2 immediate operands so that the immediate segment
- precedes the offset, as it does when in AT&T mode. */
+ /* All Intel opcodes have reversed operands except for "bound", "enter",
+ "monitor*", and "mwait*". We also don't reverse intersegment "jmp"
+ and "call" instructions with 2 immediate operands so that the immediate
+ segment precedes the offset, as it does when in AT&T mode. */
if (intel_syntax
&& i.operands > 1
&& (strcmp (mnemonic, "bound") != 0)
&& (strcmp (mnemonic, "invlpga") != 0)
+ && (strncmp (mnemonic, "monitor", 7) != 0)
+ && (strncmp (mnemonic, "mwait", 5) != 0)
&& !(operand_type_check (i.types[0], imm)
&& operand_type_check (i.types[1], imm)))
swap_operands ();
: as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
}
- /* Zap movzx and movsx suffix. The suffix has been set from
- "word ptr" or "byte ptr" on the source operand in Intel syntax
- or extracted from mnemonic in AT&T syntax. But we'll use
- the destination register to choose the suffix for encoding. */
- if ((i.tm.base_opcode & ~9) == 0x0fb6)
- {
- /* In Intel syntax, there must be a suffix. In AT&T syntax, if
- there is no suffix, the default will be byte extension. */
- if (i.reg_operands != 2
- && !i.suffix
- && intel_syntax)
- as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
-
- i.suffix = 0;
- }
-
if (i.tm.opcode_modifier.fwait)
if (!add_prefix (FWAIT_OPCODE))
return;
{
unsigned int op;
i386_cpu_flags cpu;
- static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
/* Templates allowing for ZMMword as well as YMMword and/or XMMword for
any one operand are implicity requiring AVX512VL support if the actual
break;
case intel64:
/* -mintel64: Don't accept AMD64. */
- if (t->opcode_modifier.isa64 == AMD64)
+ if (t->opcode_modifier.isa64 == AMD64 && flag_code == CODE_64BIT)
continue;
break;
}
else if (i.reg_operands
&& (i.operands > 1 || i.types[0].bitfield.class == Reg))
{
+ unsigned int numop = i.operands;
+
+ /* movsx/movzx want only their source operand considered here, for the
+ ambiguity checking below. The suffix will be replaced afterwards
+ to represent the destination (register). */
+ if (((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w)
+ || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
+ --i.operands;
+
/* If there's no instruction mnemonic suffix we try to invent one
based on GPR operands. */
if (!i.suffix)
continue;
break;
}
+
+ /* As an exception, movsx/movzx silently default to a byte source
+ in AT&T mode. */
+ if ((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w
+ && !i.suffix && !intel_syntax)
+ i.suffix = BYTE_MNEM_SUFFIX;
}
else if (i.suffix == BYTE_MNEM_SUFFIX)
{
;
else
abort ();
+
+ /* Undo the movsx/movzx change done above. */
+ i.operands = numop;
}
else if (i.tm.opcode_modifier.defaultsize && !i.suffix)
{
/* Accept FLDENV et al without suffix. */
&& (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
{
- unsigned int suffixes;
+ unsigned int suffixes, evex = 0;
suffixes = !i.tm.opcode_modifier.no_bsuf;
if (!i.tm.opcode_modifier.no_wsuf)
if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
suffixes |= 1 << 5;
- /* Are multiple suffixes allowed? */
+ /* For [XYZ]MMWORD operands inspect operand sizes. While generally
+ also suitable for AT&T syntax mode, it was requested that this be
+ restricted to just Intel syntax. */
+ if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast)
+ {
+ unsigned int op;
+
+ for (op = 0; op < i.tm.operands; ++op)
+ {
+ if (is_evex_encoding (&i.tm)
+ && !cpu_arch_flags.bitfield.cpuavx512vl)
+ {
+ if (i.tm.operand_types[op].bitfield.ymmword)
+ i.tm.operand_types[op].bitfield.xmmword = 0;
+ if (i.tm.operand_types[op].bitfield.zmmword)
+ i.tm.operand_types[op].bitfield.ymmword = 0;
+ if (!i.tm.opcode_modifier.evex
+ || i.tm.opcode_modifier.evex == EVEXDYN)
+ i.tm.opcode_modifier.evex = EVEX512;
+ }
+
+ if (i.tm.operand_types[op].bitfield.xmmword
+ + i.tm.operand_types[op].bitfield.ymmword
+ + i.tm.operand_types[op].bitfield.zmmword < 2)
+ continue;
+
+ /* Any properly sized operand disambiguates the insn. */
+ if (i.types[op].bitfield.xmmword
+ || i.types[op].bitfield.ymmword
+ || i.types[op].bitfield.zmmword)
+ {
+ suffixes &= ~(7 << 6);
+ evex = 0;
+ break;
+ }
+
+ if ((i.flags[op] & Operand_Mem)
+ && i.tm.operand_types[op].bitfield.unspecified)
+ {
+ if (i.tm.operand_types[op].bitfield.xmmword)
+ suffixes |= 1 << 6;
+ if (i.tm.operand_types[op].bitfield.ymmword)
+ suffixes |= 1 << 7;
+ if (i.tm.operand_types[op].bitfield.zmmword)
+ suffixes |= 1 << 8;
+ if (is_evex_encoding (&i.tm))
+ evex = EVEX512;
+ }
+ }
+ }
+
+ /* Are multiple suffixes / operand sizes allowed? */
if (suffixes & (suffixes - 1))
{
if (intel_syntax
if (i.tm.opcode_modifier.floatmf)
i.suffix = SHORT_MNEM_SUFFIX;
+ else if ((i.tm.base_opcode | 8) == 0xfbe
+ || (i.tm.base_opcode == 0x63
+ && i.tm.cpu_flags.bitfield.cpu64))
+ /* handled below */;
+ else if (evex)
+ i.tm.opcode_modifier.evex = evex;
else if (flag_code == CODE_16BIT)
i.suffix = WORD_MNEM_SUFFIX;
else if (!i.tm.opcode_modifier.no_lsuf)
}
}
+ if ((i.tm.base_opcode | 8) == 0xfbe
+ || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
+ {
+ /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
+ In AT&T syntax, if there is no suffix (warned about above), the default
+ will be byte extension. */
+ if (i.tm.opcode_modifier.w && i.suffix && i.suffix != BYTE_MNEM_SUFFIX)
+ i.tm.base_opcode |= 1;
+
+ /* For further processing, the suffix should represent the destination
+ (register). This is already the case when one was used with
+ mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
+ no suffix to begin with. */
+ if (i.tm.opcode_modifier.w || i.tm.base_opcode == 0x63 || !i.suffix)
+ {
+ if (i.types[1].bitfield.word)
+ i.suffix = WORD_MNEM_SUFFIX;
+ else if (i.types[1].bitfield.qword)
+ i.suffix = QWORD_MNEM_SUFFIX;
+ else
+ i.suffix = LONG_MNEM_SUFFIX;
+
+ i.tm.opcode_modifier.w = 0;
+ }
+ }
+
if (!i.tm.opcode_modifier.modrm && i.reg_operands && i.tm.operands < 3)
i.short_form = (i.tm.operand_types[0].bitfield.class == Reg)
!= (i.tm.operand_types[1].bitfield.class == Reg);
&& i.tm.operand_types[op].bitfield.word)
continue;
- /* crc32 doesn't generate this warning. */
- if (i.tm.base_opcode == 0xf20f38f0)
+ /* crc32 only wants its source operand checked here. */
+ if (i.tm.base_opcode == 0xf20f38f0 && op)
continue;
- if ((i.types[op].bitfield.word
- || i.types[op].bitfield.dword
- || i.types[op].bitfield.qword)
- && i.op[op].regs->reg_num < 4
- /* Prohibit these changes in 64bit mode, since the lowering
- would be more complicated. */
- && flag_code != CODE_64BIT)
- {
-#if REGISTER_WARNINGS
- if (!quiet_warnings)
- as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
- register_prefix,
- (i.op[op].regs + (i.types[op].bitfield.word
- ? REGNAM_AL - REGNAM_AX
- : REGNAM_AL - REGNAM_EAX))->reg_name,
- register_prefix,
- i.op[op].regs->reg_name,
- i.suffix);
-#endif
- continue;
- }
/* Any other register is bad. */
if (i.types[op].bitfield.class == Reg
|| i.types[op].bitfield.class == RegMMX
&& i.tm.operand_types[op].bitfield.dword)
{
if (intel_syntax
- && (i.tm.opcode_modifier.toqword
- /* Also convert to QWORD for MOVSXD. */
- || i.tm.base_opcode == 0x63)
+ && i.tm.opcode_modifier.toqword
&& i.types[0].bitfield.class != RegSIMD)
{
/* Convert to QWORD. We want REX byte. */
i.suffix);
return 0;
}
- /* Warn if the e or r prefix on a general reg is present. */
- else if ((!quiet_warnings || flag_code == CODE_64BIT)
- && (i.types[op].bitfield.dword
+ /* Error if the e or r prefix on a general reg is present. */
+ else if ((i.types[op].bitfield.dword
|| i.types[op].bitfield.qword)
&& (i.tm.operand_types[op].bitfield.class == Reg
|| i.tm.operand_types[op].bitfield.instance == Accum)
&& i.tm.operand_types[op].bitfield.word)
{
- /* Prohibit these changes in the 64bit mode, since the
- lowering is more complicated. */
- if (flag_code == CODE_64BIT)
- {
- as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
- register_prefix, i.op[op].regs->reg_name,
- i.suffix);
- return 0;
- }
-#if REGISTER_WARNINGS
- as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
- register_prefix,
- (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
- register_prefix, i.op[op].regs->reg_name, i.suffix);
-#endif
+ as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
+ register_prefix, i.op[op].regs->reg_name,
+ i.suffix);
+ return 0;
}
return 1;
}
}
}
- if (i.tm.base_opcode == 0x8d /* lea */
- && i.seg[0]
- && !quiet_warnings)
- as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
+ if ((i.seg[0] || i.prefix[SEG_PREFIX])
+ && i.tm.base_opcode == 0x8d /* lea */
+ && !is_any_vex_encoding(&i.tm))
+ {
+ if (!quiet_warnings)
+ as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
+ if (optimize)
+ {
+ i.seg[0] = NULL;
+ i.prefix[SEG_PREFIX] = 0;
+ }
+ }
/* If a segment was explicitly specified, and the specified segment
- is not the default, use an opcode prefix to select it. If we
- never figured out what the default segment is, then default_seg
- will be zero at this point, and the specified segment prefix will
- always be used. */
- if ((i.seg[0]) && (i.seg[0] != default_seg))
+ is neither the default nor the one already recorded from a prefix,
+ use an opcode prefix to select it. If we never figured out what
+ the default segment is, then default_seg will be zero at this
+ point, and the specified segment prefix will always be used. */
+ if (i.seg[0]
+ && i.seg[0] != default_seg
+ && i.seg[0]->seg_prefix != i.prefix[SEG_PREFIX])
{
if (!add_prefix (i.seg[0]->seg_prefix))
return 0;