CPU_BDVER4_FLAGS, 0 },
{ STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
CPU_ZNVER1_FLAGS, 0 },
+ { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
+ CPU_ZNVER2_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
CPU_BTVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
CPU_WAITPKG_FLAGS, 0 },
{ STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
CPU_CLDEMOTE_FLAGS, 0 },
+ { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
+ CPU_MOVDIRI_FLAGS, 0 },
+ { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
+ CPU_MOVDIR64B_FLAGS, 0 },
};
static const noarch_entry cpu_noarch[] =
{ STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
{ STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
{ STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
+ { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
+ { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
};
#ifdef I386COFF
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
-static const i386_operand_type control = OPERAND_TYPE_CONTROL;
-static const i386_operand_type inoutportreg
- = OPERAND_TYPE_INOUTPORTREG;
-static const i386_operand_type reg16_inoutportreg
- = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
return 0;
}
-/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit on
- operand J for instruction template T. */
+/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
+   between operand GIVEN and operand WANTED for instruction template T.  */
static INLINE int
-match_reg_size (const insn_template *t, unsigned int j)
+match_operand_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
- return !((i.types[j].bitfield.byte
- && !t->operand_types[j].bitfield.byte)
- || (i.types[j].bitfield.word
- && !t->operand_types[j].bitfield.word)
- || (i.types[j].bitfield.dword
- && !t->operand_types[j].bitfield.dword)
- || (i.types[j].bitfield.qword
- && !t->operand_types[j].bitfield.qword)
- || (i.types[j].bitfield.tbyte
- && !t->operand_types[j].bitfield.tbyte));
+ return !((i.types[given].bitfield.byte
+ && !t->operand_types[wanted].bitfield.byte)
+ || (i.types[given].bitfield.word
+ && !t->operand_types[wanted].bitfield.word)
+ || (i.types[given].bitfield.dword
+ && !t->operand_types[wanted].bitfield.dword)
+ || (i.types[given].bitfield.qword
+ && !t->operand_types[wanted].bitfield.qword)
+ || (i.types[given].bitfield.tbyte
+ && !t->operand_types[wanted].bitfield.tbyte));
}
-/* Return 1 if there is no conflict in SIMD register on
- operand J for instruction template T. */
+/* Return 1 if there is no conflict in SIMD register between operand
+   GIVEN and operand WANTED for instruction template T.  */
static INLINE int
-match_simd_size (const insn_template *t, unsigned int j)
+match_simd_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
- return !((i.types[j].bitfield.xmmword
- && !t->operand_types[j].bitfield.xmmword)
- || (i.types[j].bitfield.ymmword
- && !t->operand_types[j].bitfield.ymmword)
- || (i.types[j].bitfield.zmmword
- && !t->operand_types[j].bitfield.zmmword));
+ return !((i.types[given].bitfield.xmmword
+ && !t->operand_types[wanted].bitfield.xmmword)
+ || (i.types[given].bitfield.ymmword
+ && !t->operand_types[wanted].bitfield.ymmword)
+ || (i.types[given].bitfield.zmmword
+ && !t->operand_types[wanted].bitfield.zmmword));
}
-/* Return 1 if there is no conflict in any size on operand J for
- instruction template T. */
+/* Return 1 if there is no conflict in any size between operand GIVEN
+   and operand WANTED for instruction template T.  */
static INLINE int
-match_mem_size (const insn_template *t, unsigned int j)
+match_mem_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
- return (match_reg_size (t, j)
- && !((i.types[j].bitfield.unspecified
+ return (match_operand_size (t, wanted, given)
+ && !((i.types[given].bitfield.unspecified
&& !i.broadcast
- && !t->operand_types[j].bitfield.unspecified)
- || (i.types[j].bitfield.fword
- && !t->operand_types[j].bitfield.fword)
+ && !t->operand_types[wanted].bitfield.unspecified)
+ || (i.types[given].bitfield.fword
+ && !t->operand_types[wanted].bitfield.fword)
/* For scalar opcode templates to allow register and memory
operands at the same time, some special casing is needed
here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
down-conversion vpmov*. */
- || ((t->operand_types[j].bitfield.regsimd
+ || ((t->operand_types[wanted].bitfield.regsimd
&& !t->opcode_modifier.broadcast
- && (t->operand_types[j].bitfield.byte
- || t->operand_types[j].bitfield.word
- || t->operand_types[j].bitfield.dword
- || t->operand_types[j].bitfield.qword))
- ? (i.types[j].bitfield.xmmword
- || i.types[j].bitfield.ymmword
- || i.types[j].bitfield.zmmword)
- : !match_simd_size(t, j))));
+ && (t->operand_types[wanted].bitfield.byte
+ || t->operand_types[wanted].bitfield.word
+ || t->operand_types[wanted].bitfield.dword
+ || t->operand_types[wanted].bitfield.qword))
+ ? (i.types[given].bitfield.xmmword
+ || i.types[given].bitfield.ymmword
+ || i.types[given].bitfield.zmmword)
+ : !match_simd_size(t, wanted, given))));
}
-/* Return 1 if there is no size conflict on any operands for
- instruction template T. */
+/* Return value has MATCH_STRAIGHT set if there is no size conflict on any
+ operands for instruction template T, and it has MATCH_REVERSE set if there
+ is no size conflict on any operands for the template with operands reversed
+ (and the template allows for reversing in the first place). */
-static INLINE int
+#define MATCH_STRAIGHT 1
+#define MATCH_REVERSE 2
+
+static INLINE unsigned int
operand_size_match (const insn_template *t)
{
- unsigned int j;
- int match = 1;
+ unsigned int j, match = MATCH_STRAIGHT;
/* Don't check jump instructions. */
if (t->opcode_modifier.jump
continue;
if (t->operand_types[j].bitfield.reg
- && !match_reg_size (t, j))
+ && !match_operand_size (t, j, j))
{
match = 0;
break;
}
if (t->operand_types[j].bitfield.regsimd
- && !match_simd_size (t, j))
+ && !match_simd_size (t, j, j))
{
match = 0;
break;
}
if (t->operand_types[j].bitfield.acc
- && (!match_reg_size (t, j) || !match_simd_size (t, j)))
+ && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
{
match = 0;
break;
}
- if (i.types[j].bitfield.mem && !match_mem_size (t, j))
+ if (i.types[j].bitfield.mem && !match_mem_size (t, j, j))
{
match = 0;
break;
}
}
- if (match)
- return match;
- else if (!t->opcode_modifier.d)
+ if (!t->opcode_modifier.d)
{
mismatch:
- i.error = operand_size_mismatch;
- return 0;
+ if (!match)
+ i.error = operand_size_mismatch;
+ return match;
}
/* Check reverse. */
gas_assert (i.operands == 2);
- match = 1;
for (j = 0; j < 2; j++)
{
if ((t->operand_types[j].bitfield.reg
|| t->operand_types[j].bitfield.acc)
- && !match_reg_size (t, j ? 0 : 1))
+ && !match_operand_size (t, j, !j))
goto mismatch;
- if (i.types[j].bitfield.mem
- && !match_mem_size (t, j ? 0 : 1))
+ if (i.types[!j].bitfield.mem
+ && !match_mem_size (t, j, !j))
goto mismatch;
}
- return match;
+ return match | MATCH_REVERSE;
}
static INLINE int
&& flag_code == CODE_64BIT)
{
if ((i.prefix[REX_PREFIX] & prefix & REX_W)
- || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
- && (prefix & (REX_R | REX_X | REX_B))))
+ || (i.prefix[REX_PREFIX] & prefix & REX_R)
+ || (i.prefix[REX_PREFIX] & prefix & REX_X)
+ || (i.prefix[REX_PREFIX] & prefix & REX_B))
ret = PREFIX_EXIST;
q = REX_PREFIX;
}
vector_length = 1;
else
{
- unsigned int op;
+ int op;
+ /* Determine vector length from the last multi-length vector
+ operand. */
vector_length = 0;
- for (op = 0; op < t->operands; ++op)
+ for (op = t->operands - 1; op >= 0; op--)
if (t->operand_types[op].bitfield.xmmword
&& t->operand_types[op].bitfield.ymmword
&& i.types[op].bitfield.ymmword)
static INLINE bfd_boolean
is_evex_encoding (const insn_template *t)
{
- return t->opcode_modifier.evex
+ return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
|| t->opcode_modifier.broadcast || t->opcode_modifier.masking
|| t->opcode_modifier.staticrounding || t->opcode_modifier.sae;
}
if (!i.tm.opcode_modifier.evex
|| i.tm.opcode_modifier.evex == EVEXDYN)
{
- unsigned int op;
+ int op;
+ /* Determine vector length from the last multi-length vector
+ operand. */
vec_length = 0;
- for (op = 0; op < i.tm.operands; ++op)
+ for (op = i.operands - 1; op >= 0; op--)
if (i.tm.operand_types[op].bitfield.xmmword
+ i.tm.operand_types[op].bitfield.ymmword
+ i.tm.operand_types[op].bitfield.zmmword > 1)
{
if (i.types[op].bitfield.zmmword)
- i.tm.opcode_modifier.evex = EVEX512;
+ {
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ }
else if (i.types[op].bitfield.ymmword)
- i.tm.opcode_modifier.evex = EVEX256;
+ {
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ }
else if (i.types[op].bitfield.xmmword)
- i.tm.opcode_modifier.evex = EVEX128;
- else
- continue;
- break;
+ {
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ }
+ else if (i.broadcast && (int) op == i.broadcast->operand)
+ {
+ switch ((i.tm.operand_types[op].bitfield.dword ? 4 : 8)
+ * i.broadcast->type)
+ {
+ case 64:
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ case 32:
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ case 16:
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ default:
+ abort ();
+ }
+ break;
+ }
}
+
+ if (op < 0)
+ abort ();
}
switch (i.tm.opcode_modifier.evex)
&& is_evex_encoding (&i.tm)
&& (i.vec_encoding != vex_encoding_evex
|| i.tm.cpu_flags.bitfield.cpuavx512vl
+ || (i.tm.operand_types[2].bitfield.zmmword
+ && i.types[2].bitfield.ymmword)
|| cpu_arch_isa_flags.bitfield.cpuavx512vl)))
&& ((i.tm.base_opcode == 0x55
|| i.tm.base_opcode == 0x6655
}
/* Insert BND prefix. */
- if (add_bnd_prefix
- && i.tm.opcode_modifier.bndprefixok
- && !i.prefix[BND_PREFIX])
- add_prefix (BND_PREFIX_OPCODE);
+ if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
+ {
+ if (!i.prefix[BND_PREFIX])
+ add_prefix (BND_PREFIX_OPCODE);
+ else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
+ {
+ as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
+ i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;
+ }
+ }
/* Check string instruction segment overrides. */
if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
{
if (i.broadcast)
i.memshift = t->operand_types[op].bitfield.dword ? 2 : 3;
- else
+ else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
i.memshift = t->opcode_modifier.disp8memshift;
+ else
+ {
+ const i386_operand_type *type = NULL;
+
+ i.memshift = 0;
+ for (op = 0; op < i.operands; op++)
+ if (operand_type_check (i.types[op], anymem))
+ {
+ if (t->operand_types[op].bitfield.xmmword
+ + t->operand_types[op].bitfield.ymmword
+ + t->operand_types[op].bitfield.zmmword <= 1)
+ type = &t->operand_types[op];
+ else if (!i.types[op].bitfield.unspecified)
+ type = &i.types[op];
+ }
+ else if (i.types[op].bitfield.regsimd)
+ {
+ if (i.types[op].bitfield.zmmword)
+ i.memshift = 6;
+ else if (i.types[op].bitfield.ymmword && i.memshift < 5)
+ i.memshift = 5;
+ else if (i.types[op].bitfield.xmmword && i.memshift < 4)
+ i.memshift = 4;
+ }
+
+ if (type)
+ {
+ if (type->bitfield.zmmword)
+ i.memshift = 6;
+ else if (type->bitfield.ymmword)
+ i.memshift = 5;
+ else if (type->bitfield.xmmword)
+ i.memshift = 4;
+ }
+
+ /* For the check in fits_in_disp8(). */
+ if (i.memshift == 0)
+ i.memshift = -1;
+ }
for (op = 0; op < i.operands; op++)
if (operand_type_check (i.types[op], disp)
i386_operand_type operand_types [MAX_OPERANDS];
int addr_prefix_disp;
unsigned int j;
- unsigned int found_cpu_match;
+ unsigned int found_cpu_match, size_match;
unsigned int check_register;
enum i386_error specific_error = 0;
|| (t->opcode_modifier.no_ldsuf && mnemsuf_check.no_ldsuf))
continue;
- if (!operand_size_match (t))
+ size_match = operand_size_match (t);
+ if (!size_match)
continue;
for (j = 0; j < MAX_OPERANDS; j++)
&& flag_code != CODE_64BIT
&& (intel_syntax
? (!t->opcode_modifier.ignoresize
+ && !t->opcode_modifier.broadcast
&& !intel_float_operand (t->name))
: intel_float_operand (t->name) != 2)
&& ((!operand_types[0].bitfield.regmmx
&& i.types[0].bitfield.acc
&& operand_type_check (i.types[1], anymem))
continue;
+ if (!(size_match & MATCH_STRAIGHT))
+ goto check_reverse;
/* If we want store form, we reverse direction of operands. */
if (i.dir_encoding == dir_encoding_store
&& t->opcode_modifier.d)
continue;
check_reverse:
+ if (!(size_match & MATCH_REVERSE))
+ continue;
/* Try reversing direction of operands. */
overlap0 = operand_type_and (i.types[0], operand_types[1]);
overlap1 = operand_type_and (i.types[1], operand_types[0]);
/* Now select between word & dword operations via the operand
size prefix, except for instructions that will ignore this
prefix anyway. */
- if (i.tm.opcode_modifier.addrprefixop0)
+ if (i.reg_operands > 0
+ && i.types[0].bitfield.reg
+ && i.tm.opcode_modifier.addrprefixopreg
+ && (i.tm.opcode_modifier.immext
+ || i.operands == 1))
{
/* The address size override prefix changes the size of the
first operand. */
if ((flag_code == CODE_32BIT
- && i.op->regs[0].reg_type.bitfield.word)
+ && i.op[0].regs->reg_type.bitfield.word)
|| (flag_code != CODE_32BIT
- && i.op->regs[0].reg_type.bitfield.dword))
+ && i.op[0].regs->reg_type.bitfield.dword))
if (!add_prefix (ADDR_PREFIX_OPCODE))
return 0;
}
break;
}
+ if (i.reg_operands != 0
+ && i.operands > 1
+ && i.tm.opcode_modifier.addrprefixopreg
+ && !i.tm.opcode_modifier.immext)
+ {
+ /* Check invalid register operand when the address size override
+ prefix changes the size of register operands. */
+ unsigned int op;
+ enum { need_word, need_dword, need_qword } need;
+
+ if (flag_code == CODE_32BIT)
+ need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
+ else
+ {
+ if (i.prefix[ADDR_PREFIX])
+ need = need_dword;
+ else
+ need = flag_code == CODE_64BIT ? need_qword : need_word;
+ }
+
+ for (op = 0; op < i.operands; op++)
+ if (i.types[op].bitfield.reg
+ && ((need == need_word
+ && !i.op[op].regs->reg_type.bitfield.word)
+ || (need == need_dword
+ && !i.op[op].regs->reg_type.bitfield.dword)
+ || (need == need_qword
+ && !i.op[op].regs->reg_type.bitfield.qword)))
+ {
+ as_bad (_("invalid register operand size for `%s'"),
+ i.tm.name);
+ return 0;
+ }
+ }
+
return 1;
}
if ((i.op[source].regs->reg_flags & RegVRex) != 0)
i.vrex |= REX_R;
}
- if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
+ if (flag_code != CODE_64BIT && (i.rex & REX_R))
{
- if (!i.types[0].bitfield.control
- && !i.types[1].bitfield.control)
+ if (!i.types[i.tm.operand_types[0].bitfield.regmem].bitfield.control)
abort ();
- i.rex &= ~(REX_R | REX_B);
+ i.rex &= ~REX_R;
add_prefix (LOCK_PREFIX_OPCODE);
}
}
if (i.tm.base_opcode & 0xff000000)
{
prefix = (i.tm.base_opcode >> 24) & 0xff;
- goto check_prefix;
+ add_prefix (prefix);
}
break;
case 2:
if ((i.tm.base_opcode & 0xff0000) != 0)
{
prefix = (i.tm.base_opcode >> 16) & 0xff;
- if (i.tm.cpu_flags.bitfield.cpupadlock)
- {
-check_prefix:
- if (prefix != REPE_PREFIX_OPCODE
- || (i.prefix[REP_PREFIX]
- != REPE_PREFIX_OPCODE))
- add_prefix (prefix);
- }
- else
+ if (!i.tm.cpu_flags.bitfield.cpupadlock
+ || prefix != REPE_PREFIX_OPCODE
+ || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
add_prefix (prefix);
}
break;
/* Special case for (%dx) while doing input/output op. */
if (i.base_reg
- && operand_type_equal (&i.base_reg->reg_type,
- ®16_inoutportreg)
+ && i.base_reg->reg_type.bitfield.inoutportreg
&& i.index_reg == 0
&& i.log2_scale_factor == 0
&& i.seg[i.mem_operands] == 0
&& !operand_type_check (i.types[this_operand], disp))
{
- i.types[this_operand] = inoutportreg;
+ i.types[this_operand] = i.base_reg->reg_type;
return 1;
}
i.vec_encoding = vex_encoding_evex;
}
- if (((r->reg_flags & (RegRex64 | RegRex))
- || r->reg_type.bitfield.qword)
- && (!cpu_arch_flags.bitfield.cpulm
- || !operand_type_equal (&r->reg_type, &control))
+ if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
+ && (!cpu_arch_flags.bitfield.cpulm || !r->reg_type.bitfield.control)
&& flag_code != CODE_64BIT)
return (const reg_entry *) NULL;