+ else if (i.vec_encoding != vex_encoding_evex
+ && !i.types[0].bitfield.zmmword
+ && !i.types[1].bitfield.zmmword
+ && !i.mask
+ && !i.broadcast
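+ /* Masking, broadcasting and 512-bit operands have no VEX
+ encoding, so any of them forces EVEX. */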
+ && is_evex_encoding (&i.tm)
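+ /* Base opcodes 0x666f, 0xf36f and 0xf26f are vmovdqa32/64,
+ vmovdqu32/64 and vmovdqu8/16; masking out Opcode_SIMD_IntD
+ (the 0x6f/0x7f load/store direction bit) accepts the store
+ forms as well. Masking out bit 2 folds vpandn onto vpand
+ (0x66db) and vpxor onto vpor (0x66eb). */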
+ && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
+ || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
+ || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
+ || (i.tm.base_opcode & ~4) == 0x66db
+ || (i.tm.base_opcode & ~4) == 0x66eb)
+ && i.tm.extension_opcode == None)
+ {
+ /* Optimize: -O1:
+ VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
+ vmovdqu32 and vmovdqu64:
+ EVEX VOP %xmmM, %xmmN
+ -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
+ EVEX VOP %ymmM, %ymmN
+ -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
+ EVEX VOP %xmmM, mem
+ -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
+ EVEX VOP %ymmM, mem
+ -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
+ EVEX VOP mem, %xmmN
+ -> VEX vmovdqa|vmovdqu mem, %xmmN (N < 16)
+ EVEX VOP mem, %ymmN
+ -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
+ VOP, one of vpand, vpandn, vpor, vpxor:
+ EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
+ -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
+ EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
+ -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
+ EVEX VOP{d,q} mem, %xmmM, %xmmN
+ -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
+ EVEX VOP{d,q} mem, %ymmM, %ymmN
+ -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
+ */
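+ /* Before rewriting the template, check how a constant
+ displacement, if any, would encode under each prefix. */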
+ for (j = 0; j < i.operands; j++)
+ if (operand_type_check (i.types[j], disp)
+ && i.op[j].disps->X_op == O_constant)
+ {
+ /* The VEX prefix takes 2 or 3 bytes and the EVEX prefix takes
+ 4 bytes, while EVEX Disp8 takes 1 byte and VEX Disp32 takes 4
+ bytes, so choose EVEX Disp8 over VEX Disp32. */
+ int evex_disp8, vex_disp8;
+ unsigned int memshift = i.memshift;
+ offsetT n = i.op[j].disps->X_add_number;
+
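+ /* fits_in_disp8 scales by i.memshift: with the EVEX memshift
+ it tests the compressed Disp8 form, and with i.memshift
+ cleared it tests the plain VEX Disp8. E.g. a displacement of
+ 256 with 16-byte operands (memshift 4) fits EVEX Disp8 as
+ 256 >> 4 = 16, but needs a VEX Disp32. */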
+ evex_disp8 = fits_in_disp8 (n);
+ i.memshift = 0;
+ vex_disp8 = fits_in_disp8 (n);
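+ /* Disp8 applies to one encoding but not the other; following
+ the reasoning above, restore i.memshift and keep the
+ instruction in its EVEX form. */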
+ if (evex_disp8 != vex_disp8)
+ {
+ i.memshift = memshift;
+ return;
+ }
+
+ i.types[j].bitfield.disp8 = vex_disp8;
+ break;
+ }
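+ /* There is no VEX counterpart of vmovdqu8/vmovdqu16 (F2
+ prefix); flip F2 to F3 to get plain vmovdqu. The XOR leaves
+ the 0x6f/0x7f direction bit intact. */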
+ if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
+ i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
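+ /* Use 256-bit VEX for ymm operands and 128-bit otherwise,
+ with VEX.W = 0: the VEX forms of these instructions do not
+ encode an element size. */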
+ i.tm.opcode_modifier.vex
+ = i.types[0].bitfield.ymmword ? VEX256 : VEX128;
+ i.tm.opcode_modifier.vexw = VEXW0;
+ /* VPAND, VPOR and VPXOR are commutative; VPANDN (0x66df) is not. */
+ if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
+ i.tm.opcode_modifier.commutative = 1;
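+ /* Drop all EVEX-only attributes. */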
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.masking = 0;
+ i.tm.opcode_modifier.broadcast = 0;
+ i.tm.opcode_modifier.disp8memshift = 0;
+ i.memshift = 0;
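+ /* With i.memshift cleared, recheck whether the displacement
+ fits the unscaled VEX Disp8. */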
+ if (j < i.operands)
+ i.types[j].bitfield.disp8
+ = fits_in_disp8 (i.op[j].disps->X_add_number);
+ }