/* Swap operand in encoding. */
unsigned int swap_operand;
+ /* Force 32bit displacement in encoding. */
+ unsigned int disp32_encoding;
+
/* Error message. */
enum i386_error error;
};
static unsigned int object_64bit;
static int use_rela_relocations = 0;
+/* The ELF ABI to use.  Set in i386_target_format from default_arch
+   and consumed there to select the output format and relocation
+   style (see the bfd_target_elf_flavour switch below).  */
+enum x86_elf_abi
+{
+ I386_ABI,		/* 32bit i386 ELF.  */
+ X86_64_LP64_ABI,	/* 64bit x86-64 ELF.  */
+ X86_64_ILP32_ABI	/* 32bit (ILP32) x86-64 ELF, i.e. "x86_64:32".  */
+};
+
+/* Current ABI; defaults to i386 until i386_target_format runs.  */
+static enum x86_elf_abi x86_elf_abi = I386_ABI;
+
/* The names used to print error messages. */
static const char *flag_code_names[] =
{
unsigned long
i386_mach ()
{
- if (!strcmp (default_arch, "x86_64"))
+ if (!strncmp (default_arch, "x86_64", 6))
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
+ if (OUTPUT_FLAVOR != bfd_target_elf_flavour
+ || default_arch[6] != '\0')
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_mach_l1om;
}
- else
+ else if (default_arch[6] == '\0')
return bfd_mach_x86_64;
+ else
+ return bfd_mach_x64_32;
}
else if (!strcmp (default_arch, "i386"))
return bfd_mach_i386_i386;
/* Don't optimize displacement for movabs since it only takes 64bit
displacement. */
if (i.disp_operands
+ && !i.disp32_encoding
&& (flag_code != CODE_64BIT
|| strcmp (mnemonic, "movabs") != 0))
optimize_disp ();
if (!current_templates)
{
- /* Check if we should swap operand in encoding. */
+ /* Check if we should swap operand or force 32bit displacement in
+ encoding. */
if (mnem_p - 2 == dot_p && dot_p[1] == 's')
i.swap_operand = 1;
+ else if (mnem_p - 4 == dot_p
+ && dot_p[1] == 'd'
+ && dot_p[2] == '3'
+ && dot_p[3] == '2')
+ i.disp32_encoding = 1;
else
goto check_suffix;
mnem_p = dot_p;
}
}
- /* We check register size only if size of operands can be
- encoded the canonical way. */
- check_register = t->opcode_modifier.w;
+ /* We check register size if needed. */
+ check_register = t->opcode_modifier.checkregsize;
overlap0 = operand_type_and (i.types[0], operand_types[0]);
switch (t->operands)
{
output_branch (void)
{
char *p;
+ int size;
int code16;
int prefix;
relax_substateT subtype;
symbolS *sym;
offsetT off;
- code16 = 0;
- if (flag_code == CODE_16BIT)
- code16 = CODE16;
+ code16 = flag_code == CODE_16BIT ? CODE16 : 0;
+ size = i.disp32_encoding ? BIG : SMALL;
prefix = 0;
if (i.prefix[DATA_PREFIX] != 0)
*p = i.tm.base_opcode;
if ((unsigned char) *p == JUMP_PC_RELATIVE)
- subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
else if (cpu_arch_flags.bitfield.cpui386)
- subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
else
- subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
+ subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
subtype |= code16;
sym = i.op[0].disps->X_add_symbol;
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
+#define OPTION_N32 (OPTION_MD_BASE + 13)
struct option md_longopts[] =
{
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
{"64", no_argument, NULL, OPTION_64},
+#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ {"n32", no_argument, NULL, OPTION_N32},
#endif
{"divide", no_argument, NULL, OPTION_DIVIDE},
{"march", required_argument, NULL, OPTION_MARCH},
break;
#endif
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ case OPTION_N32:
+ if (IS_ELF)
+ {
+ const char **list, **l;
+
+ list = bfd_target_list ();
+ for (l = list; *l != NULL; l++)
+ if (CONST_STRNEQ (*l, "elf32-x86-64"))
+ {
+ default_arch = "x86_64:32";
+ break;
+ }
+ if (*l == NULL)
+ as_fatal (_("No compiled in support for 32bit x86_64"));
+ free (list);
+ }
+ else
+ as_fatal (_("32bit x86_64 is only supported for ELF"));
+ break;
+#endif
+
case OPTION_32:
default_arch = "i386";
break;
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP))
fprintf (stream, _("\
- --32/--64 generate 32bit/64bit code\n"));
+ --32/--64/--n32 generate 32bit/64bit/n32bit code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
fprintf (stream, _("\
const char *
i386_target_format (void)
{
- if (!strcmp (default_arch, "x86_64"))
- update_code_flag (CODE_64BIT, 1);
+ if (!strncmp (default_arch, "x86_64", 6))
+ {
+ update_code_flag (CODE_64BIT, 1);
+ if (default_arch[6] == '\0')
+ x86_elf_abi = X86_64_LP64_ABI;
+ else
+ x86_elf_abi = X86_64_ILP32_ABI;
+ }
else if (!strcmp (default_arch, "i386"))
update_code_flag (CODE_32BIT, 1);
else
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
case bfd_target_elf_flavour:
{
- if (flag_code == CODE_64BIT)
+ const char *format;
+
+ switch (x86_elf_abi)
{
+ default:
+ format = ELF_TARGET_FORMAT;
+ break;
+ case X86_64_LP64_ABI:
+ use_rela_relocations = 1;
object_64bit = 1;
+ format = ELF_TARGET_FORMAT64;
+ break;
+ case X86_64_ILP32_ABI:
use_rela_relocations = 1;
+ object_64bit = 1;
+ format = ELF_TARGET_FORMAT32;
+ break;
}
if (cpu_arch_isa == PROCESSOR_L1OM)
{
- if (flag_code != CODE_64BIT)
+ if (x86_elf_abi != X86_64_LP64_ABI)
as_fatal (_("Intel L1OM is 64bit only"));
return ELF_TARGET_L1OM_FORMAT;
}
else
- return (flag_code == CODE_64BIT
- ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
+ return format;
}
#endif
#if defined (OBJ_MACH_O)
if (letter == 'l')
return SHF_X86_64_LARGE;
- *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
}
else
- *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
+ *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
return -1;
}