/* X86-64 specific support for ELF
- Copyright (C) 2000-2017 Free Software Foundation, Inc.
+ Copyright (C) 2000-2020 Free Software Foundation, Inc.
Contributed by Jan Hubicka <jh@suse.cz>.
This file is part of BFD, the Binary File Descriptor library.
FALSE)
};
-/* Set if a relocation is converted from a GOTPCREL relocation. */
-#define R_X86_64_converted_reloc_bit (1 << 7)
-
#define X86_PCREL_TYPE_P(TYPE) \
( ((TYPE) == R_X86_64_PC8) \
|| ((TYPE) == R_X86_64_PC16) \
if (r_type >= (unsigned int) R_X86_64_standard)
{
/* xgettext:c-format */
- _bfd_error_handler (_("%B: invalid relocation type %d"),
- abfd, (int) r_type);
- r_type = R_X86_64_NONE;
+ _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
+ abfd, r_type);
+ bfd_set_error (bfd_error_bad_value);
+ return NULL;
}
i = r_type;
}
/* Given an x86_64 ELF reloc type, fill in an arelent structure. */
-static void
-elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
+static bfd_boolean
+elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
Elf_Internal_Rela *dst)
{
unsigned r_type;
r_type = ELF32_R_TYPE (dst->r_info);
- if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
- && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
- r_type &= ~R_X86_64_converted_reloc_bit;
cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
-
+ if (cache_ptr->howto == NULL)
+ return FALSE;
BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
+ return TRUE;
}
\f
/* Support for core dump NOTE sections. */
}
#ifdef CORE_HEADER
+# if GCC_VERSION >= 8000
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wstringop-truncation"
+# endif
static char *
elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
int note_type, ...)
}
/* NOTREACHED */
}
+# if GCC_VERSION >= 8000
+# pragma GCC diagnostic pop
+# endif
#endif
\f
/* Functions for the x86-64 ELF linker. */
static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
+ 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
- 0x0f, 0x1f, 0 /* nopl (%rax) */
+ 0x0f, 0x1f, 0 /* nopl (%rax) */
};
/* Subsequent entries for branches with BND prefx in a lazy procedure
static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0x68, 0, 0, 0, 0, /* pushq immediate */
- 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
- 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
+ 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
};
/* The first entry in the IBT-enabled lazy procedure linkage table is the
static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
- 0x68, 0, 0, 0, 0, /* pushq immediate */
- 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
- 0x90 /* nop */
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
+ 0x90 /* nop */
};
/* The first entry in the x32 IBT-enabled lazy procedure linkage table
static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
- 0x68, 0, 0, 0, 0, /* pushq immediate */
- 0xe9, 0, 0, 0, 0, /* jmpq relative */
- 0x66, 0x90 /* xchg %ax,%ax */
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xe9, 0, 0, 0, 0, /* jmpq relative */
+ 0x66, 0x90 /* xchg %ax,%ax */
};
/* Entries in the non-lazey procedure linkage table look like this. */
static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
- 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
- 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x66, 0x90 /* xchg %ax,%ax */
+ 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x66, 0x90 /* xchg %ax,%ax */
};
/* Entries for branches with BND prefix in the non-lazey procedure
static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
- 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
- 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x90 /* nop */
+ 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x90 /* nop */
};
/* Entries for branches with IBT-enabled in the non-lazey procedure
static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
- 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
+ 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
};
/* Entries for branches with IBT-enabled in the x32 non-lazey procedure
static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
- 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
- 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
};
+/* The TLSDESC entry in a lazy procedure linkage table. */
+static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
+ 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
+};
+
/* .eh_frame covering the lazy .plt section. */
static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
-/* Architecture-specific backend data for x86-64. */
-
-struct elf_x86_64_backend_data
-{
- /* Target system. */
- enum
- {
- is_normal,
- is_nacl
- } os;
-};
-
-#define get_elf_x86_64_arch_data(bed) \
- ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
-
-#define get_elf_x86_64_backend_data(abfd) \
- get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
-
/* These are the standard parameters. */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
{
- elf_x86_64_lazy_plt0_entry, /* plt0_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
- elf_x86_64_lazy_plt_entry, /* plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt0_got1_offset */
- 8, /* plt0_got2_offset */
- 12, /* plt0_got2_insn_end */
- 2, /* plt_got_offset */
- 7, /* plt_reloc_offset */
- 12, /* plt_plt_offset */
- 6, /* plt_got_insn_size */
- LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
- 6, /* plt_lazy_offset */
- elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
- elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
- elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
+ elf_x86_64_lazy_plt0_entry, /* plt0_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
+ elf_x86_64_lazy_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
+ 6, /* plt_tlsdesc_got1_offset */
+ 12, /* plt_tlsdesc_got2_offset */
+ 10, /* plt_tlsdesc_got1_insn_end */
+ 16, /* plt_tlsdesc_got2_insn_end */
+ 2, /* plt0_got1_offset */
+ 8, /* plt0_got2_offset */
+ 12, /* plt0_got2_insn_end */
+ 2, /* plt_got_offset */
+ 7, /* plt_reloc_offset */
+ 12, /* plt_plt_offset */
+ 6, /* plt_got_insn_size */
+ LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
+ 6, /* plt_lazy_offset */
+ elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
+ elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
+ elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
{
- elf_x86_64_non_lazy_plt_entry, /* plt_entry */
- elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
- NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt_got_offset */
- 6, /* plt_got_insn_size */
- elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ elf_x86_64_non_lazy_plt_entry, /* plt_entry */
+ elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
+ NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt_got_offset */
+ 6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
{
- elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
- elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt0_got1_offset */
- 1+8, /* plt0_got2_offset */
- 1+12, /* plt0_got2_insn_end */
- 1+2, /* plt_got_offset */
- 1, /* plt_reloc_offset */
- 7, /* plt_plt_offset */
- 1+6, /* plt_got_insn_size */
- 11, /* plt_plt_insn_end */
- 0, /* plt_lazy_offset */
- elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
- elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
- elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
+ elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
+ elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
+ 6, /* plt_tlsdesc_got1_offset */
+ 12, /* plt_tlsdesc_got2_offset */
+ 10, /* plt_tlsdesc_got1_insn_end */
+ 16, /* plt_tlsdesc_got2_insn_end */
+ 2, /* plt0_got1_offset */
+ 1+8, /* plt0_got2_offset */
+ 1+12, /* plt0_got2_insn_end */
+ 1+2, /* plt_got_offset */
+ 1, /* plt_reloc_offset */
+ 7, /* plt_plt_offset */
+ 1+6, /* plt_got_insn_size */
+ 11, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
+ elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
+ elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
{
- elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
- elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
- NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 1+2, /* plt_got_offset */
- 1+6, /* plt_got_insn_size */
- elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
+ elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
+ NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 1+2, /* plt_got_offset */
+ 1+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
{
- elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
- elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt0_got1_offset */
- 1+8, /* plt0_got2_offset */
- 1+12, /* plt0_got2_insn_end */
- 4+1+2, /* plt_got_offset */
- 4+1, /* plt_reloc_offset */
- 4+1+6, /* plt_plt_offset */
- 4+1+6, /* plt_got_insn_size */
- 4+1+5+5, /* plt_plt_insn_end */
- 0, /* plt_lazy_offset */
- elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
- elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
- elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
+ elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
+ 6, /* plt_tlsdesc_got1_offset */
+ 12, /* plt_tlsdesc_got2_offset */
+ 10, /* plt_tlsdesc_got1_insn_end */
+ 16, /* plt_tlsdesc_got2_insn_end */
+ 2, /* plt0_got1_offset */
+ 1+8, /* plt0_got2_offset */
+ 1+12, /* plt0_got2_insn_end */
+ 4+1+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+1+6, /* plt_plt_offset */
+ 4+1+6, /* plt_got_insn_size */
+ 4+1+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
+ elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
+ elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
{
- elf_x86_64_lazy_plt0_entry, /* plt0_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
- elf_x32_lazy_ibt_plt_entry, /* plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt0_got1_offset */
- 8, /* plt0_got2_offset */
- 12, /* plt0_got2_insn_end */
- 4+2, /* plt_got_offset */
- 4+1, /* plt_reloc_offset */
- 4+6, /* plt_plt_offset */
- 4+6, /* plt_got_insn_size */
- 4+5+5, /* plt_plt_insn_end */
- 0, /* plt_lazy_offset */
- elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
- elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
- elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ elf_x86_64_lazy_plt0_entry, /* plt0_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
+ elf_x32_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
+ 6, /* plt_tlsdesc_got1_offset */
+ 12, /* plt_tlsdesc_got2_offset */
+ 10, /* plt_tlsdesc_got1_insn_end */
+ 16, /* plt_tlsdesc_got2_insn_end */
+ 2, /* plt0_got1_offset */
+ 8, /* plt0_got2_offset */
+ 12, /* plt0_got2_insn_end */
+ 4+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+6, /* plt_plt_offset */
+ 4+6, /* plt_got_insn_size */
+ 4+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
+ elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
+ elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
{
- elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
- elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 4+1+2, /* plt_got_offset */
- 4+1+6, /* plt_got_insn_size */
- elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
+ elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+1+2, /* plt_got_offset */
+ 4+1+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
{
- elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
- elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
- LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
- 4+2, /* plt_got_offset */
- 4+6, /* plt_got_insn_size */
- elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
+ elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+2, /* plt_got_offset */
+ 4+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
-static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
+static const struct elf_x86_backend_data elf_x86_64_arch_bed =
{
- is_normal /* os */
+ is_normal /* os */
};
#define elf_backend_arch_data &elf_x86_64_arch_bed
addr32 call __tls_get_addr
can transit to different access model. For largepic,
we also support:
- leaq foo@tlsgd(%rip), %rdi
- movabsq $__tls_get_addr@pltoff, %rax
- addq $r15, %rax
- call *%rax
+ leaq foo@tlsgd(%rip), %rdi
+ movabsq $__tls_get_addr@pltoff, %rax
+ addq $r15, %rax
+ call *%rax
or
- leaq foo@tlsgd(%rip), %rdi
- movabsq $__tls_get_addr@pltoff, %rax
- addq $rbx, %rax
- call *%rax */
+ leaq foo@tlsgd(%rip), %rdi
+ movabsq $__tls_get_addr@pltoff, %rax
+ addq $rbx, %rax
+ call *%rax */
static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
/* Check transition from LD access model. Only
leaq foo@tlsld(%rip), %rdi;
call __tls_get_addr@PLT
- or
+ or
leaq foo@tlsld(%rip), %rdi;
call *__tls_get_addr@GOTPCREL(%rip)
which may be converted to
addr32 call __tls_get_addr
can transit to different access model. For largepic
we also support:
- leaq foo@tlsld(%rip), %rdi
- movabsq $__tls_get_addr@pltoff, %rax
- addq $r15, %rax
- call *%rax
+ leaq foo@tlsld(%rip), %rdi
+ movabsq $__tls_get_addr@pltoff, %rax
+ addq $r15, %rax
+ call *%rax
or
- leaq foo@tlsld(%rip), %rdi
- movabsq $__tls_get_addr@pltoff, %rax
- addq $rbx, %rax
- call *%rax */
+ leaq foo@tlsld(%rip), %rdi
+ movabsq $__tls_get_addr@pltoff, %rax
+ addq $rbx, %rax
+ call *%rax */
static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
case R_X86_64_GOTPC32_TLSDESC:
/* Check transition from GDesc access model:
- leaq x@tlsdesc(%rip), %rax
+ leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
+ rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
Make sure it's a leaq adding rip to a 32-bit offset
into any register, although it's probably almost always
return FALSE;
val = bfd_get_8 (abfd, contents + offset - 3);
- if ((val & 0xfb) != 0x48)
+ val &= 0xfb;
+ if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
return FALSE;
if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
case R_X86_64_TLSDESC_CALL:
/* Check transition from GDesc access model:
- call *x@tlsdesc(%rax)
+ call *x@tlsdesc(%rax) <--- LP64 mode.
+ call *x@tlsdesc(%eax) <--- X32 mode.
*/
if (offset + 2 <= sec->size)
{
- /* Make sure that it's a call *x@tlsdesc(%rax). */
+ unsigned int prefix;
call = contents + offset;
- return call[0] == 0xff && call[1] == 0x10;
+ prefix = 0;
+ if (!ABI_64_P (abfd))
+ {
+ /* Check for call *x@tlsdesc(%eax). */
+ if (call[0] == 0x67)
+ {
+ prefix = 1;
+ if (offset + 3 > sec->size)
+ return FALSE;
+ }
+ }
+ /* Make sure that it's a call *x@tlsdesc(%rax). */
+ return call[prefix] == 0xff && call[1 + prefix] == 0x10;
}
return FALSE;
from = elf_x86_64_rtype_to_howto (abfd, from_type);
to = elf_x86_64_rtype_to_howto (abfd, to_type);
+ if (from == NULL || to == NULL)
+ return FALSE;
+
if (h)
name = h->root.root.string;
else
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
- "in section `%A' failed"),
- abfd, from->name, to->name, name, rel->r_offset, sec);
+ (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
+ " in section `%pA' failed"),
+ abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
v = _("protected symbol ");
else
v = _("symbol ");
- pic = _("; recompile with -fPIC");
+ pic = NULL;
break;
}
- if (!h->def_regular && !h->def_dynamic)
+ if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
und = _("undefined ");
}
else
{
name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
- pic = _("; recompile with -fPIC");
+ pic = NULL;
}
if (bfd_link_dll (info))
- object = _("a shared object");
- else if (bfd_link_pie (info))
- object = _("a PIE object");
+ {
+ object = _("a shared object");
+ if (!pic)
+ pic = _("; recompile with -fPIC");
+ }
else
- object = _("a PDE object");
+ {
+ if (bfd_link_pie (info))
+ object = _("a PIE object");
+ else
+ object = _("a PDE object");
+ if (!pic)
+ pic = _("; recompile with -fPIE");
+ }
/* xgettext:c-format */
- _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
+ _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
"not be used when making %s%s"),
input_bfd, howto->name, und, v, name,
object, pic);
bfd_boolean no_overflow;
bfd_boolean relocx;
bfd_boolean to_reloc_pc32;
+ bfd_boolean abs_symbol;
+ bfd_boolean local_ref;
asection *tsec;
bfd_signed_vma raddend;
unsigned int opcode;
unsigned int r_type = *r_type_p;
unsigned int r_symndx;
bfd_vma roff = irel->r_offset;
+ bfd_vma abs_relocation;
if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
return TRUE;
|| no_overflow
|| is_pic);
+ abs_symbol = FALSE;
+ abs_relocation = 0;
+
/* Get the symbol referred to by the reloc. */
if (h == NULL)
{
if (isym->st_shndx == SHN_UNDEF)
return TRUE;
+ local_ref = TRUE;
if (isym->st_shndx == SHN_ABS)
- tsec = bfd_abs_section_ptr;
+ {
+ tsec = bfd_abs_section_ptr;
+ abs_symbol = TRUE;
+ abs_relocation = isym->st_value;
+ }
else if (isym->st_shndx == SHN_COMMON)
tsec = bfd_com_section_ptr;
else if (isym->st_shndx == SHN_X86_64_LCOMMON)
GOTPCRELX relocations since we need to modify REX byte.
It is OK convert mov with R_X86_64_GOTPCREL to
R_X86_64_PC32. */
- bfd_boolean local_ref;
struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
+ abs_symbol = ABS_SYMBOL_P (h);
+ abs_relocation = h->root.u.def.value;
+
/* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
if ((relocx || opcode == 0x8b)
if (no_overflow)
return TRUE;
-convert:
+ convert:
if (opcode == 0xff)
{
/* We have "call/jmp *foo@GOTPCREL(%rip)". */
}
else
{
- nop = link_info->call_nop_byte;
- if (link_info->call_nop_as_suffix)
+ nop = htab->params->call_nop_byte;
+ if (htab->params->call_nop_as_suffix)
{
nop_offset = irel->r_offset + 3;
disp = bfd_get_32 (abfd, contents + irel->r_offset);
if (opcode == 0x8b)
{
+ if (abs_symbol && local_ref)
+ to_reloc_pc32 = FALSE;
+
if (to_reloc_pc32)
{
/* Convert "mov foo@GOTPCREL(%rip), %reg" to
overflow when sign-extending imm32 to imm64. */
r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
-rewrite_modrm_rex:
+ rewrite_modrm_rex:
+ if (abs_relocation)
+ {
+ /* Check if R_X86_64_32S/R_X86_64_32 fits. */
+ if (r_type == R_X86_64_32S)
+ {
+ if ((abs_relocation + 0x80000000) > 0xffffffff)
+ return TRUE;
+ }
+ else
+ {
+ if (abs_relocation > 0xffffffff)
+ return TRUE;
+ }
+ }
+
bfd_put_8 (abfd, modrm, contents + roff - 1);
if (rex)
const char *name;
bfd_boolean size_reloc;
bfd_boolean converted_reloc;
+ bfd_boolean no_dynreloc;
r_symndx = htab->r_sym (rel->r_info);
r_type = ELF32_R_TYPE (rel->r_info);
if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
{
/* xgettext:c-format */
- _bfd_error_handler (_("%B: bad symbol index: %d"),
+ _bfd_error_handler (_("%pB: bad symbol index: %d"),
abfd, r_symndx);
goto error_return;
}
NULL);
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: relocation %s against symbol `%s' isn't "
+ (_("%pB: relocation %s against symbol `%s' isn't "
"supported in x32 mode"), abfd,
x86_64_elf_howto_table[r_type].name, name);
bfd_set_error (bfd_error_bad_value);
{
/* It is referenced by a non-shared object. */
h->ref_regular = 1;
- h->root.non_ir_ref_regular = 1;
-
- if (h->type == STT_GNU_IFUNC)
- elf_tdata (info->output_bfd)->has_gnu_symbols
- |= elf_gnu_symbol_ifunc;
}
converted_reloc = FALSE;
converted = TRUE;
}
+ if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
+ symtab_hdr, &no_dynreloc))
+ return FALSE;
+
if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
symtab_hdr, sym_hashes,
&r_type, GOT_UNKNOWN,
rel, rel_end, h, r_symndx, FALSE))
goto error_return;
+ /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
+ if (h == htab->elf.hgot)
+ htab->got_referenced = TRUE;
+
eh = (struct elf_x86_link_hash_entry *) h;
switch (r_type)
{
switch (r_type)
{
- default: tls_type = GOT_NORMAL; break;
- case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
- case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
+ default:
+ tls_type = GOT_NORMAL;
+ if (h)
+ {
+ if (ABS_SYMBOL_P (h))
+ tls_type = GOT_ABS;
+ }
+ else if (isym->st_shndx == SHN_ABS)
+ tls_type = GOT_ABS;
+ break;
+ case R_X86_64_TLSGD:
+ tls_type = GOT_TLS_GD;
+ break;
+ case R_X86_64_GOTTPOFF:
+ tls_type = GOT_TLS_IE;
+ break;
case R_X86_64_GOTPC32_TLSDESC:
case R_X86_64_TLSDESC_CALL:
- tls_type = GOT_TLS_GDESC; break;
+ tls_type = GOT_TLS_GDESC;
+ break;
}
if (h != NULL)
isym, NULL);
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: '%s' accessed both as normal and"
+ (_("%pB: '%s' accessed both as normal and"
" thread local symbol"),
abfd, name);
bfd_set_error (bfd_error_bad_value);
eh->zero_undefweak &= 0x2;
h->needs_plt = 1;
- h->plt.refcount += 1;
+ h->plt.refcount = 1;
break;
case R_X86_64_PLTOFF64:
if (h != NULL)
{
h->needs_plt = 1;
- h->plt.refcount += 1;
+ h->plt.refcount = 1;
}
goto create_got;
run-time relocation overflow. Don't error out for
sections we don't care about, such as debug sections or
when relocation overflow check is disabled. */
- if (!info->no_reloc_overflow_check
+ if (!htab->params->no_reloc_overflow_check
&& !converted_reloc
&& (bfd_link_pic (info)
|| (bfd_link_executable (info)
case R_X86_64_PC32_BND:
case R_X86_64_PC64:
case R_X86_64_64:
-pointer:
+ pointer:
if (eh != NULL && (sec->flags & SEC_CODE) != 0)
eh->zero_undefweak |= 0x2;
/* We are called after all symbols have been resolved. Only
&& (bfd_link_executable (info)
|| h->type == STT_GNU_IFUNC))
{
- /* If this reloc is in a read-only section, we might
- need a copy reloc. We can't check reliably at this
- stage whether the section is read-only, as input
- sections have not yet been mapped to output sections.
- Tentatively set the flag for now, and correct in
- adjust_dynamic_symbol. */
- h->non_got_ref = 1;
-
- /* We may need a .plt entry if the symbol is a function
- defined in a shared lib or is a STT_GNU_IFUNC function
- referenced from the code or read-only section. */
- if (!h->def_regular
- || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
- h->plt.refcount += 1;
+ bfd_boolean func_pointer_ref = FALSE;
if (r_type == R_X86_64_PC32)
{
as pointer, make sure that PLT is used if foo is
a function defined in a shared library. */
if ((sec->flags & SEC_CODE) == 0)
- h->pointer_equality_needed = 1;
+ {
+ h->pointer_equality_needed = 1;
+ if (bfd_link_pie (info)
+ && h->type == STT_FUNC
+ && !h->def_regular
+ && h->def_dynamic)
+ {
+ h->needs_plt = 1;
+ h->plt.refcount = 1;
+ }
+ }
}
else if (r_type != R_X86_64_PC32_BND
&& r_type != R_X86_64_PC64)
|| (!ABI_64_P (abfd)
&& (r_type == R_X86_64_32
|| r_type == R_X86_64_32S))))
- eh->func_pointer_refcount += 1;
+ func_pointer_ref = TRUE;
+ }
+
+ if (!func_pointer_ref)
+ {
+ /* If this reloc is in a read-only section, we might
+ need a copy reloc. We can't check reliably at this
+ stage whether the section is read-only, as input
+ sections have not yet been mapped to output sections.
+ Tentatively set the flag for now, and correct in
+ adjust_dynamic_symbol. */
+ h->non_got_ref = 1;
+
+ /* We may need a .plt entry if the symbol is a function
+ defined in a shared lib or is a function referenced
+ from the code or read-only section. */
+ if (!h->def_regular
+ || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
+ h->plt.refcount = 1;
}
}
size_reloc = FALSE;
-do_size:
- if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
- htab->pointer_r_type))
+ do_size:
+ if (!no_dynreloc
+ && NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
+ htab->pointer_r_type))
{
struct elf_dyn_relocs *p;
struct elf_dyn_relocs **head;
p = *head;
if (p == NULL || p->sec != sec)
{
- bfd_size_type amt = sizeof *p;
+ size_t amt = sizeof *p;
p = ((struct elf_dyn_relocs *)
bfd_alloc (htab->elf.dynobj, amt));
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_X86_64_GNU_VTENTRY:
- BFD_ASSERT (h != NULL);
- if (h != NULL
- && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
+ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
goto error_return;
break;
return TRUE;
-error_return:
+ error_return:
if (elf_section_data (sec)->this_hdr.contents != contents)
free (contents);
sec->check_relocs_failed = 1;
return address - static_tls_size - htab->tls_sec->vma;
}
-/* Is the instruction before OFFSET in CONTENTS a 32bit relative
- branch? */
-
-static bfd_boolean
-is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
-{
- /* Opcode Instruction
- 0xe8 call
- 0xe9 jump
- 0x0f 0x8x conditional jump */
- return ((offset > 0
- && (contents [offset - 1] == 0xe8
- || contents [offset - 1] == 0xe9))
- || (offset > 1
- && contents [offset - 2] == 0x0f
- && (contents [offset - 1] & 0xf0) == 0x80));
-}
-
/* Relocate an x86_64 ELF section. */
static bfd_boolean
if (htab == NULL)
return FALSE;
- BFD_ASSERT (is_x86_elf (input_bfd, htab));
+ if (!is_x86_elf (input_bfd, htab))
+ {
+ bfd_set_error (bfd_error_wrong_format);
+ return FALSE;
+ }
plt_entry_size = htab->plt.plt_entry_size;
symtab_hdr = &elf_symtab_hdr (input_bfd);
bfd_boolean relative_reloc;
bfd_boolean converted_reloc;
bfd_boolean need_copy_reloc_in_pie;
+ bfd_boolean no_copyreloc_p;
r_type = ELF32_R_TYPE (rel->r_info);
if (r_type == (int) R_X86_64_GNU_VTINHERIT
continue;
}
+ r_symndx = htab->r_sym (rel->r_info);
converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
- r_type &= ~R_X86_64_converted_reloc_bit;
+ if (converted_reloc)
+ {
+ r_type &= ~R_X86_64_converted_reloc_bit;
+ rel->r_info = htab->r_info (r_symndx, r_type);
+ }
- if (r_type >= (int) R_X86_64_standard)
+ howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
+ if (howto == NULL)
return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
- if (r_type != (int) R_X86_64_32
- || ABI_64_P (output_bfd))
- howto = x86_64_elf_howto_table + r_type;
- else
- howto = (x86_64_elf_howto_table
- + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
- r_symndx = htab->r_sym (rel->r_info);
h = NULL;
sym = NULL;
sec = NULL;
if (sec != NULL && discarded_section (sec))
{
_bfd_clear_contents (howto, input_bfd, input_section,
- contents + rel->r_offset);
+ contents, rel->r_offset);
wrel->r_offset = rel->r_offset;
wrel->r_info = 0;
wrel->r_addend = 0;
if ((input_section->flags & SEC_ALLOC) == 0)
{
+ /* If this is a SHT_NOTE section without SHF_ALLOC, treat
+ STT_GNU_IFUNC symbol as STT_FUNC. */
+ if (elf_section_type (input_section) == SHT_NOTE)
+ goto skip_ifunc;
/* Dynamic relocs are not propagated for SEC_DEBUGGING
sections because such sections are not SEC_ALLOC and
thus ld.so will not process them. */
switch (r_type)
{
default:
-bad_ifunc_reloc:
+ bad_ifunc_reloc:
if (h->root.root.string)
name = h->root.root.string;
else
NULL);
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: relocation %s against STT_GNU_IFUNC "
+ (_("%pB: relocation %s against STT_GNU_IFUNC "
"symbol `%s' isn't supported"), input_bfd,
howto->name, name);
bfd_set_error (bfd_error_bad_value);
goto do_relocation;
/* FALLTHROUGH */
case R_X86_64_64:
-do_ifunc_pointer:
+ do_ifunc_pointer:
if (rel->r_addend != 0)
{
if (h->root.root.string)
sym, NULL);
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: relocation %s against STT_GNU_IFUNC "
- "symbol `%s' has non-zero addend: %Ld"),
- input_bfd, howto->name, name, rel->r_addend);
+ (_("%pB: relocation %s against STT_GNU_IFUNC "
+ "symbol `%s' has non-zero addend: %" PRId64),
+ input_bfd, howto->name, name, (int64_t) rel->r_addend);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
if (POINTER_LOCAL_IFUNC_P (info, h))
{
- info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
h->root.root.string,
h->root.u.def.section->owner);
}
}
+ skip_ifunc:
resolved_to_zero = (eh != NULL
&& UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
base_got->contents + off);
local_got_offsets[r_symndx] |= 1;
- if (bfd_link_pic (info))
+ /* NB: GOTPCREL relocations against a local absolute
+ symbol store the relocation value in the GOT slot
+ without a relative relocation. */
+ if (bfd_link_pic (info)
+ && !(sym->st_shndx == SHN_ABS
+ && (r_type == R_X86_64_GOTPCREL
+ || r_type == R_X86_64_GOTPCRELX
+ || r_type == R_X86_64_REX_GOTPCRELX)))
relative_reloc = TRUE;
}
}
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
+ (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
" `%s' can not be used when making a shared object"),
input_bfd, v, h->root.root.string);
bfd_set_error (bfd_error_bad_value);
&& ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
{
_bfd_error_handler
- /* xgettext:c-format */
- (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
+ /* xgettext:c-format */
+ (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
" `%s' can not be used when making a shared object"),
input_bfd,
h->type == STT_FUNC ? "function" : "data",
h->root.root.string);
bfd_set_error (bfd_error_bad_value);
- return FALSE;
+ return FALSE;
}
}
break;
}
+ use_plt:
if (h->plt.offset != (bfd_vma) -1)
{
if (htab->plt_second != NULL)
case R_X86_64_PC32:
case R_X86_64_PC32_BND:
/* Don't complain about -fPIC if the symbol is undefined when
- building executable unless it is unresolved weak symbol or
- -z nocopyreloc is used. */
- if ((input_section->flags & SEC_ALLOC) != 0
+ building executable unless it is unresolved weak symbol,
+ references a dynamic definition in PIE or -z nocopyreloc
+ is used. */
+ no_copyreloc_p
+ = (info->nocopyreloc
+ || (h != NULL
+ && !h->root.linker_def
+ && !h->root.ldscript_def
+ && eh->def_protected
+ && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
+
+ if ((input_section->flags & SEC_ALLOC) != 0
&& (input_section->flags & SEC_READONLY) != 0
&& h != NULL
&& ((bfd_link_executable (info)
&& ((h->root.type == bfd_link_hash_undefweak
- && !resolved_to_zero)
- || ((info->nocopyreloc
- || (eh->def_protected
- && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
+ && (eh == NULL
+ || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
+ eh)))
+ || (bfd_link_pie (info)
+ && !SYMBOL_DEFINED_NON_SHARED_P (h)
+ && h->def_dynamic)
+ || (no_copyreloc_p
&& h->def_dynamic
&& !(h->root.u.def.section->flags & SEC_CODE))))
|| bfd_link_dll (info)))
{
bfd_boolean fail = FALSE;
- bfd_boolean branch
- = ((r_type == R_X86_64_PC32
- || r_type == R_X86_64_PC32_BND)
- && is_32bit_relative_branch (contents, rel->r_offset));
-
if (SYMBOL_REFERENCES_LOCAL_P (info, h))
{
/* Symbol is referenced locally. Make sure it is
- defined locally or for a branch. */
- fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
- && !branch);
+ defined locally. */
+ fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
+ }
+ else if (bfd_link_pie (info))
+ {
+ /* We can only use PC-relative relocations in PIE
+ from non-code sections. */
+ if (h->type == STT_FUNC
+ && (sec->flags & SEC_CODE) != 0)
+ fail = TRUE;
}
- else if (!(bfd_link_pie (info)
- && (h->needs_copy || eh->needs_copy)))
+ else if (no_copyreloc_p || bfd_link_dll (info))
{
- /* Symbol doesn't need copy reloc and isn't referenced
- locally. We only allow branch to symbol with
- non-default visibility. */
- fail = (!branch
- || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
+ /* Symbol doesn't need copy reloc and isn't
+ referenced locally. Don't allow PC-relative
+ relocations against default and protected
+ symbols since address of protected function
+ and location of protected data may not be in
+ the shared object. */
+ fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
+ || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
}
if (fail)
return elf_x86_64_need_pic (info, input_bfd, input_section,
h, NULL, NULL, howto);
}
+ /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
+ as function address. */
+ else if (h != NULL
+ && (input_section->flags & SEC_CODE) == 0
+ && bfd_link_pie (info)
+ && h->type == STT_FUNC
+ && !h->def_regular
+ && h->def_dynamic)
+ goto use_plt;
/* Fall through. */
case R_X86_64_8:
/* FIXME: The ABI says the linker should make sure the value is
the same when it's zeroextended to 64 bit. */
-direct:
+ direct:
if ((input_section->flags & SEC_ALLOC) == 0)
break;
&& (X86_PCREL_TYPE_P (r_type)
|| X86_SIZE_TYPE_P (r_type)));
- if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
+ if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
need_copy_reloc_in_pie,
resolved_to_zero, FALSE))
{
convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
if (r_type == htab->pointer_r_type
|| (r_type == R_X86_64_32
- && info->no_reloc_overflow_check))
+ && htab->params->no_reloc_overflow_check))
{
relocate = TRUE;
outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
sym, NULL);
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: addend %s%#x in relocation %s against "
- "symbol `%s' at %#Lx in section `%A' is "
- "out of range"),
+ (_("%pB: addend %s%#x in relocation %s against "
+ "symbol `%s' at %#" PRIx64
+ " in section `%pA' is out of range"),
input_bfd, addend < 0 ? "-" : "", addend,
- howto->name, name, rel->r_offset, input_section);
+ howto->name, name, (uint64_t) rel->r_offset,
+ input_section);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
{
if (contents[roff + 5] == 0xb8)
{
+ if (roff < 3
+ || (roff - 3 + 22) > input_section->size)
+ {
+ corrupt_input:
+ info->callbacks->einfo
+ (_("%F%P: corrupt input: %pB\n"),
+ input_bfd);
+ return FALSE;
+ }
memcpy (contents + roff - 3,
"\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
"\0\0\0\0\x66\x0f\x1f\x44\0", 22);
largepic = 1;
}
else
- memcpy (contents + roff - 4,
- "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
- 16);
+ {
+ if (roff < 4
+ || (roff - 4 + 16) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + roff - 4,
+ "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
+ 16);
+ }
}
else
- memcpy (contents + roff - 3,
- "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
- 15);
+ {
+ if (roff < 3
+ || (roff - 3 + 15) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + roff - 3,
+ "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
+ 15);
+ }
bfd_put_32 (output_bfd,
elf_x86_64_tpoff (info, relocation),
contents + roff + 8 + largepic);
{
/* GDesc -> LE transition.
It's originally something like:
- leaq x@tlsdesc(%rip), %rax
+ leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
+ rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
Change it to:
- movl $x@tpoff, %rax. */
+ movq $x@tpoff, %rax <--- LP64 mode.
+ rex movl $x@tpoff, %eax <--- X32 mode.
+ */
unsigned int val, type;
+ if (roff < 3)
+ goto corrupt_input;
type = bfd_get_8 (input_bfd, contents + roff - 3);
val = bfd_get_8 (input_bfd, contents + roff - 1);
- bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
+ bfd_put_8 (output_bfd,
+ (type & 0x48) | ((type >> 2) & 1),
contents + roff - 3);
bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
{
/* GDesc -> LE transition.
It's originally:
- call *(%rax)
+ call *(%rax) <--- LP64 mode.
+ call *(%eax) <--- X32 mode.
Turn it into:
- xchg %ax,%ax. */
- bfd_put_8 (output_bfd, 0x66, contents + roff);
- bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ xchg %ax,%ax <--- LP64 mode.
+ nopl (%rax) <--- X32 mode.
+ */
+ unsigned int prefix = 0;
+ if (!ABI_64_P (input_bfd))
+ {
+ /* Check for call *x@tlsdesc(%eax). */
+ if (contents[roff] == 0x67)
+ prefix = 1;
+ }
+ if (prefix)
+ {
+ bfd_put_8 (output_bfd, 0x0f, contents + roff);
+ bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
+ bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
+ }
+ else
+ {
+ bfd_put_8 (output_bfd, 0x66, contents + roff);
+ bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ }
continue;
}
else if (r_type == R_X86_64_GOTTPOFF)
if (roff >= 3)
val = bfd_get_8 (input_bfd, contents + roff - 3);
else
- val = 0;
+ {
+ if (roff < 2)
+ goto corrupt_input;
+ val = 0;
+ }
type = bfd_get_8 (input_bfd, contents + roff - 2);
reg = bfd_get_8 (input_bfd, contents + roff - 1);
reg >>= 3;
{
/* movq */
if (val == 0x4c)
- bfd_put_8 (output_bfd, 0x49,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x49,
+ contents + roff - 3);
+ }
else if (!ABI_64_P (output_bfd) && val == 0x44)
- bfd_put_8 (output_bfd, 0x41,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x41,
+ contents + roff - 3);
+ }
bfd_put_8 (output_bfd, 0xc7,
contents + roff - 2);
bfd_put_8 (output_bfd, 0xc0 | reg,
/* addq/addl -> addq/addl - addressing with %rsp/%r12
is special */
if (val == 0x4c)
- bfd_put_8 (output_bfd, 0x49,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x49,
+ contents + roff - 3);
+ }
else if (!ABI_64_P (output_bfd) && val == 0x44)
- bfd_put_8 (output_bfd, 0x41,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x41,
+ contents + roff - 3);
+ }
bfd_put_8 (output_bfd, 0x81,
contents + roff - 2);
bfd_put_8 (output_bfd, 0xc0 | reg,
{
/* addq/addl -> leaq/leal */
if (val == 0x4c)
- bfd_put_8 (output_bfd, 0x4d,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x4d,
+ contents + roff - 3);
+ }
else if (!ABI_64_P (output_bfd) && val == 0x44)
- bfd_put_8 (output_bfd, 0x45,
- contents + roff - 3);
+ {
+ if (roff < 3)
+ goto corrupt_input;
+ bfd_put_8 (output_bfd, 0x45,
+ contents + roff - 3);
+ }
bfd_put_8 (output_bfd, 0x8d,
contents + roff - 2);
bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
{
if (contents[roff + 5] == 0xb8)
{
+ if (roff < 3
+ || (roff - 3 + 22) > input_section->size)
+ goto corrupt_input;
memcpy (contents + roff - 3,
"\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
"\0\0\0\0\x66\x0f\x1f\x44\0", 22);
largepic = 1;
}
else
- memcpy (contents + roff - 4,
- "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
- 16);
+ {
+ if (roff < 4
+ || (roff - 4 + 16) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + roff - 4,
+ "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
+ 16);
+ }
}
else
- memcpy (contents + roff - 3,
- "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
- 15);
+ {
+ if (roff < 3
+ || (roff - 3 + 15) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + roff - 3,
+ "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
+ 15);
+ }
relocation = (htab->elf.sgot->output_section->vma
+ htab->elf.sgot->output_offset + off
{
/* GDesc -> IE transition.
It's originally something like:
- leaq x@tlsdesc(%rip), %rax
+ leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
+ rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
Change it to:
- movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
+ # before xchg %ax,%ax in LP64 mode.
+ movq x@gottpoff(%rip), %rax
+ # before nopl (%rax) in X32 mode.
+ rex movl x@gottpoff(%rip), %eax
+ */
/* Now modify the instruction as appropriate. To
- turn a leaq into a movq in the form we use it, it
+ turn a lea into a mov in the form we use it, it
suffices to change the second byte from 0x8d to
0x8b. */
+ if (roff < 2)
+ goto corrupt_input;
bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
bfd_put_32 (output_bfd,
{
/* GDesc -> IE transition.
It's originally:
- call *(%rax)
+ call *(%rax) <--- LP64 mode.
+ call *(%eax) <--- X32 mode.
Change it to:
- xchg %ax, %ax. */
+ xchg %ax, %ax <--- LP64 mode.
+ nopl (%rax) <--- X32 mode.
+ */
- bfd_put_8 (output_bfd, 0x66, contents + roff);
- bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ unsigned int prefix = 0;
+ if (!ABI_64_P (input_bfd))
+ {
+ /* Check for call *x@tlsdesc(%eax). */
+ if (contents[roff] == 0x67)
+ prefix = 1;
+ }
+ if (prefix)
+ {
+ bfd_put_8 (output_bfd, 0x0f, contents + roff);
+ bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
+ bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
+ }
+ else
+ {
+ bfd_put_8 (output_bfd, 0x66, contents + roff);
+ bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ }
continue;
}
else
BFD_ASSERT (r_type == R_X86_64_TPOFF32);
if (ABI_64_P (output_bfd))
{
+ if ((rel->r_offset + 5) >= input_section->size)
+ goto corrupt_input;
if (contents[rel->r_offset + 5] == 0xb8)
- memcpy (contents + rel->r_offset - 3,
- "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
- "\x64\x48\x8b\x04\x25\0\0\0", 22);
+ {
+ if (rel->r_offset < 3
+ || (rel->r_offset - 3 + 22) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + rel->r_offset - 3,
+ "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
+ "\x64\x48\x8b\x04\x25\0\0\0", 22);
+ }
else if (contents[rel->r_offset + 4] == 0xff
|| contents[rel->r_offset + 4] == 0x67)
- memcpy (contents + rel->r_offset - 3,
- "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
- 13);
+ {
+ if (rel->r_offset < 3
+ || (rel->r_offset - 3 + 13) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + rel->r_offset - 3,
+ "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
+ 13);
+
+ }
else
- memcpy (contents + rel->r_offset - 3,
- "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
+ {
+ if (rel->r_offset < 3
+ || (rel->r_offset - 3 + 12) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + rel->r_offset - 3,
+ "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
+ }
}
else
{
+ if ((rel->r_offset + 4) >= input_section->size)
+ goto corrupt_input;
if (contents[rel->r_offset + 4] == 0xff)
- memcpy (contents + rel->r_offset - 3,
- "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
- 13);
+ {
+ if (rel->r_offset < 3
+ || (rel->r_offset - 3 + 13) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + rel->r_offset - 3,
+ "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
+ 13);
+ }
else
- memcpy (contents + rel->r_offset - 3,
- "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
+ {
+ if (rel->r_offset < 3
+ || (rel->r_offset - 3 + 12) > input_section->size)
+ goto corrupt_input;
+ memcpy (contents + rel->r_offset - 3,
+ "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
+ }
}
/* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
and R_X86_64_PLTOFF64. */
default:
_bfd_error_handler
/* xgettext:c-format */
- (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
+ (_("%pB(%pA+%#" PRIx64 "): "
+ "unresolvable %s relocation against symbol `%s'"),
input_bfd,
input_section,
- rel->r_offset,
+ (uint64_t) rel->r_offset,
howto->name,
h->root.root.string);
return FALSE;
}
}
-do_relocation:
+ do_relocation:
r = _bfd_final_link_relocate (howto, input_bfd, input_section,
contents, rel->r_offset,
relocation, rel->r_addend);
-check_relocation_error:
+ check_relocation_error:
if (r != bfd_reloc_ok)
{
const char *name;
if (name == NULL)
return FALSE;
if (*name == '\0')
- name = bfd_section_name (input_bfd, sec);
+ name = bfd_section_name (sec);
}
if (r == bfd_reloc_overflow)
{
_bfd_error_handler
/* xgettext:c-format */
- (_("%B(%A+%#Lx): reloc against `%s': error %d"),
+ (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
input_bfd, input_section,
- rel->r_offset, name, (int) r);
+ (uint64_t) rel->r_offset, name, (int) r);
return FALSE;
}
}
/* Check PC-relative offset overflow in PLT entry. */
if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
/* xgettext:c-format */
- info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
+ info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, plt_got_pcrel_offset,
+ got_offset);
if (PLT_LOCAL_IFUNC_P (info, h))
{
- info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
h->root.root.string,
h->root.u.def.section->owner);
will overflow first. */
if (plt0_offset > 0x80000000)
/* xgettext:c-format */
- info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
+ info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, - plt0_offset,
(plt->contents + h->plt.offset
if ((got_after_plt && got_pcrel_offset < 0)
|| (!got_after_plt && got_pcrel_offset > 0))
/* xgettext:c-format */
- info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
+ info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, got_pcrel_offset,
sym->st_value = 0;
}
+ _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
+
/* Don't generate dynamic GOT relocation against undefined weak
symbol in executable. */
if (h->got.offset != (bfd_vma) -1
}
if (SYMBOL_REFERENCES_LOCAL_P (info, h))
{
- info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
h->root.root.string,
h->root.u.def.section->owner);
else if (bfd_link_pic (info)
&& SYMBOL_REFERENCES_LOCAL_P (info, h))
{
- if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
+ if (!SYMBOL_DEFINED_NON_SHARED_P (h))
return FALSE;
BFD_ASSERT((h->got.offset & 1) != 0);
rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
else
{
BFD_ASSERT((h->got.offset & 1) == 0);
-do_glob_dat:
+ do_glob_dat:
bfd_put_64 (output_bfd, (bfd_vma) 0,
htab->elf.sgot->contents + h->got.offset);
rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
&& htab->elf.dynsym->contents != NULL)
{
/* Check relocation against STT_GNU_IFUNC symbol if there are
- dynamic symbols. */
+ dynamic symbols. */
unsigned long r_symndx = htab->r_sym (rela->r_info);
if (r_symndx != STN_UNDEF)
{
struct bfd_link_info *info)
{
struct elf_x86_link_hash_table *htab;
- bfd *dynobj;
- asection *sdyn;
- htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
+ htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
if (htab == NULL)
return FALSE;
- dynobj = htab->elf.dynobj;
- sdyn = bfd_get_linker_section (dynobj, ".dynamic");
-
- if (htab->elf.dynamic_sections_created)
- {
- bfd_byte *dyncon, *dynconend;
- const struct elf_backend_data *bed;
- bfd_size_type sizeof_dyn;
-
- if (sdyn == NULL || htab->elf.sgot == NULL)
- abort ();
-
- bed = get_elf_backend_data (dynobj);
- sizeof_dyn = bed->s->sizeof_dyn;
- dyncon = sdyn->contents;
- dynconend = sdyn->contents + sdyn->size;
- for (; dyncon < dynconend; dyncon += sizeof_dyn)
- {
- Elf_Internal_Dyn dyn;
- asection *s;
-
- (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
-
- switch (dyn.d_tag)
- {
- default:
- continue;
-
- case DT_PLTGOT:
- s = htab->elf.sgotplt;
- dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
- break;
-
- case DT_JMPREL:
- dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
- break;
-
- case DT_PLTRELSZ:
- s = htab->elf.srelplt->output_section;
- dyn.d_un.d_val = s->size;
- break;
-
- case DT_TLSDESC_PLT:
- s = htab->elf.splt;
- dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
- + htab->tlsdesc_plt;
- break;
-
- case DT_TLSDESC_GOT:
- s = htab->elf.sgot;
- dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
- + htab->tlsdesc_got;
- break;
- }
-
- (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
- }
-
- if (htab->elf.splt && htab->elf.splt->size > 0)
- {
- elf_section_data (htab->elf.splt->output_section)
- ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
-
- if (htab->plt.has_plt0)
- {
- /* Fill in the special first entry in the procedure linkage
- table. */
- memcpy (htab->elf.splt->contents,
- htab->lazy_plt->plt0_entry,
- htab->lazy_plt->plt0_entry_size);
- /* Add offset for pushq GOT+8(%rip), since the instruction
- uses 6 bytes subtract this value. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgotplt->output_section->vma
- + htab->elf.sgotplt->output_offset
- + 8
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - 6),
- (htab->elf.splt->contents
- + htab->lazy_plt->plt0_got1_offset));
- /* Add offset for the PC-relative instruction accessing
- GOT+16, subtracting the offset to the end of that
- instruction. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgotplt->output_section->vma
- + htab->elf.sgotplt->output_offset
- + 16
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - htab->lazy_plt->plt0_got2_insn_end),
- (htab->elf.splt->contents
- + htab->lazy_plt->plt0_got2_offset));
-
- if (htab->tlsdesc_plt)
- {
- bfd_put_64 (output_bfd, (bfd_vma) 0,
- htab->elf.sgot->contents + htab->tlsdesc_got);
-
- memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
- htab->lazy_plt->plt0_entry,
- htab->lazy_plt->plt0_entry_size);
-
- /* Add offset for pushq GOT+8(%rip), since the
- instruction uses 6 bytes subtract this value. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgotplt->output_section->vma
- + htab->elf.sgotplt->output_offset
- + 8
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - htab->tlsdesc_plt
- - 6),
- (htab->elf.splt->contents
- + htab->tlsdesc_plt
- + htab->lazy_plt->plt0_got1_offset));
- /* Add offset for the PC-relative instruction accessing
- GOT+TDG, where TDG stands for htab->tlsdesc_got,
- subtracting the offset to the end of that
- instruction. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgot->output_section->vma
- + htab->elf.sgot->output_offset
- + htab->tlsdesc_got
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - htab->tlsdesc_plt
- - htab->lazy_plt->plt0_got2_insn_end),
- (htab->elf.splt->contents
- + htab->tlsdesc_plt
- + htab->lazy_plt->plt0_got2_offset));
- }
- }
- }
-
- if (htab->plt_got != NULL && htab->plt_got->size > 0)
- elf_section_data (htab->plt_got->output_section)
- ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
-
- if (htab->plt_second != NULL && htab->plt_second->size > 0)
- elf_section_data (htab->plt_second->output_section)
- ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
- }
+ if (! htab->elf.dynamic_sections_created)
+ return TRUE;
- /* GOT is always created in setup_gnu_properties. But it may not be
- needed. */
- if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0)
+ if (htab->elf.splt && htab->elf.splt->size > 0)
{
- if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
- {
- _bfd_error_handler
- (_("discarded output section: `%A'"), htab->elf.sgotplt);
- return FALSE;
- }
+ elf_section_data (htab->elf.splt->output_section)
+ ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
- /* Set the first entry in the global offset table to the address of
- the dynamic section. */
- if (sdyn == NULL)
- bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
- else
- bfd_put_64 (output_bfd,
- sdyn->output_section->vma + sdyn->output_offset,
- htab->elf.sgotplt->contents);
- /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
- bfd_put_64 (output_bfd, (bfd_vma) 0,
- htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
- bfd_put_64 (output_bfd, (bfd_vma) 0,
- htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
-
- elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize
- = GOT_ENTRY_SIZE;
- }
-
- /* Adjust .eh_frame for .plt section. */
- if (htab->plt_eh_frame != NULL
- && htab->plt_eh_frame->contents != NULL)
- {
- if (htab->elf.splt != NULL
- && htab->elf.splt->size != 0
- && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
- && htab->elf.splt->output_section != NULL
- && htab->plt_eh_frame->output_section != NULL)
+ if (htab->plt.has_plt0)
{
- bfd_vma plt_start = htab->elf.splt->output_section->vma;
- bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
- + htab->plt_eh_frame->output_offset
- + PLT_FDE_START_OFFSET;
- bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
- htab->plt_eh_frame->contents
- + PLT_FDE_START_OFFSET);
- }
- if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
- {
- if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
- htab->plt_eh_frame,
- htab->plt_eh_frame->contents))
- return FALSE;
- }
- }
-
- /* Adjust .eh_frame for .plt.got section. */
- if (htab->plt_got_eh_frame != NULL
- && htab->plt_got_eh_frame->contents != NULL)
- {
- if (htab->plt_got != NULL
- && htab->plt_got->size != 0
- && (htab->plt_got->flags & SEC_EXCLUDE) == 0
- && htab->plt_got->output_section != NULL
- && htab->plt_got_eh_frame->output_section != NULL)
- {
- bfd_vma plt_start = htab->plt_got->output_section->vma;
- bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma
- + htab->plt_got_eh_frame->output_offset
- + PLT_FDE_START_OFFSET;
- bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
- htab->plt_got_eh_frame->contents
- + PLT_FDE_START_OFFSET);
- }
- if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
- {
- if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
- htab->plt_got_eh_frame,
- htab->plt_got_eh_frame->contents))
- return FALSE;
+ /* Fill in the special first entry in the procedure linkage
+ table. */
+ memcpy (htab->elf.splt->contents,
+ htab->lazy_plt->plt0_entry,
+ htab->lazy_plt->plt0_entry_size);
+ /* Add offset for pushq GOT+8(%rip), since the instruction
+ uses 6 bytes, subtract this value. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgotplt->output_section->vma
+ + htab->elf.sgotplt->output_offset
+ + 8
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - 6),
+ (htab->elf.splt->contents
+ + htab->lazy_plt->plt0_got1_offset));
+ /* Add offset for the PC-relative instruction accessing
+ GOT+16, subtracting the offset to the end of that
+ instruction. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgotplt->output_section->vma
+ + htab->elf.sgotplt->output_offset
+ + 16
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - htab->lazy_plt->plt0_got2_insn_end),
+ (htab->elf.splt->contents
+ + htab->lazy_plt->plt0_got2_offset));
}
- }
- /* Adjust .eh_frame for the second PLT section. */
- if (htab->plt_second_eh_frame != NULL
- && htab->plt_second_eh_frame->contents != NULL)
- {
- if (htab->plt_second != NULL
- && htab->plt_second->size != 0
- && (htab->plt_second->flags & SEC_EXCLUDE) == 0
- && htab->plt_second->output_section != NULL
- && htab->plt_second_eh_frame->output_section != NULL)
+ if (htab->tlsdesc_plt)
{
- bfd_vma plt_start = htab->plt_second->output_section->vma;
- bfd_vma eh_frame_start
- = (htab->plt_second_eh_frame->output_section->vma
- + htab->plt_second_eh_frame->output_offset
- + PLT_FDE_START_OFFSET);
- bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
- htab->plt_second_eh_frame->contents
- + PLT_FDE_START_OFFSET);
- }
- if (htab->plt_second_eh_frame->sec_info_type
- == SEC_INFO_TYPE_EH_FRAME)
- {
- if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
- htab->plt_second_eh_frame,
- htab->plt_second_eh_frame->contents))
- return FALSE;
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgot->contents + htab->tlsdesc_got);
+
+ memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
+ htab->lazy_plt->plt_tlsdesc_entry,
+ htab->lazy_plt->plt_tlsdesc_entry_size);
+
+ /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
+ bytes and the instruction uses 6 bytes, subtract these
+ values. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgotplt->output_section->vma
+ + htab->elf.sgotplt->output_offset
+ + 8
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - htab->tlsdesc_plt
+ - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
+ (htab->elf.splt->contents
+ + htab->tlsdesc_plt
+ + htab->lazy_plt->plt_tlsdesc_got1_offset));
+ /* Add offset for indirect branch via GOT+TDG, where TDG
+ stands for htab->tlsdesc_got, subtracting the offset
+ to the end of that instruction. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgot->output_section->vma
+ + htab->elf.sgot->output_offset
+ + htab->tlsdesc_got
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - htab->tlsdesc_plt
+ - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
+ (htab->elf.splt->contents
+ + htab->tlsdesc_plt
+ + htab->lazy_plt->plt_tlsdesc_got2_offset));
}
}
- if (htab->elf.sgot && htab->elf.sgot->size > 0)
- elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
- = GOT_ENTRY_SIZE;
-
/* Fill PLT entries for undefined weak symbols in PIE. */
if (bfd_link_pie (info))
bfd_hash_traverse (&info->hash->table,
if (relsize <= 0)
return -1;
- if (get_elf_x86_64_backend_data (abfd)->os == is_normal)
+ if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
{
lazy_plt = &elf_x86_64_lazy_plt;
non_lazy_plt = &elf_x86_64_non_lazy_plt;
!= (int) R_X86_64_GNU_VTENTRY))
abort ();
- init_table.is_vxworks = FALSE;
- if (get_elf_x86_64_backend_data (info->output_bfd)->os == is_normal)
+ /* This is unused for x86-64. */
+ init_table.plt0_pad_byte = 0x90;
+
+ if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
{
- if (info->bndplt)
+ const struct elf_backend_data *bed
+ = get_elf_backend_data (info->output_bfd);
+ struct elf_x86_link_hash_table *htab
+ = elf_x86_hash_table (info, bed->target_id);
+ if (!htab)
+ abort ();
+ if (htab->params->bndplt)
{
init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
}
- init_table.normal_target = TRUE;
}
else
{
init_table.non_lazy_plt = NULL;
init_table.lazy_ibt_plt = NULL;
init_table.non_lazy_ibt_plt = NULL;
- init_table.normal_target = FALSE;
}
if (ABI_64_P (info->output_bfd))
{ STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
{ STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
{ STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
- { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
{ STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
{ STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
- { NULL, 0, 0, 0, 0 }
+ { NULL, 0, 0, 0, 0 }
};
#define TARGET_LITTLE_SYM x86_64_elf64_vec
#define ELF_ARCH bfd_arch_i386
#define ELF_TARGET_ID X86_64_ELF_DATA
#define ELF_MACHINE_CODE EM_X86_64
-#define ELF_MAXPAGESIZE 0x200000
+#if DEFAULT_LD_Z_SEPARATE_CODE
+# define ELF_MAXPAGESIZE 0x1000
+#else
+# define ELF_MAXPAGESIZE 0x200000
+#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#define elf_backend_want_plt_sym 0
#define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
#define elf_backend_rela_normal 1
-#define elf_backend_plt_alignment 4
+#define elf_backend_plt_alignment 4
#define elf_backend_extern_protected_data 1
#define elf_backend_caches_rawsize 1
#define elf_backend_dtrel_excludes_plt 1
#define elf_backend_hide_symbol \
_bfd_x86_elf_hide_symbol
+#undef elf64_bed
+#define elf64_bed elf64_x86_64_bed
+
#include "elf64-target.h"
/* CloudABI support. */
-#undef TARGET_LITTLE_SYM
+#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
-#undef TARGET_LITTLE_NAME
+#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_CLOUDABI
-#undef elf64_bed
+#undef elf64_bed
#define elf64_bed elf64_x86_64_cloudabi_bed
#include "elf64-target.h"
/* FreeBSD support. */
-#undef TARGET_LITTLE_SYM
+#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
-#undef TARGET_LITTLE_NAME
+#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD
-#undef elf64_bed
+#undef elf64_bed
#define elf64_bed elf64_x86_64_fbsd_bed
#include "elf64-target.h"
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
+static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
+ {
+ is_solaris /* os */
+ };
+
+#undef elf_backend_arch_data
+#define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
+
/* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
objects won't be recognized. */
#undef ELF_OSABI
static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
{
- 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
+ 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
- 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
- 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
- 0x41, 0xff, 0xe3, /* jmpq *%r11 */
+ 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
+ 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
+ 0x41, 0xff, 0xe3, /* jmpq *%r11 */
/* 9-byte nop sequence to pad out to the next 32-byte boundary. */
0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
- 0x66, /* excess data16 prefix */
- 0x90 /* nop */
+ 0x66, /* excess data16 prefix */
+ 0x90 /* nop */
};
static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
{
0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
- 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
- 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
- 0x41, 0xff, 0xe3, /* jmpq *%r11 */
+ 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
+ 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
+ 0x41, 0xff, 0xe3, /* jmpq *%r11 */
/* 15-byte nop sequence to pad out to the next 32-byte boundary. */
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
/* Lazy GOT entries point here (32-byte aligned). */
- 0x68, /* pushq immediate */
- 0, 0, 0, 0, /* replaced with index into relocation table. */
- 0xe9, /* jmp relative */
- 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
+ 0x68, /* pushq immediate */
+ 0, 0, 0, 0, /* replaced with index into relocation table. */
+ 0xe9, /* jmp relative */
+ 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
- /* 22 bytes of nop to pad out to the standard size. */
+ /* 22 bytes of nop to pad out to the standard size. */
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
- 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
+ 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
};
/* .eh_frame covering the .plt section. */
static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
{
-#if (PLT_CIE_LENGTH != 20 \
- || PLT_FDE_LENGTH != 36 \
- || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
+#if (PLT_CIE_LENGTH != 20 \
+ || PLT_FDE_LENGTH != 36 \
+ || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
|| PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
-# error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
+# error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
#endif
PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
0, 0, 0, 0, /* CIE ID */
1, /* CIE version */
- 'z', 'R', 0, /* Augmentation string */
+ 'z', 'R', 0, /* Augmentation string */
1, /* Code alignment factor */
- 0x78, /* Data alignment factor */
+ 0x78, /* Data alignment factor */
16, /* Return address column */
1, /* Augmentation size */
DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
{
- elf_x86_64_nacl_plt0_entry, /* plt0_entry */
- NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
- elf_x86_64_nacl_plt_entry, /* plt_entry */
- NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
- 2, /* plt0_got1_offset */
- 9, /* plt0_got2_offset */
- 13, /* plt0_got2_insn_end */
- 3, /* plt_got_offset */
- 33, /* plt_reloc_offset */
- 38, /* plt_plt_offset */
- 7, /* plt_got_insn_size */
- 42, /* plt_plt_insn_end */
- 32, /* plt_lazy_offset */
- elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
- elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
- elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
+ elf_x86_64_nacl_plt0_entry, /* plt0_entry */
+ NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
+ elf_x86_64_nacl_plt_entry, /* plt_entry */
+ NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
+ elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
+ NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
+ 2, /* plt_tlsdesc_got1_offset */
+ 9, /* plt_tlsdesc_got2_offset */
+ 6, /* plt_tlsdesc_got1_insn_end */
+ 13, /* plt_tlsdesc_got2_insn_end */
+ 2, /* plt0_got1_offset */
+ 9, /* plt0_got2_offset */
+ 13, /* plt0_got2_insn_end */
+ 3, /* plt_got_offset */
+ 33, /* plt_reloc_offset */
+ 38, /* plt_plt_offset */
+ 7, /* plt_got_insn_size */
+ 42, /* plt_plt_insn_end */
+ 32, /* plt_lazy_offset */
+ elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
+ elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
+ elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
};
-static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
+static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
{
- is_nacl /* os */
+ is_nacl /* os */
};
#undef elf_backend_arch_data
#define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map nacl_modify_segment_map
-#undef elf_backend_modify_program_headers
-#define elf_backend_modify_program_headers nacl_modify_program_headers
+#undef elf_backend_modify_headers
+#define elf_backend_modify_headers nacl_modify_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing nacl_final_write_processing
return TRUE;
}
-#undef TARGET_LITTLE_SYM
+#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
-#undef TARGET_LITTLE_NAME
+#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
#undef elf32_bed
#define elf32_bed elf32_x86_64_nacl_bed
#define elf_backend_size_info \
_bfd_elf32_size_info
+#undef elf32_bed
+#define elf32_bed elf32_x86_64_bed
+
#include "elf32-target.h"
/* Restore defaults. */
#undef elf_backend_bfd_from_remote_memory
#undef elf_backend_size_info
#undef elf_backend_modify_segment_map
-#undef elf_backend_modify_program_headers
+#undef elf_backend_modify_headers
#undef elf_backend_final_write_processing
/* Intel L1OM support. */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
-#define ELF_MAXPAGESIZE 0x200000
+#if DEFAULT_LD_Z_SEPARATE_CODE
+# define ELF_MAXPAGESIZE 0x1000
+#else
+# define ELF_MAXPAGESIZE 0x200000
+#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#undef elf_backend_plt_alignment