X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=bfd%2Felf64-x86-64.c;h=4c9ad78dd74a600a3da7a45f8fbf2e7d2d2b5cca;hb=7e94cf6cb018df7cc1311afb2b15e9f69adb60d9;hp=f8a7ca39e669f61d267ca122707dbec377faeba0;hpb=897463b12ba936df7d2070755eaac94f87fcedfb;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf64-x86-64.c b/bfd/elf64-x86-64.c index f8a7ca39e6..4c9ad78dd7 100644 --- a/bfd/elf64-x86-64.c +++ b/bfd/elf64-x86-64.c @@ -1,5 +1,5 @@ /* X86-64 specific support for ELF - Copyright (C) 2000-2016 Free Software Foundation, Inc. + Copyright (C) 2000-2020 Free Software Foundation, Inc. Contributed by Jan Hubicka . This file is part of BFD, the Binary File Descriptor library. @@ -19,15 +19,8 @@ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -#include "sysdep.h" -#include "bfd.h" -#include "bfdlink.h" -#include "libbfd.h" -#include "elf-bfd.h" +#include "elfxx-x86.h" #include "elf-nacl.h" -#include "bfd_stdint.h" -#include "objalloc.h" -#include "hashtab.h" #include "dwarf2.h" #include "libiberty.h" @@ -47,9 +40,6 @@ relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE since they are the same. */ -#define ABI_64_P(abfd) \ - (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64) - /* The relocation "howto" table. Order of fields: type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow, special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */ @@ -206,13 +196,16 @@ static reloc_howto_type x86_64_elf_howto_table[] = FALSE) }; -#define IS_X86_64_PCREL_TYPE(TYPE) \ +#define X86_PCREL_TYPE_P(TYPE) \ ( ((TYPE) == R_X86_64_PC8) \ || ((TYPE) == R_X86_64_PC16) \ || ((TYPE) == R_X86_64_PC32) \ || ((TYPE) == R_X86_64_PC32_BND) \ || ((TYPE) == R_X86_64_PC64)) +#define X86_SIZE_TYPE_P(TYPE) \ + ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64) + /* Map BFD relocs to the x86_64 elf relocs. */ struct elf_reloc_map { @@ -285,9 +278,11 @@ elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type) { if (r_type >= (unsigned int) R_X86_64_standard) { - (*_bfd_error_handler) (_("%B: invalid relocation type %d"), - abfd, (int) r_type); - r_type = R_X86_64_NONE; + /* xgettext:c-format */ + _bfd_error_handler (_("%pB: unsupported relocation type %#x"), + abfd, r_type); + bfd_set_error (bfd_error_bad_value); + return NULL; } i = r_type; } @@ -339,15 +334,18 @@ elf_x86_64_reloc_name_lookup (bfd *abfd, /* Given an x86_64 ELF reloc type, fill in an arelent structure. */ -static void -elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr, +static bfd_boolean +elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr, Elf_Internal_Rela *dst) { unsigned r_type; r_type = ELF32_R_TYPE (dst->r_info); cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type); - BFD_ASSERT (r_type == cache_ptr->howto->type); + if (cache_ptr->howto == NULL) + return FALSE; + BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE); + return TRUE; } /* Support for core dump NOTE sections. */ @@ -438,6 +436,10 @@ elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) } #ifdef CORE_HEADER +# if GCC_VERSION >= 8000 +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wstringop-truncation" +# endif static char * elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, int note_type, ...) 
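With this change, elf_x86_64_rtype_to_howto reports an unsupported relocation type through _bfd_error_handler, sets bfd_error_bad_value and returns NULL instead of silently mapping it to R_X86_64_NONE, and elf_x86_64_info_to_howto turns that NULL into a FALSE return. A minimal sketch of the pattern a caller is expected to follow, relying only on declarations already in this file; apply_howto_checked is a hypothetical name used for illustration:

static bfd_boolean
apply_howto_checked (bfd *abfd, arelent *cache_ptr, unsigned int r_type)
{
  reloc_howto_type *howto = elf_x86_64_rtype_to_howto (abfd, r_type);

  if (howto == NULL)
    /* elf_x86_64_rtype_to_howto has already issued the diagnostic and
       called bfd_set_error (bfd_error_bad_value); just fail upward,
       as elf_x86_64_info_to_howto now does.  */
    return FALSE;

  cache_ptr->howto = howto;
  return TRUE;
}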
@@ -523,43 +525,40 @@ elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, } /* NOTREACHED */ } +# if GCC_VERSION >= 8000 +# pragma GCC diagnostic pop +# endif #endif /* Functions for the x86-64 ELF linker. */ -/* The name of the dynamic interpreter. This is put in the .interp - section. */ - -#define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1" -#define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1" - -/* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid - copying dynamic variables from a shared lib into an app's dynbss - section, and instead use a dynamic relocation to point into the - shared lib. */ -#define ELIMINATE_COPY_RELOCS 1 - /* The size in bytes of an entry in the global offset table. */ #define GOT_ENTRY_SIZE 8 -/* The size in bytes of an entry in the procedure linkage table. */ +/* The size in bytes of an entry in the lazy procedure linkage table. */ + +#define LAZY_PLT_ENTRY_SIZE 16 + +/* The size in bytes of an entry in the non-lazy procedure linkage + table. */ -#define PLT_ENTRY_SIZE 16 +#define NON_LAZY_PLT_ENTRY_SIZE 8 -/* The first entry in a procedure linkage table looks like this. See the - SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */ +/* The first entry in a lazy procedure linkage table looks like this. + See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this + works. */ -static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] = { 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */ 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */ }; -/* Subsequent entries in a procedure linkage table look like this. */ +/* Subsequent entries in a lazy procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] = { 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ @@ -569,64 +568,107 @@ static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = 0, 0, 0, 0 /* replaced with offset to start of .plt0. */ }; -/* The first entry in a procedure linkage table with BND relocations +/* The first entry in a lazy procedure linkage table with BND prefix like this. */ -static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] = { - 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ + 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */ - 0x0f, 0x1f, 0 /* nopl (%rax) */ + 0x0f, 0x1f, 0 /* nopl (%rax) */ }; -/* Subsequent entries for legacy branches in a procedure linkage table - with BND relocations look like this. */ +/* Subsequent entries for branches with BND prefx in a lazy procedure + linkage table look like this. */ -static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] = { - 0x68, 0, 0, 0, 0, /* pushq immediate */ - 0xe9, 0, 0, 0, 0, /* jmpq relative */ - 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ + 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ }; -/* Subsequent entries for branches with BND prefx in a procedure linkage - table with BND relocations look like this. 
*/ +/* The first entry in the IBT-enabled lazy procedure linkage table is the + the same as the lazy PLT with BND prefix so that bound registers are + preserved when control is passed to dynamic linker. Subsequent + entries for a IBT-enabled lazy procedure linkage table look like + this. */ -static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = { - 0x68, 0, 0, 0, 0, /* pushq immediate */ - 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ - 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ + 0x90 /* nop */ }; -/* Entries for legacy branches in the second procedure linkage table - look like this. */ +/* The first entry in the x32 IBT-enabled lazy procedure linkage table + is the same as the normal lazy PLT. Subsequent entries for an + x32 IBT-enabled lazy procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_legacy_plt2_entry[8] = +static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = { - 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ - 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ - 0x66, 0x90 /* xchg %ax,%ax */ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xe9, 0, 0, 0, 0, /* jmpq relative */ + 0x66, 0x90 /* xchg %ax,%ax */ }; -/* Entries for branches with BND prefix in the second procedure linkage - table look like this. */ +/* Entries in the non-lazey procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_bnd_plt2_entry[8] = +static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = { - 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ - 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ - 0x90 /* nop */ + 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x66, 0x90 /* xchg %ax,%ax */ }; -/* .eh_frame covering the .plt section. */ +/* Entries for branches with BND prefix in the non-lazey procedure + linkage table look like this. */ + +static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = +{ + 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x90 /* nop */ +}; -static const bfd_byte elf_x86_64_eh_frame_plt[] = +/* Entries for branches with IBT-enabled in the non-lazey procedure + linkage table look like this. They have the same size as the lazy + PLT entry. */ + +static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = +{ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */ +}; + +/* Entries for branches with IBT-enabled in the x32 non-lazey procedure + linkage table look like this. They have the same size as the lazy + PLT entry. */ + +static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = +{ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */ +}; + +/* The TLSDESC entry in a lazy procedure linkage table. 
*/ +static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] = +{ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ + 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */ +}; + +/* .eh_frame covering the lazy .plt section. */ + +static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] = { -#define PLT_CIE_LENGTH 20 -#define PLT_FDE_LENGTH 36 -#define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8 -#define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 0, 0, 0, 0, /* CIE ID */ 1, /* CIE version */ @@ -658,681 +700,382 @@ static const bfd_byte elf_x86_64_eh_frame_plt[] = DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop }; -/* Architecture-specific backend data for x86-64. */ +/* .eh_frame covering the lazy BND .plt section. */ -struct elf_x86_64_backend_data +static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] = { - /* Templates for the initial PLT entry and for subsequent entries. */ - const bfd_byte *plt0_entry; - const bfd_byte *plt_entry; - unsigned int plt_entry_size; /* Size of each PLT entry. */ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, - /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */ - unsigned int plt0_got1_offset; - unsigned int plt0_got2_offset; + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; - /* Offset of the end of the PC-relative instruction containing - plt0_got2_offset. */ - unsigned int plt0_got2_insn_end; +/* .eh_frame covering the lazy .plt section with IBT-enabled. */ - /* Offsets into plt_entry that are to be replaced with... */ - unsigned int plt_got_offset; /* ... address of this symbol in .got. */ - unsigned int plt_reloc_offset; /* ... offset into relocation table. */ - unsigned int plt_plt_offset; /* ... offset to start of .plt. 
*/ +static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] = +{ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, - /* Length of the PC-relative instruction containing plt_got_offset. */ - unsigned int plt_got_insn_size; + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; - /* Offset of the end of the PC-relative jump to plt0_entry. */ - unsigned int plt_plt_insn_end; +/* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */ - /* Offset into plt_entry where the initial value of the GOT entry points. */ - unsigned int plt_lazy_offset; +static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] = +{ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, - /* .eh_frame covering the .plt section. */ - const bfd_byte *eh_frame_plt; - unsigned int eh_frame_plt_size; + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop }; -#define get_elf_x86_64_arch_data(bed) \ - ((const struct elf_x86_64_backend_data *) (bed)->arch_data) +/* .eh_frame covering the non-lazy .plt section. 
*/ -#define get_elf_x86_64_backend_data(abfd) \ - get_elf_x86_64_arch_data (get_elf_backend_data (abfd)) +static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] = +{ +#define PLT_GOT_FDE_LENGTH 20 + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, -#define GET_PLT_ENTRY_SIZE(abfd) \ - get_elf_x86_64_backend_data (abfd)->plt_entry_size + PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* the start of non-lazy .plt goes here */ + 0, 0, 0, 0, /* non-lazy .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; /* These are the standard parameters. */ -static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt = { - elf_x86_64_plt0_entry, /* plt0_entry */ - elf_x86_64_plt_entry, /* plt_entry */ - sizeof (elf_x86_64_plt_entry), /* plt_entry_size */ - 2, /* plt0_got1_offset */ - 8, /* plt0_got2_offset */ - 12, /* plt0_got2_insn_end */ - 2, /* plt_got_offset */ - 7, /* plt_reloc_offset */ - 12, /* plt_plt_offset */ - 6, /* plt_got_insn_size */ - PLT_ENTRY_SIZE, /* plt_plt_insn_end */ - 6, /* plt_lazy_offset */ - elf_x86_64_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_lazy_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ + 6, /* plt_tlsdesc_got1_offset */ + 12, /* plt_tlsdesc_got2_offset */ + 10, /* plt_tlsdesc_got1_insn_end */ + 16, /* plt_tlsdesc_got2_insn_end */ + 2, /* plt0_got1_offset */ + 8, /* plt0_got2_offset */ + 12, /* plt0_got2_insn_end */ + 2, /* plt_got_offset */ + 7, /* plt_reloc_offset */ + 12, /* plt_plt_offset */ + 6, /* plt_got_insn_size */ + LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */ + 6, /* plt_lazy_offset */ + elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */ }; -static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt = { - elf_x86_64_bnd_plt0_entry, /* plt0_entry */ - elf_x86_64_bnd_plt_entry, /* plt_entry */ - sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */ - 2, /* plt0_got1_offset */ - 1+8, /* plt0_got2_offset */ - 1+12, /* plt0_got2_insn_end */ - 1+2, /* plt_got_offset */ - 1, /* plt_reloc_offset */ - 7, /* plt_plt_offset */ - 1+6, /* plt_got_insn_size */ - 11, /* plt_plt_insn_end */ - 0, /* plt_lazy_offset */ - elf_x86_64_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_non_lazy_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */ + NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 2, /* plt_got_offset */ + 6, /* 
plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ }; -#define elf_backend_arch_data &elf_x86_64_arch_bed - -/* Is a undefined weak symbol which is resolved to 0. Reference to an - undefined weak symbol is resolved to 0 when building executable if - it isn't dynamic and - 1. Has non-GOT/non-PLT relocations in text section. Or - 2. Has no GOT/PLT relocation. - */ -#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \ - ((EH)->elf.root.type == bfd_link_hash_undefweak \ - && bfd_link_executable (INFO) \ - && (elf_x86_64_hash_table (INFO)->interp == NULL \ - || !(GOT_RELOC) \ - || (EH)->has_non_got_reloc \ - || !(INFO)->dynamic_undefined_weak)) - -/* x86-64 ELF linker hash entry. */ - -struct elf_x86_64_link_hash_entry -{ - struct elf_link_hash_entry elf; - - /* Track dynamic relocs copied for this symbol. */ - struct elf_dyn_relocs *dyn_relocs; - -#define GOT_UNKNOWN 0 -#define GOT_NORMAL 1 -#define GOT_TLS_GD 2 -#define GOT_TLS_IE 3 -#define GOT_TLS_GDESC 4 -#define GOT_TLS_GD_BOTH_P(type) \ - ((type) == (GOT_TLS_GD | GOT_TLS_GDESC)) -#define GOT_TLS_GD_P(type) \ - ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type)) -#define GOT_TLS_GDESC_P(type) \ - ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type)) -#define GOT_TLS_GD_ANY_P(type) \ - (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type)) - unsigned char tls_type; - - /* TRUE if a weak symbol with a real definition needs a copy reloc. - When there is a weak symbol with a real definition, the processor - independent code will have arranged for us to see the real - definition first. We need to copy the needs_copy bit from the - real definition and check it when allowing copy reloc in PIE. */ - unsigned int needs_copy : 1; - - /* TRUE if symbol has at least one BND relocation. */ - unsigned int has_bnd_reloc : 1; - - /* TRUE if symbol has GOT or PLT relocations. */ - unsigned int has_got_reloc : 1; - - /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */ - unsigned int has_non_got_reloc : 1; - - /* 0: symbol isn't __tls_get_addr. - 1: symbol is __tls_get_addr. - 2: symbol is unknown. */ - unsigned int tls_get_addr : 2; - - /* Reference count of C/C++ function pointer relocations in read-write - section which can be resolved at run-time. */ - bfd_signed_vma func_pointer_refcount; - - /* Information about the GOT PLT entry. Filled when there are both - GOT and PLT relocations against the same function. */ - union gotplt_union plt_got; - - /* Information about the second PLT entry. Filled when has_bnd_reloc is - set. */ - union gotplt_union plt_bnd; - - /* Offset of the GOTPLT entry reserved for the TLS descriptor, - starting at the end of the jump table. */ - bfd_vma tlsdesc_got; -}; - -#define elf_x86_64_hash_entry(ent) \ - ((struct elf_x86_64_link_hash_entry *)(ent)) - -struct elf_x86_64_obj_tdata -{ - struct elf_obj_tdata root; - - /* tls_type for each local got entry. */ - char *local_got_tls_type; - - /* GOTPLT entries for TLS descriptors. 
*/ - bfd_vma *local_tlsdesc_gotent; -}; - -#define elf_x86_64_tdata(abfd) \ - ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any) - -#define elf_x86_64_local_got_tls_type(abfd) \ - (elf_x86_64_tdata (abfd)->local_got_tls_type) - -#define elf_x86_64_local_tlsdesc_gotent(abfd) \ - (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent) - -#define is_x86_64_elf(bfd) \ - (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ - && elf_tdata (bfd) != NULL \ - && elf_object_id (bfd) == X86_64_ELF_DATA) - -static bfd_boolean -elf_x86_64_mkobject (bfd *abfd) -{ - return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata), - X86_64_ELF_DATA); -} - -/* x86-64 ELF linker hash table. */ - -struct elf_x86_64_link_hash_table -{ - struct elf_link_hash_table elf; - - /* Short-cuts to get to dynamic linker sections. */ - asection *interp; - asection *sdynbss; - asection *srelbss; - asection *plt_eh_frame; - asection *plt_bnd; - asection *plt_got; - - union +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt = { - bfd_signed_vma refcount; - bfd_vma offset; - } tls_ld_got; - - /* The amount of space used by the jump slots in the GOT. */ - bfd_vma sgotplt_jump_table_size; - - /* Small local sym cache. */ - struct sym_cache sym_cache; - - bfd_vma (*r_info) (bfd_vma, bfd_vma); - bfd_vma (*r_sym) (bfd_vma); - unsigned int pointer_r_type; - const char *dynamic_interpreter; - int dynamic_interpreter_size; - - /* _TLS_MODULE_BASE_ symbol. */ - struct bfd_link_hash_entry *tls_module_base; - - /* Used by local STT_GNU_IFUNC symbols. */ - htab_t loc_hash_table; - void * loc_hash_memory; - - /* The offset into splt of the PLT entry for the TLS descriptor - resolver. Special values are 0, if not necessary (or not found - to be necessary yet), and -1 if needed but not determined - yet. */ - bfd_vma tlsdesc_plt; - /* The offset into sgot of the GOT entry used by the PLT entry - above. */ - bfd_vma tlsdesc_got; - - /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */ - bfd_vma next_jump_slot_index; - /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */ - bfd_vma next_irelative_index; - - /* TRUE if there are dynamic relocs against IFUNC symbols that apply - to read-only sections. */ - bfd_boolean readonly_dynrelocs_against_ifunc; -}; - -/* Get the x86-64 ELF linker hash table from a link_info structure. */ + elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ + 6, /* plt_tlsdesc_got1_offset */ + 12, /* plt_tlsdesc_got2_offset */ + 10, /* plt_tlsdesc_got1_insn_end */ + 16, /* plt_tlsdesc_got2_insn_end */ + 2, /* plt0_got1_offset */ + 1+8, /* plt0_got2_offset */ + 1+12, /* plt0_got2_insn_end */ + 1+2, /* plt_got_offset */ + 1, /* plt_reloc_offset */ + 7, /* plt_plt_offset */ + 1+6, /* plt_got_insn_size */ + 11, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */ + }; -#define elf_x86_64_hash_table(p) \ - (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \ - == X86_64_ELF_DATA ? 
((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL) +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt = + { + elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */ + NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 1+2, /* plt_got_offset */ + 1+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ + }; -#define elf_x86_64_compute_jump_table_size(htab) \ - ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE) +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt = + { + elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ + 6, /* plt_tlsdesc_got1_offset */ + 12, /* plt_tlsdesc_got2_offset */ + 10, /* plt_tlsdesc_got1_insn_end */ + 16, /* plt_tlsdesc_got2_insn_end */ + 2, /* plt0_got1_offset */ + 1+8, /* plt0_got2_offset */ + 1+12, /* plt0_got2_insn_end */ + 4+1+2, /* plt_got_offset */ + 4+1, /* plt_reloc_offset */ + 4+1+6, /* plt_plt_offset */ + 4+1+6, /* plt_got_insn_size */ + 4+1+5+5, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ + }; -/* Create an entry in an x86-64 ELF linker hash table. */ +static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt = + { + elf_x86_64_lazy_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x32_lazy_ibt_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ + 6, /* plt_tlsdesc_got1_offset */ + 12, /* plt_tlsdesc_got2_offset */ + 10, /* plt_tlsdesc_got1_insn_end */ + 16, /* plt_tlsdesc_got2_insn_end */ + 2, /* plt0_got1_offset */ + 8, /* plt0_got2_offset */ + 12, /* plt0_got2_insn_end */ + 4+2, /* plt_got_offset */ + 4+1, /* plt_reloc_offset */ + 4+6, /* plt_plt_offset */ + 4+6, /* plt_got_insn_size */ + 4+5+5, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ + elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */ + elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ + sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ + }; -static struct bfd_hash_entry * -elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry, - struct bfd_hash_table *table, - const char *string) -{ - /* Allocate the structure if it has not already been allocated by a - subclass. 
*/ - if (entry == NULL) - { - entry = (struct bfd_hash_entry *) - bfd_hash_allocate (table, - sizeof (struct elf_x86_64_link_hash_entry)); - if (entry == NULL) - return entry; - } +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt = + { + elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 4+1+2, /* plt_got_offset */ + 4+1+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ + }; - /* Call the allocation method of the superclass. */ - entry = _bfd_elf_link_hash_newfunc (entry, table, string); - if (entry != NULL) - { - struct elf_x86_64_link_hash_entry *eh; - - eh = (struct elf_x86_64_link_hash_entry *) entry; - eh->dyn_relocs = NULL; - eh->tls_type = GOT_UNKNOWN; - eh->needs_copy = 0; - eh->has_bnd_reloc = 0; - eh->has_got_reloc = 0; - eh->has_non_got_reloc = 0; - eh->tls_get_addr = 2; - eh->func_pointer_refcount = 0; - eh->plt_bnd.offset = (bfd_vma) -1; - eh->plt_got.offset = (bfd_vma) -1; - eh->tlsdesc_got = (bfd_vma) -1; - } +static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt = + { + elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */ + elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 4+2, /* plt_got_offset */ + 4+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ + }; - return entry; -} +static const struct elf_x86_backend_data elf_x86_64_arch_bed = + { + is_normal /* os */ + }; -/* Compute a hash of a local hash entry. We use elf_link_hash_entry - for local symbol so that we can handle local STT_GNU_IFUNC symbols - as global symbol. We reuse indx and dynstr_index for local symbol - hash since they aren't used by global symbols in this backend. */ +#define elf_backend_arch_data &elf_x86_64_arch_bed -static hashval_t -elf_x86_64_local_htab_hash (const void *ptr) +static bfd_boolean +elf64_x86_64_elf_object_p (bfd *abfd) { - struct elf_link_hash_entry *h - = (struct elf_link_hash_entry *) ptr; - return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index); + /* Set the right machine number for an x86-64 elf64 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); + return TRUE; } -/* Compare local hash entries. */ - -static int -elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2) +static bfd_boolean +elf32_x86_64_elf_object_p (bfd *abfd) { - struct elf_link_hash_entry *h1 - = (struct elf_link_hash_entry *) ptr1; - struct elf_link_hash_entry *h2 - = (struct elf_link_hash_entry *) ptr2; - - return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index; + /* Set the right machine number for an x86-64 elf32 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); + return TRUE; } -/* Find and/or create a hash entry for local symbol. */ +/* Return TRUE if the TLS access code sequence support transition + from R_TYPE. 
*/ -static struct elf_link_hash_entry * -elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab, - bfd *abfd, const Elf_Internal_Rela *rel, - bfd_boolean create) +static bfd_boolean +elf_x86_64_check_tls_transition (bfd *abfd, + struct bfd_link_info *info, + asection *sec, + bfd_byte *contents, + Elf_Internal_Shdr *symtab_hdr, + struct elf_link_hash_entry **sym_hashes, + unsigned int r_type, + const Elf_Internal_Rela *rel, + const Elf_Internal_Rela *relend) { - struct elf_x86_64_link_hash_entry e, *ret; - asection *sec = abfd->sections; - hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id, - htab->r_sym (rel->r_info)); - void **slot; - - e.elf.indx = sec->id; - e.elf.dynstr_index = htab->r_sym (rel->r_info); - slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h, - create ? INSERT : NO_INSERT); - - if (!slot) - return NULL; - - if (*slot) - { - ret = (struct elf_x86_64_link_hash_entry *) *slot; - return &ret->elf; - } + unsigned int val; + unsigned long r_symndx; + bfd_boolean largepic = FALSE; + struct elf_link_hash_entry *h; + bfd_vma offset; + struct elf_x86_link_hash_table *htab; + bfd_byte *call; + bfd_boolean indirect_call; - ret = (struct elf_x86_64_link_hash_entry *) - objalloc_alloc ((struct objalloc *) htab->loc_hash_memory, - sizeof (struct elf_x86_64_link_hash_entry)); - if (ret) + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); + offset = rel->r_offset; + switch (r_type) { - memset (ret, 0, sizeof (*ret)); - ret->elf.indx = sec->id; - ret->elf.dynstr_index = htab->r_sym (rel->r_info); - ret->elf.dynindx = -1; - ret->func_pointer_refcount = 0; - ret->plt_got.offset = (bfd_vma) -1; - *slot = ret; - } - return &ret->elf; -} - -/* Destroy an X86-64 ELF linker hash table. */ - -static void -elf_x86_64_link_hash_table_free (bfd *obfd) -{ - struct elf_x86_64_link_hash_table *htab - = (struct elf_x86_64_link_hash_table *) obfd->link.hash; - - if (htab->loc_hash_table) - htab_delete (htab->loc_hash_table); - if (htab->loc_hash_memory) - objalloc_free ((struct objalloc *) htab->loc_hash_memory); - _bfd_elf_link_hash_table_free (obfd); -} + case R_X86_64_TLSGD: + case R_X86_64_TLSLD: + if ((rel + 1) >= relend) + return FALSE; -/* Create an X86-64 ELF linker hash table. */ + if (r_type == R_X86_64_TLSGD) + { + /* Check transition from GD access model. For 64bit, only + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr + can transit to different access model. For 32bit, only + leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr + can transit to different access model. 
For largepic, + we also support: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax + or + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $rbx, %rax + call *%rax */ -static struct bfd_link_hash_table * -elf_x86_64_link_hash_table_create (bfd *abfd) -{ - struct elf_x86_64_link_hash_table *ret; - bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table); + static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; - ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt); - if (ret == NULL) - return NULL; - - if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, - elf_x86_64_link_hash_newfunc, - sizeof (struct elf_x86_64_link_hash_entry), - X86_64_ELF_DATA)) - { - free (ret); - return NULL; - } - - if (ABI_64_P (abfd)) - { - ret->r_info = elf64_r_info; - ret->r_sym = elf64_r_sym; - ret->pointer_r_type = R_X86_64_64; - ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER; - ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER; - } - else - { - ret->r_info = elf32_r_info; - ret->r_sym = elf32_r_sym; - ret->pointer_r_type = R_X86_64_32; - ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER; - ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER; - } - - ret->loc_hash_table = htab_try_create (1024, - elf_x86_64_local_htab_hash, - elf_x86_64_local_htab_eq, - NULL); - ret->loc_hash_memory = objalloc_create (); - if (!ret->loc_hash_table || !ret->loc_hash_memory) - { - elf_x86_64_link_hash_table_free (abfd); - return NULL; - } - ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free; - - return &ret->elf.root; -} - -/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and - .rela.bss sections in DYNOBJ, and set up shortcuts to them in our - hash table. */ - -static bfd_boolean -elf_x86_64_create_dynamic_sections (bfd *dynobj, - struct bfd_link_info *info) -{ - struct elf_x86_64_link_hash_table *htab; - - if (!_bfd_elf_create_dynamic_sections (dynobj, info)) - return FALSE; - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - /* Set the contents of the .interp section to the interpreter. */ - if (bfd_link_executable (info) && !info->nointerp) - { - asection *s = bfd_get_linker_section (dynobj, ".interp"); - if (s == NULL) - abort (); - s->size = htab->dynamic_interpreter_size; - s->contents = (unsigned char *) htab->dynamic_interpreter; - htab->interp = s; - } - - htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); - if (!htab->sdynbss) - abort (); - - if (bfd_link_executable (info)) - { - /* Always allow copy relocs for building executables. */ - asection *s = bfd_get_linker_section (dynobj, ".rela.bss"); - if (s == NULL) - { - const struct elf_backend_data *bed = get_elf_backend_data (dynobj); - s = bfd_make_section_anyway_with_flags (dynobj, - ".rela.bss", - (bed->dynamic_sec_flags - | SEC_READONLY)); - if (s == NULL - || ! 
bfd_set_section_alignment (dynobj, s, - bed->s->log_file_align)) - return FALSE; - } - htab->srelbss = s; - } - - if (!info->no_ld_generated_unwind_info - && htab->plt_eh_frame == NULL - && htab->elf.splt != NULL) - { - flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY - | SEC_HAS_CONTENTS | SEC_IN_MEMORY - | SEC_LINKER_CREATED); - htab->plt_eh_frame - = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags); - if (htab->plt_eh_frame == NULL - || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3)) - return FALSE; - } - return TRUE; -} - -/* Copy the extra info we tack onto an elf_link_hash_entry. */ - -static void -elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *dir, - struct elf_link_hash_entry *ind) -{ - struct elf_x86_64_link_hash_entry *edir, *eind; - - edir = (struct elf_x86_64_link_hash_entry *) dir; - eind = (struct elf_x86_64_link_hash_entry *) ind; - - if (!edir->has_bnd_reloc) - edir->has_bnd_reloc = eind->has_bnd_reloc; - - if (!edir->has_got_reloc) - edir->has_got_reloc = eind->has_got_reloc; - - if (!edir->has_non_got_reloc) - edir->has_non_got_reloc = eind->has_non_got_reloc; - - if (eind->dyn_relocs != NULL) - { - if (edir->dyn_relocs != NULL) - { - struct elf_dyn_relocs **pp; - struct elf_dyn_relocs *p; - - /* Add reloc counts against the indirect sym to the direct sym - list. Merge any entries against the same section. */ - for (pp = &eind->dyn_relocs; (p = *pp) != NULL; ) - { - struct elf_dyn_relocs *q; - - for (q = edir->dyn_relocs; q != NULL; q = q->next) - if (q->sec == p->sec) - { - q->pc_count += p->pc_count; - q->count += p->count; - *pp = p->next; - break; - } - if (q == NULL) - pp = &p->next; - } - *pp = edir->dyn_relocs; - } - - edir->dyn_relocs = eind->dyn_relocs; - eind->dyn_relocs = NULL; - } - - if (ind->root.type == bfd_link_hash_indirect - && dir->got.refcount <= 0) - { - edir->tls_type = eind->tls_type; - eind->tls_type = GOT_UNKNOWN; - } - - if (ELIMINATE_COPY_RELOCS - && ind->root.type != bfd_link_hash_indirect - && dir->dynamic_adjusted) - { - /* If called to transfer flags for a weakdef during processing - of elf_adjust_dynamic_symbol, don't copy non_got_ref. - We clear it ourselves for ELIMINATE_COPY_RELOCS. */ - dir->ref_dynamic |= ind->ref_dynamic; - dir->ref_regular |= ind->ref_regular; - dir->ref_regular_nonweak |= ind->ref_regular_nonweak; - dir->needs_plt |= ind->needs_plt; - dir->pointer_equality_needed |= ind->pointer_equality_needed; - } - else - { - if (eind->func_pointer_refcount > 0) - { - edir->func_pointer_refcount += eind->func_pointer_refcount; - eind->func_pointer_refcount = 0; - } - - _bfd_elf_link_hash_copy_indirect (info, dir, ind); - } -} - -static bfd_boolean -elf64_x86_64_elf_object_p (bfd *abfd) -{ - /* Set the right machine number for an x86-64 elf64 file. */ - bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); - return TRUE; -} - -static bfd_boolean -elf32_x86_64_elf_object_p (bfd *abfd) -{ - /* Set the right machine number for an x86-64 elf32 file. */ - bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); - return TRUE; -} - -/* Return TRUE if the TLS access code sequence support transition - from R_TYPE. 
*/ - -static bfd_boolean -elf_x86_64_check_tls_transition (bfd *abfd, - struct bfd_link_info *info, - asection *sec, - bfd_byte *contents, - Elf_Internal_Shdr *symtab_hdr, - struct elf_link_hash_entry **sym_hashes, - unsigned int r_type, - const Elf_Internal_Rela *rel, - const Elf_Internal_Rela *relend) -{ - unsigned int val; - unsigned long r_symndx; - bfd_boolean largepic = FALSE; - struct elf_link_hash_entry *h; - bfd_vma offset; - struct elf_x86_64_link_hash_table *htab; - bfd_byte *call; - bfd_boolean indirect_call, tls_get_addr; - - htab = elf_x86_64_hash_table (info); - offset = rel->r_offset; - switch (r_type) - { - case R_X86_64_TLSGD: - case R_X86_64_TLSLD: - if ((rel + 1) >= relend) - return FALSE; - - if (r_type == R_X86_64_TLSGD) - { - /* Check transition from GD access model. For 64bit, only - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@PLT - or - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .byte 0x66; rex64 - call *__tls_get_addr@GOTPCREL(%rip) - which may be converted to - addr32 call __tls_get_addr - can transit to different access model. For 32bit, only - leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@PLT - or - leaq foo@tlsgd(%rip), %rdi - .byte 0x66; rex64 - call *__tls_get_addr@GOTPCREL(%rip) - which may be converted to - addr32 call __tls_get_addr - can transit to different access model. For largepic, - we also support: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq $r15, %rax - call *%rax - or - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq $rbx, %rax - call *%rax */ - - static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; - - if ((offset + 12) > sec->size) - return FALSE; + if ((offset + 12) > sec->size) + return FALSE; call = contents + offset + 4; if (call[0] != 0x66 @@ -1378,22 +1121,22 @@ elf_x86_64_check_tls_transition (bfd *abfd, /* Check transition from LD access model. Only leaq foo@tlsld(%rip), %rdi; call __tls_get_addr@PLT - or + or leaq foo@tlsld(%rip), %rdi; call *__tls_get_addr@GOTPCREL(%rip) which may be converted to addr32 call __tls_get_addr can transit to different access model. For largepic we also support: - leaq foo@tlsld(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq $r15, %rax - call *%rax + leaq foo@tlsld(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax or - leaq foo@tlsld(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq $rbx, %rax - call *%rax */ + leaq foo@tlsld(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $rbx, %rax + call *%rax */ static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; @@ -1426,37 +1169,21 @@ elf_x86_64_check_tls_transition (bfd *abfd, if (r_symndx < symtab_hdr->sh_info) return FALSE; - tls_get_addr = FALSE; h = sym_hashes[r_symndx - symtab_hdr->sh_info]; - if (h != NULL && h->root.root.string != NULL) - { - struct elf_x86_64_link_hash_entry *eh - = (struct elf_x86_64_link_hash_entry *) h; - tls_get_addr = eh->tls_get_addr == 1; - if (eh->tls_get_addr > 1) - { - /* Use strncmp to check __tls_get_addr since - __tls_get_addr may be versioned. 
*/ - if (strncmp (h->root.root.string, "__tls_get_addr", 14) - == 0) - { - eh->tls_get_addr = 1; - tls_get_addr = TRUE; - } - else - eh->tls_get_addr = 0; - } - } - - if (!tls_get_addr) + if (h == NULL + || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr) return FALSE; - else if (largepic) - return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64; - else if (indirect_call) - return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX; else - return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 - || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32); + { + r_type = (ELF32_R_TYPE (rel[1].r_info) + & ~R_X86_64_converted_reloc_bit); + if (largepic) + return r_type == R_X86_64_PLTOFF64; + else if (indirect_call) + return r_type == R_X86_64_GOTPCRELX; + else + return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32); + } case R_X86_64_GOTTPOFF: /* Check transition from IE access model: @@ -1493,7 +1220,8 @@ elf_x86_64_check_tls_transition (bfd *abfd, case R_X86_64_GOTPC32_TLSDESC: /* Check transition from GDesc access model: - leaq x@tlsdesc(%rip), %rax + leaq x@tlsdesc(%rip), %rax <--- LP64 mode. + rex leal x@tlsdesc(%rip), %eax <--- X32 mode. Make sure it's a leaq adding rip to a 32-bit offset into any register, although it's probably almost always @@ -1503,7 +1231,8 @@ elf_x86_64_check_tls_transition (bfd *abfd, return FALSE; val = bfd_get_8 (abfd, contents + offset - 3); - if ((val & 0xfb) != 0x48) + val &= 0xfb; + if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40)) return FALSE; if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d) @@ -1514,13 +1243,26 @@ elf_x86_64_check_tls_transition (bfd *abfd, case R_X86_64_TLSDESC_CALL: /* Check transition from GDesc access model: - call *x@tlsdesc(%rax) + call *x@tlsdesc(%rax) <--- LP64 mode. + call *x@tlsdesc(%eax) <--- X32 mode. */ if (offset + 2 <= sec->size) { - /* Make sure that it's a call *x@tlsdesc(%rax). */ + unsigned int prefix; call = contents + offset; - return call[0] == 0xff && call[1] == 0x10; + prefix = 0; + if (!ABI_64_P (abfd)) + { + /* Check for call *x@tlsdesc(%eax). */ + if (call[0] == 0x67) + { + prefix = 1; + if (offset + 3 > sec->size) + return FALSE; + } + } + /* Make sure that it's a call *x@tlsdesc(%rax). 
*/ + return call[prefix] == 0xff && call[1 + prefix] == 0x10; } return FALSE; @@ -1575,10 +1317,7 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, { unsigned int new_to_type = to_type; - if (bfd_link_executable (info) - && h != NULL - && h->dynindx == -1 - && tls_type == GOT_TLS_IE) + if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type)) new_to_type = R_X86_64_TPOFF32; if (to_type == R_X86_64_TLSGD @@ -1623,13 +1362,16 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, from = elf_x86_64_rtype_to_howto (abfd, from_type); to = elf_x86_64_rtype_to_howto (abfd, to_type); + if (from == NULL || to == NULL) + return FALSE; + if (h) name = h->root.root.string; else { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) name = "*unknown*"; else @@ -1642,11 +1384,11 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, } } - (*_bfd_error_handler) - (_("%B: TLS transition from %s to %s against `%s' at 0x%lx " - "in section `%A' failed"), - abfd, sec, from->name, to->name, name, - (unsigned long) rel->r_offset); + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64 + " in section `%pA' failed"), + abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec); bfd_set_error (bfd_error_bad_value); return FALSE; } @@ -1657,11 +1399,11 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, /* Rename some of the generic section flags to better document how they are used here. */ -#define need_convert_load sec_flg0 -#define check_relocs_failed sec_flg1 +#define check_relocs_failed sec_flg0 static bfd_boolean -elf_x86_64_need_pic (bfd *input_bfd, asection *sec, +elf_x86_64_need_pic (struct bfd_link_info *info, + bfd *input_bfd, asection *sec, struct elf_link_hash_entry *h, Elf_Internal_Shdr *symtab_hdr, Elf_Internal_Sym *isym, @@ -1670,6 +1412,7 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, const char *v = ""; const char *und = ""; const char *pic = ""; + const char *object; const char *name; if (h) @@ -1687,23 +1430,44 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, v = _("protected symbol "); break; default: - v = _("symbol "); - pic = _("; recompile with -fPIC"); + if (((struct elf_x86_link_hash_entry *) h)->def_protected) + v = _("protected symbol "); + else + v = _("symbol "); + pic = NULL; break; } - if (!h->def_regular && !h->def_dynamic) + if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic) und = _("undefined "); } else { name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL); - pic = _("; recompile with -fPIC"); + pic = NULL; + } + + if (bfd_link_dll (info)) + { + object = _("a shared object"); + if (!pic) + pic = _("; recompile with -fPIC"); + } + else + { + if (bfd_link_pie (info)) + object = _("a PIE object"); + else + object = _("a PDE object"); + if (!pic) + pic = _("; recompile with -fPIE"); } - (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can " - "not be used when making a shared object%s"), - input_bfd, howto->name, und, v, name, pic); + /* xgettext:c-format */ + _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can " + "not be used when making %s%s"), + input_bfd, howto->name, und, v, name, + object, pic); bfd_set_error (bfd_error_bad_value); sec->check_relocs_failed = 1; return FALSE; @@ -1729,27 +1493,29 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, instructions. 
*/ static bfd_boolean -elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, +elf_x86_64_convert_load_reloc (bfd *abfd, bfd_byte *contents, + unsigned int *r_type_p, Elf_Internal_Rela *irel, struct elf_link_hash_entry *h, bfd_boolean *converted, struct bfd_link_info *link_info) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; bfd_boolean is_pic; - bfd_boolean require_reloc_pc32; + bfd_boolean no_overflow; bfd_boolean relocx; bfd_boolean to_reloc_pc32; + bfd_boolean abs_symbol; + bfd_boolean local_ref; asection *tsec; - char symtype; bfd_signed_vma raddend; unsigned int opcode; unsigned int modrm; - unsigned int r_type = ELF32_R_TYPE (irel->r_info); + unsigned int r_type = *r_type_p; unsigned int r_symndx; - bfd_vma toff; bfd_vma roff = irel->r_offset; + bfd_vma abs_relocation; if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) return TRUE; @@ -1759,16 +1525,14 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, if (raddend != -4) return TRUE; - htab = elf_x86_64_hash_table (link_info); + htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA); is_pic = bfd_link_pic (link_info); relocx = (r_type == R_X86_64_GOTPCRELX || r_type == R_X86_64_REX_GOTPCRELX); - /* TRUE if we can convert only to R_X86_64_PC32. Enable it for - --no-relax. */ - require_reloc_pc32 - = link_info->disable_target_specific_optimizations > 1; + /* TRUE if --no-relax is used. */ + no_overflow = link_info->disable_target_specific_optimizations > 1; r_symndx = htab->r_sym (irel->r_info); @@ -1787,14 +1551,17 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, /* We convert only to R_X86_64_PC32: 1. Branch. 2. R_X86_64_GOTPCREL since we can't modify REX byte. - 3. require_reloc_pc32 is true. + 3. no_overflow is true. 4. PIC. */ to_reloc_pc32 = (opcode == 0xff || !relocx - || require_reloc_pc32 + || no_overflow || is_pic); + abs_symbol = FALSE; + abs_relocation = 0; + /* Get the symbol referred to by the reloc. */ if (h == NULL) { @@ -1805,18 +1572,19 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, if (isym->st_shndx == SHN_UNDEF) return TRUE; - symtype = ELF_ST_TYPE (isym->st_info); - + local_ref = TRUE; if (isym->st_shndx == SHN_ABS) - tsec = bfd_abs_section_ptr; + { + tsec = bfd_abs_section_ptr; + abs_symbol = TRUE; + abs_relocation = isym->st_value; + } else if (isym->st_shndx == SHN_COMMON) tsec = bfd_com_section_ptr; else if (isym->st_shndx == SHN_X86_64_LCOMMON) tsec = &_bfd_elf_large_com_section; else tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); - - toff = isym->st_value; } else { @@ -1826,16 +1594,23 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, GOTPCRELX relocations since we need to modify REX byte. It is OK convert mov with R_X86_64_GOTPCREL to R_X86_64_PC32. */ + struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h); + + abs_symbol = ABS_SYMBOL_P (h); + abs_relocation = h->root.u.def.value; + + /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */ + local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h); if ((relocx || opcode == 0x8b) - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info, - TRUE, - elf_x86_64_hash_entry (h))) + && (h->root.type == bfd_link_hash_undefweak + && !eh->linker_def + && local_ref)) { if (opcode == 0xff) { /* Skip for branch instructions since R_X86_64_PC32 may overflow. 
*/ - if (require_reloc_pc32) + if (no_overflow) return TRUE; } else if (relocx) @@ -1855,27 +1630,34 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, } /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since ld.so may use its link-time address. */ - else if ((h->def_regular - || h->root.type == bfd_link_hash_defined - || h->root.type == bfd_link_hash_defweak) - && h != htab->elf.hdynamic - && SYMBOL_REFERENCES_LOCAL (link_info, h)) + else if (h->start_stop + || eh->linker_def + || ((h->def_regular + || h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h != htab->elf.hdynamic + && local_ref)) { /* bfd_link_hash_new or bfd_link_hash_undefined is set by an assignment in a linker script in - bfd_elf_record_link_assignment. */ - if (h->def_regular - && (h->root.type == bfd_link_hash_new - || h->root.type == bfd_link_hash_undefined)) + bfd_elf_record_link_assignment. start_stop is set + on __start_SECNAME/__stop_SECNAME which mark section + SECNAME. */ + if (h->start_stop + || eh->linker_def + || (h->def_regular + && (h->root.type == bfd_link_hash_new + || h->root.type == bfd_link_hash_undefined + || ((h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h->root.u.def.section == bfd_und_section_ptr)))) { /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ - if (require_reloc_pc32) + if (no_overflow) return TRUE; goto convert; } tsec = h->root.u.def.section; - toff = h->root.u.def.value; - symtype = h->type; } else return TRUE; @@ -1886,94 +1668,11 @@ elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0) return TRUE; - /* We can only estimate relocation overflow for R_X86_64_PC32. */ - if (!to_reloc_pc32) - goto convert; - - if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE) - { - /* At this stage in linking, no SEC_MERGE symbol has been - adjusted, so all references to such symbols need to be - passed through _bfd_merged_section_offset. (Later, in - relocate_section, all SEC_MERGE symbols *except* for - section symbols have been adjusted.) - - gas may reduce relocations against symbols in SEC_MERGE - sections to a relocation against the section symbol when - the original addend was zero. When the reloc is against - a section symbol we should include the addend in the - offset passed to _bfd_merged_section_offset, since the - location of interest is the original symbol. On the - other hand, an access to "sym+addend" where "sym" is not - a section symbol should not include the addend; Such an - access is presumed to be an offset from "sym"; The - location of interest is just "sym". */ - if (symtype == STT_SECTION) - toff += raddend; - - toff = _bfd_merged_section_offset (abfd, &tsec, - elf_section_data (tsec)->sec_info, - toff); - - if (symtype != STT_SECTION) - toff += raddend; - } - else - toff += raddend; - - /* Don't convert if R_X86_64_PC32 relocation overflows. */ - if (tsec->output_section == sec->output_section) - { - if ((toff - roff + 0x80000000) > 0xffffffff) - return TRUE; - } - else - { - bfd_signed_vma distance; - - /* At this point, we don't know the load addresses of TSEC - section nor SEC section. We estimate the distrance between - SEC and TSEC. We store the estimated distances in the - compressed_size field of the output section, which is only - used to decompress the compressed input section. 
*/ - if (sec->output_section->compressed_size == 0) - { - asection *asect; - bfd_size_type size = 0; - for (asect = link_info->output_bfd->sections; - asect != NULL; - asect = asect->next) - /* Skip debug sections since compressed_size is used to - compress debug sections. */ - if ((asect->flags & SEC_DEBUGGING) == 0) - { - asection *i; - for (i = asect->map_head.s; - i != NULL; - i = i->map_head.s) - { - size = align_power (size, i->alignment_power); - size += i->size; - } - asect->compressed_size = size; - } - } - - /* Don't convert GOTPCREL relocations if TSEC isn't placed - after SEC. */ - distance = (tsec->output_section->compressed_size - - sec->output_section->compressed_size); - if (distance < 0) - return TRUE; - - /* Take PT_GNU_RELRO segment into account by adding - maxpagesize. */ - if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize - - roff + 0x80000000) > 0xffffffff) - return TRUE; - } + /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */ + if (no_overflow) + return TRUE; -convert: + convert: if (opcode == 0xff) { /* We have "call/jmp *foo@GOTPCREL(%rip)". */ @@ -1996,23 +1695,23 @@ convert: } else { - struct elf_x86_64_link_hash_entry *eh - = (struct elf_x86_64_link_hash_entry *) h; + struct elf_x86_link_hash_entry *eh + = (struct elf_x86_link_hash_entry *) h; /* Convert to "nop call foo". ADDR_PREFIX_OPCODE is a nop prefix. */ modrm = 0xe8; /* To support TLS optimization, always use addr32 prefix for "call *__tls_get_addr@GOTPCREL(%rip)". */ - if (eh && eh->tls_get_addr == 1) + if (eh && eh->tls_get_addr) { nop = 0x67; nop_offset = irel->r_offset - 2; } else { - nop = link_info->call_nop_byte; - if (link_info->call_nop_as_suffix) + nop = htab->params->call_nop_byte; + if (htab->params->call_nop_as_suffix) { nop_offset = irel->r_offset + 3; disp = bfd_get_32 (abfd, contents + irel->r_offset); @@ -2039,6 +1738,9 @@ convert: if (opcode == 0x8b) { + if (abs_symbol && local_ref) + to_reloc_pc32 = FALSE; + if (to_reloc_pc32) { /* Convert "mov foo@GOTPCREL(%rip), %reg" to @@ -2098,7 +1800,22 @@ convert: overflow when sign-extending imm32 to imm64. */ r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; -rewrite_modrm_rex: + rewrite_modrm_rex: + if (abs_relocation) + { + /* Check if R_X86_64_32S/R_X86_64_32 fits. */ + if (r_type == R_X86_64_32S) + { + if ((abs_relocation + 0x80000000) > 0xffffffff) + return TRUE; + } + else + { + if (abs_relocation > 0xffffffff) + return TRUE; + } + } + bfd_put_8 (abfd, modrm, contents + roff - 1); if (rex) @@ -2115,7 +1832,9 @@ rewrite_modrm_rex: bfd_put_8 (abfd, opcode, contents + roff - 2); } - irel->r_info = htab->r_info (r_symndx, r_type); + *r_type_p = r_type; + irel->r_info = htab->r_info (r_symndx, + r_type | R_X86_64_converted_reloc_bit); *converted = TRUE; @@ -2131,27 +1850,36 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec, const Elf_Internal_Rela *relocs) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; asection *sreloc; bfd_byte *contents; - bfd_boolean use_plt_got; + bfd_boolean converted; if (bfd_link_relocatable (info)) return TRUE; - BFD_ASSERT (is_x86_64_elf (abfd)); + /* Don't do anything special with non-loaded, non-alloced sections. + In particular, any relocs in such sections should not affect GOT + and PLT reference counting (ie. 
we don't allow them to create GOT + or PLT entries), there's no possibility or desire to optimize TLS + relocs, and there's not much point in propagating relocs to shared + libs that the dynamic linker won't relocate. */ + if ((sec->flags & SEC_ALLOC) == 0) + return TRUE; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) { sec->check_relocs_failed = 1; return FALSE; } + BFD_ASSERT (is_x86_elf (abfd, htab)); + /* Get the section contents. */ if (elf_section_data (sec)->this_hdr.contents != NULL) contents = elf_section_data (sec)->this_hdr.contents; @@ -2161,31 +1889,34 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, return FALSE; } - use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed; - symtab_hdr = &elf_symtab_hdr (abfd); sym_hashes = elf_sym_hashes (abfd); + converted = FALSE; + sreloc = NULL; rel_end = relocs + sec->reloc_count; for (rel = relocs; rel < rel_end; rel++) { unsigned int r_type; - unsigned long r_symndx; + unsigned int r_symndx; struct elf_link_hash_entry *h; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_entry *eh; Elf_Internal_Sym *isym; const char *name; bfd_boolean size_reloc; + bfd_boolean converted_reloc; + bfd_boolean no_dynreloc; r_symndx = htab->r_sym (rel->r_info); r_type = ELF32_R_TYPE (rel->r_info); if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) { - (*_bfd_error_handler) (_("%B: bad symbol index: %d"), - abfd, r_symndx); + /* xgettext:c-format */ + _bfd_error_handler (_("%pB: bad symbol index: %d"), + abfd, r_symndx); goto error_return; } @@ -2200,12 +1931,14 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, /* Check relocation against local STT_GNU_IFUNC symbol. */ if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) { - h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, - TRUE); + h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel, + TRUE); if (h == NULL) goto error_return; /* Fake a STT_GNU_IFUNC symbol. */ + h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr, + isym, NULL); h->type = STT_GNU_IFUNC; h->def_regular = 1; h->ref_regular = 1; @@ -2246,8 +1979,9 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) - (_("%B: relocation %s against symbol `%s' isn't " + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: relocation %s against symbol `%s' isn't " "supported in x32 mode"), abfd, x86_64_elf_howto_table[r_type].name, name); bfd_set_error (bfd_error_bad_value); @@ -2258,100 +1992,53 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (h != NULL) { - switch (r_type) - { - default: - break; - - case R_X86_64_PC32_BND: - case R_X86_64_PLT32_BND: - case R_X86_64_PC32: - case R_X86_64_PLT32: - case R_X86_64_32: - case R_X86_64_64: - /* MPX PLT is supported only if elf_x86_64_arch_bed - is used in 64-bit mode. */ - if (ABI_64_P (abfd) - && info->bndplt - && (get_elf_x86_64_backend_data (abfd) - == &elf_x86_64_arch_bed)) - { - elf_x86_64_hash_entry (h)->has_bnd_reloc = 1; - - /* Create the second PLT for Intel MPX support. 
*/ - if (htab->plt_bnd == NULL) - { - unsigned int plt_bnd_align; - const struct elf_backend_data *bed; - - bed = get_elf_backend_data (info->output_bfd); - BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8 - && (sizeof (elf_x86_64_bnd_plt2_entry) - == sizeof (elf_x86_64_legacy_plt2_entry))); - plt_bnd_align = 3; - - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - htab->plt_bnd - = bfd_make_section_anyway_with_flags (htab->elf.dynobj, - ".plt.bnd", - (bed->dynamic_sec_flags - | SEC_ALLOC - | SEC_CODE - | SEC_LOAD - | SEC_READONLY)); - if (htab->plt_bnd == NULL - || !bfd_set_section_alignment (htab->elf.dynobj, - htab->plt_bnd, - plt_bnd_align)) - goto error_return; - } - } - - case R_X86_64_32S: - case R_X86_64_PC64: - case R_X86_64_GOTPCREL: - case R_X86_64_GOTPCRELX: - case R_X86_64_REX_GOTPCRELX: - case R_X86_64_GOTPCREL64: - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - /* Create the ifunc sections for static executables. */ - if (h->type == STT_GNU_IFUNC - && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj, - info)) - goto error_return; - break; - } - /* It is referenced by a non-shared object. */ h->ref_regular = 1; - h->root.non_ir_ref = 1; - - if (h->type == STT_GNU_IFUNC) - elf_tdata (info->output_bfd)->has_gnu_symbols - |= elf_gnu_symbol_ifunc; } - if (! elf_x86_64_tls_transition (info, abfd, sec, contents, - symtab_hdr, sym_hashes, - &r_type, GOT_UNKNOWN, - rel, rel_end, h, r_symndx, FALSE)) - goto error_return; + converted_reloc = FALSE; + if ((r_type == R_X86_64_GOTPCREL + || r_type == R_X86_64_GOTPCRELX + || r_type == R_X86_64_REX_GOTPCRELX) + && (h == NULL || h->type != STT_GNU_IFUNC)) + { + Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel; + if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type, + irel, h, &converted_reloc, + info)) + goto error_return; + + if (converted_reloc) + converted = TRUE; + } + + if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym, + symtab_hdr, &no_dynreloc)) + return FALSE; + + if (! elf_x86_64_tls_transition (info, abfd, sec, contents, + symtab_hdr, sym_hashes, + &r_type, GOT_UNKNOWN, + rel, rel_end, h, r_symndx, FALSE)) + goto error_return; - eh = (struct elf_x86_64_link_hash_entry *) h; + /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. 
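   An explicit reference to the GOT base (e.g. from hand-written assembly) means it must still be set up even if no GOT entries end up being allocated, so note the fact here for the later section-sizing pass.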
*/ + if (h == htab->elf.hgot) + htab->got_referenced = TRUE; + + eh = (struct elf_x86_link_hash_entry *) h; switch (r_type) { case R_X86_64_TLSLD: - htab->tls_ld_got.refcount += 1; + htab->tls_ld_or_ldm_got.refcount = 1; goto create_got; case R_X86_64_TPOFF32: if (!bfd_link_executable (info) && ABI_64_P (abfd)) - return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym, + return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, &x86_64_elf_howto_table[r_type]); if (eh != NULL) - eh->has_got_reloc = 1; + eh->zero_undefweak &= 0x2; break; case R_X86_64_GOTTPOFF: @@ -2375,17 +2062,31 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, switch (r_type) { - default: tls_type = GOT_NORMAL; break; - case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break; - case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break; + default: + tls_type = GOT_NORMAL; + if (h) + { + if (ABS_SYMBOL_P (h)) + tls_type = GOT_ABS; + } + else if (isym->st_shndx == SHN_ABS) + tls_type = GOT_ABS; + break; + case R_X86_64_TLSGD: + tls_type = GOT_TLS_GD; + break; + case R_X86_64_GOTTPOFF: + tls_type = GOT_TLS_IE; + break; case R_X86_64_GOTPC32_TLSDESC: case R_X86_64_TLSDESC_CALL: - tls_type = GOT_TLS_GDESC; break; + tls_type = GOT_TLS_GDESC; + break; } if (h != NULL) { - h->got.refcount += 1; + h->got.refcount = 1; old_tls_type = eh->tls_type; } else @@ -2406,14 +2107,14 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (local_got_refcounts == NULL) goto error_return; elf_local_got_refcounts (abfd) = local_got_refcounts; - elf_x86_64_local_tlsdesc_gotent (abfd) + elf_x86_local_tlsdesc_gotent (abfd) = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info); - elf_x86_64_local_got_tls_type (abfd) + elf_x86_local_got_tls_type (abfd) = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info); } - local_got_refcounts[r_symndx] += 1; + local_got_refcounts[r_symndx] = 1; old_tls_type - = elf_x86_64_local_got_tls_type (abfd) [r_symndx]; + = elf_x86_local_got_tls_type (abfd) [r_symndx]; } /* If a TLS symbol is accessed using IE at least once, @@ -2434,8 +2135,10 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) - (_("%B: '%s' accessed both as normal and thread local symbol"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: '%s' accessed both as normal and" + " thread local symbol"), abfd, name); bfd_set_error (bfd_error_bad_value); goto error_return; @@ -2447,7 +2150,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (eh != NULL) eh->tls_type = tls_type; else - elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type; + elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type; } } /* Fall through */ @@ -2457,15 +2160,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_GOTPC64: create_got: if (eh != NULL) - eh->has_got_reloc = 1; - if (htab->elf.sgot == NULL) - { - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - if (!_bfd_elf_create_got_section (htab->elf.dynobj, - info)) - goto error_return; - } + eh->zero_undefweak &= 0x2; break; case R_X86_64_PLT32: @@ -2482,9 +2177,9 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (h == NULL) continue; - eh->has_got_reloc = 1; + eh->zero_undefweak &= 0x2; h->needs_plt = 1; - h->plt.refcount += 1; + h->plt.refcount = 1; break; case R_X86_64_PLTOFF64: @@ -2493,7 +2188,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (h != NULL) { h->needs_plt = 1; 
- h->plt.refcount += 1; + h->plt.refcount = 1; } goto create_got; @@ -2505,6 +2200,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_32: if (!ABI_64_P (abfd)) goto pointer; + /* Fall through. */ case R_X86_64_8: case R_X86_64_16: case R_X86_64_32S: @@ -2512,15 +2208,15 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, run-time relocation overflow. Don't error out for sections we don't care about, such as debug sections or when relocation overflow check is disabled. */ - if (!info->no_reloc_overflow_check + if (!htab->params->no_reloc_overflow_check + && !converted_reloc && (bfd_link_pic (info) || (bfd_link_executable (info) && h != NULL && !h->def_regular && h->def_dynamic - && (sec->flags & SEC_READONLY) == 0)) - && (sec->flags & SEC_ALLOC) != 0) - return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym, + && (sec->flags & SEC_READONLY) == 0))) + return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, &x86_64_elf_howto_table[r_type]); /* Fall through. */ @@ -2530,37 +2226,35 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_PC32_BND: case R_X86_64_PC64: case R_X86_64_64: -pointer: + pointer: if (eh != NULL && (sec->flags & SEC_CODE) != 0) - eh->has_non_got_reloc = 1; - /* STT_GNU_IFUNC symbol must go through PLT even if it is - locally defined and undefined symbol may turn out to be - a STT_GNU_IFUNC symbol later. */ + eh->zero_undefweak |= 0x2; + /* We are called after all symbols have been resolved. Only + relocation against STT_GNU_IFUNC symbol must go through + PLT. */ if (h != NULL && (bfd_link_executable (info) - || ((h->type == STT_GNU_IFUNC - || h->root.type == bfd_link_hash_undefweak - || h->root.type == bfd_link_hash_undefined) - && SYMBOLIC_BIND (info, h)))) + || h->type == STT_GNU_IFUNC)) { - /* If this reloc is in a read-only section, we might - need a copy reloc. We can't check reliably at this - stage whether the section is read-only, as input - sections have not yet been mapped to output sections. - Tentatively set the flag for now, and correct in - adjust_dynamic_symbol. */ - h->non_got_ref = 1; - - /* We may need a .plt entry if the function this reloc - refers to is in a shared lib. */ - h->plt.refcount += 1; + bfd_boolean func_pointer_ref = FALSE; + if (r_type == R_X86_64_PC32) { /* Since something like ".long foo - ." may be used as pointer, make sure that PLT is used if foo is a function defined in a shared library. */ if ((sec->flags & SEC_CODE) == 0) - h->pointer_equality_needed = 1; + { + h->pointer_equality_needed = 1; + if (bfd_link_pie (info) + && h->type == STT_FUNC + && !h->def_regular + && h->def_dynamic) + { + h->needs_plt = 1; + h->plt.refcount = 1; + } + } } else if (r_type != R_X86_64_PC32_BND && r_type != R_X86_64_PC64) @@ -2574,47 +2268,33 @@ pointer: || (!ABI_64_P (abfd) && (r_type == R_X86_64_32 || r_type == R_X86_64_32S)))) - eh->func_pointer_refcount += 1; + func_pointer_ref = TRUE; + } + + if (!func_pointer_ref) + { + /* If this reloc is in a read-only section, we might + need a copy reloc. We can't check reliably at this + stage whether the section is read-only, as input + sections have not yet been mapped to output sections. + Tentatively set the flag for now, and correct in + adjust_dynamic_symbol. */ + h->non_got_ref = 1; + + /* We may need a .plt entry if the symbol is a function + defined in a shared lib or is a function referenced + from the code or read-only section. 
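   For example, a call to a function that turns out to live only in a shared library has to go through the PLT, and a reference from code or a read-only section cannot be patched at run time without a text relocation, so resolving it to a local PLT entry is generally the cheaper choice.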
*/ + if (!h->def_regular + || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0) + h->plt.refcount = 1; } } size_reloc = FALSE; -do_size: - /* If we are creating a shared library, and this is a reloc - against a global symbol, or a non PC relative reloc - against a local symbol, then we need to copy the reloc - into the shared library. However, if we are linking with - -Bsymbolic, we do not need to copy a reloc against a - global symbol which is defined in an object we are - including in the link (i.e., DEF_REGULAR is set). At - this point we have not seen all the input files, so it is - possible that DEF_REGULAR is not set now but will be set - later (it is never cleared). In case of a weak definition, - DEF_REGULAR may be cleared later by a strong definition in - a shared library. We account for that possibility below by - storing information in the relocs_copied field of the hash - table entry. A similar situation occurs when creating - shared libraries and symbol visibility changes render the - symbol local. - - If on the other hand, we are creating an executable, we - may need to keep relocations for symbols satisfied by a - dynamic library if we manage to avoid copy relocs for the - symbol. */ - if ((bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 - && (! IS_X86_64_PCREL_TYPE (r_type) - || (h != NULL - && (! (bfd_link_pie (info) - || SYMBOLIC_BIND (info, h)) - || h->root.type == bfd_link_hash_defweak - || !h->def_regular)))) - || (ELIMINATE_COPY_RELOCS - && !bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 - && h != NULL - && (h->root.type == bfd_link_hash_defweak - || !h->def_regular))) + do_size: + if (!no_dynreloc + && NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type, + htab->pointer_r_type)) { struct elf_dyn_relocs *p; struct elf_dyn_relocs **head; @@ -2624,9 +2304,6 @@ do_size: this reloc. */ if (sreloc == NULL) { - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - sreloc = _bfd_elf_make_dynamic_reloc_section (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2, abfd, /*rela?*/ TRUE); @@ -2665,7 +2342,7 @@ do_size: p = *head; if (p == NULL || p->sec != sec) { - bfd_size_type amt = sizeof *p; + size_t amt = sizeof *p; p = ((struct elf_dyn_relocs *) bfd_alloc (htab->elf.dynobj, amt)); @@ -2680,7 +2357,7 @@ do_size: p->count += 1; /* Count size relocation as PC-relative relocation. */ - if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc) + if (X86_PCREL_TYPE_P (r_type) || size_reloc) p->pc_count += 1; } break; @@ -2695,1317 +2372,38 @@ do_size: /* This relocation describes which C++ vtable entries are actually used. Record for later use during GC. */ case R_X86_64_GNU_VTENTRY: - BFD_ASSERT (h != NULL); - if (h != NULL - && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) + if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) goto error_return; break; default: break; } - - if (use_plt_got - && h != NULL - && h->plt.refcount > 0 - && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) - || h->got.refcount > 0) - && htab->plt_got == NULL) - { - /* Create the GOT procedure linkage table. 
*/ - unsigned int plt_got_align; - const struct elf_backend_data *bed; - - bed = get_elf_backend_data (info->output_bfd); - BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8 - && (sizeof (elf_x86_64_bnd_plt2_entry) - == sizeof (elf_x86_64_legacy_plt2_entry))); - plt_got_align = 3; - - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - htab->plt_got - = bfd_make_section_anyway_with_flags (htab->elf.dynobj, - ".plt.got", - (bed->dynamic_sec_flags - | SEC_ALLOC - | SEC_CODE - | SEC_LOAD - | SEC_READONLY)); - if (htab->plt_got == NULL - || !bfd_set_section_alignment (htab->elf.dynobj, - htab->plt_got, - plt_got_align)) - goto error_return; - } - - if ((r_type == R_X86_64_GOTPCREL - || r_type == R_X86_64_GOTPCRELX - || r_type == R_X86_64_REX_GOTPCRELX) - && (h == NULL || h->type != STT_GNU_IFUNC)) - sec->need_convert_load = 1; } if (elf_section_data (sec)->this_hdr.contents != contents) { - if (!info->keep_memory) + if (!converted && !info->keep_memory) free (contents); else { - /* Cache the section contents for elf_link_input_bfd. */ - elf_section_data (sec)->this_hdr.contents = contents; - } - } - - return TRUE; - -error_return: - if (elf_section_data (sec)->this_hdr.contents != contents) - free (contents); - sec->check_relocs_failed = 1; - return FALSE; -} - -/* Return the section that should be marked against GC for a given - relocation. */ - -static asection * -elf_x86_64_gc_mark_hook (asection *sec, - struct bfd_link_info *info, - Elf_Internal_Rela *rel, - struct elf_link_hash_entry *h, - Elf_Internal_Sym *sym) -{ - if (h != NULL) - switch (ELF32_R_TYPE (rel->r_info)) - { - case R_X86_64_GNU_VTINHERIT: - case R_X86_64_GNU_VTENTRY: - return NULL; - } - - return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); -} - -/* Remove undefined weak symbol from the dynamic symbol table if it - is resolved to 0. */ - -static bfd_boolean -elf_x86_64_fixup_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *h) -{ - if (h->dynindx != -1 - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, - elf_x86_64_hash_entry (h)->has_got_reloc, - elf_x86_64_hash_entry (h))) - { - h->dynindx = -1; - _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr, - h->dynstr_index); - } - return TRUE; -} - -/* Adjust a symbol defined by a dynamic object and referenced by a - regular object. The current definition is in some section of the - dynamic object, but we're not including those sections. We have to - change the definition to something the rest of the link can - understand. */ - -static bfd_boolean -elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *h) -{ - struct elf_x86_64_link_hash_table *htab; - asection *s; - struct elf_x86_64_link_hash_entry *eh; - struct elf_dyn_relocs *p; - - /* STT_GNU_IFUNC symbol must go through PLT. */ - if (h->type == STT_GNU_IFUNC) - { - /* All local STT_GNU_IFUNC references must be treate as local - calls via local PLT. 
*/ - if (h->ref_regular - && SYMBOL_CALLS_LOCAL (info, h)) - { - bfd_size_type pc_count = 0, count = 0; - struct elf_dyn_relocs **pp; - - eh = (struct elf_x86_64_link_hash_entry *) h; - for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) - { - pc_count += p->pc_count; - p->count -= p->pc_count; - p->pc_count = 0; - count += p->count; - if (p->count == 0) - *pp = p->next; - else - pp = &p->next; - } - - if (pc_count || count) - { - h->needs_plt = 1; - h->non_got_ref = 1; - if (h->plt.refcount <= 0) - h->plt.refcount = 1; - else - h->plt.refcount += 1; - } - } - - if (h->plt.refcount <= 0) - { - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - return TRUE; - } - - /* If this is a function, put it in the procedure linkage table. We - will fill in the contents of the procedure linkage table later, - when we know the address of the .got section. */ - if (h->type == STT_FUNC - || h->needs_plt) - { - if (h->plt.refcount <= 0 - || SYMBOL_CALLS_LOCAL (info, h) - || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT - && h->root.type == bfd_link_hash_undefweak)) - { - /* This case can occur if we saw a PLT32 reloc in an input - file, but the symbol was never referred to by a dynamic - object, or if all references were garbage collected. In - such a case, we don't actually need to build a procedure - linkage table, and we can just do a PC32 reloc instead. */ - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - - return TRUE; - } - else - /* It's possible that we incorrectly decided a .plt reloc was - needed for an R_X86_64_PC32 reloc to a non-function sym in - check_relocs. We can't decide accurately between function and - non-function syms in check-relocs; Objects loaded later in - the link may change h->type. So fix it now. */ - h->plt.offset = (bfd_vma) -1; - - /* If this is a weak symbol, and there is a real definition, the - processor independent code will have arranged for us to see the - real definition first, and we can just use the same value. */ - if (h->u.weakdef != NULL) - { - BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined - || h->u.weakdef->root.type == bfd_link_hash_defweak); - h->root.u.def.section = h->u.weakdef->root.u.def.section; - h->root.u.def.value = h->u.weakdef->root.u.def.value; - if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) - { - eh = (struct elf_x86_64_link_hash_entry *) h; - h->non_got_ref = h->u.weakdef->non_got_ref; - eh->needs_copy = h->u.weakdef->needs_copy; - } - return TRUE; - } - - /* This is a reference to a symbol defined by a dynamic object which - is not a function. */ - - /* If we are creating a shared library, we must presume that the - only references to the symbol are via the global offset table. - For such cases we need not do anything here; the relocations will - be handled correctly by relocate_section. */ - if (!bfd_link_executable (info)) - return TRUE; - - /* If there are no references to this symbol that do not use the - GOT, we don't need to generate a copy reloc. */ - if (!h->non_got_ref) - return TRUE; - - /* If -z nocopyreloc was given, we won't generate them either. */ - if (info->nocopyreloc) - { - h->non_got_ref = 0; - return TRUE; - } - - if (ELIMINATE_COPY_RELOCS) - { - eh = (struct elf_x86_64_link_hash_entry *) h; - for (p = eh->dyn_relocs; p != NULL; p = p->next) - { - s = p->sec->output_section; - if (s != NULL && (s->flags & SEC_READONLY) != 0) - break; - } - - /* If we didn't find any dynamic relocs in read-only sections, then - we'll be keeping the dynamic relocs and avoiding the copy reloc. 
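   A copy relocation would duplicate the variable in the executable's .dynbss; when the only dynamic relocs needed land in writable sections it is usually cheaper to keep them and skip the copy.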
*/ - if (p == NULL) - { - h->non_got_ref = 0; - return TRUE; - } - } - - /* We must allocate the symbol in our .dynbss section, which will - become part of the .bss section of the executable. There will be - an entry for this symbol in the .dynsym section. The dynamic - object will contain position independent code, so all references - from the dynamic object to this symbol will go through the global - offset table. The dynamic linker will use the .dynsym entry to - determine the address it must put in the global offset table, so - both the dynamic object and the regular object will refer to the - same memory location for the variable. */ - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker - to copy the initial value out of the dynamic object and into the - runtime process image. */ - if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) - { - const struct elf_backend_data *bed; - bed = get_elf_backend_data (info->output_bfd); - htab->srelbss->size += bed->s->sizeof_rela; - h->needs_copy = 1; - } - - s = htab->sdynbss; - - return _bfd_elf_adjust_dynamic_copy (info, h, s); -} - -/* Allocate space in .plt, .got and associated reloc sections for - dynamic relocs. */ - -static bfd_boolean -elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) -{ - struct bfd_link_info *info; - struct elf_x86_64_link_hash_table *htab; - struct elf_x86_64_link_hash_entry *eh; - struct elf_dyn_relocs *p; - const struct elf_backend_data *bed; - unsigned int plt_entry_size; - bfd_boolean resolved_to_zero; - - if (h->root.type == bfd_link_hash_indirect) - return TRUE; - - eh = (struct elf_x86_64_link_hash_entry *) h; - - info = (struct bfd_link_info *) inf; - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - bed = get_elf_backend_data (info->output_bfd); - plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); - - resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, - eh->has_got_reloc, - eh); - - /* We can't use the GOT PLT if pointer equality is needed since - finish_dynamic_symbol won't clear symbol value and the dynamic - linker won't update the GOT slot. We will get into an infinite - loop at run-time. */ - if (htab->plt_got != NULL - && h->type != STT_GNU_IFUNC - && !h->pointer_equality_needed - && h->plt.refcount > 0 - && h->got.refcount > 0) - { - /* Don't use the regular PLT if there are both GOT and GOTPLT - reloctions. */ - h->plt.offset = (bfd_vma) -1; - - /* Use the GOT PLT. */ - eh->plt_got.refcount = 1; - } - - /* Clear the reference count of function pointer relocations if - symbol isn't a normal function. */ - if (h->type != STT_FUNC) - eh->func_pointer_refcount = 0; - - /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it - here if it is defined and referenced in a non-shared object. */ - if (h->type == STT_GNU_IFUNC - && h->def_regular) - { - if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h, - &eh->dyn_relocs, - &htab->readonly_dynrelocs_against_ifunc, - plt_entry_size, - plt_entry_size, - GOT_ENTRY_SIZE)) - { - asection *s = htab->plt_bnd; - if (h->plt.offset != (bfd_vma) -1 && s != NULL) - { - /* Use the .plt.bnd section if it is created. */ - eh->plt_bnd.offset = s->size; - - /* Make room for this entry in the .plt.bnd section. 
*/ - s->size += sizeof (elf_x86_64_legacy_plt2_entry); - } - - return TRUE; - } - else - return FALSE; - } - /* Don't create the PLT entry if there are only function pointer - relocations which can be resolved at run-time. */ - else if (htab->elf.dynamic_sections_created - && (h->plt.refcount > eh->func_pointer_refcount - || eh->plt_got.refcount > 0)) - { - bfd_boolean use_plt_got; - - /* Clear the reference count of function pointer relocations - if PLT is used. */ - eh->func_pointer_refcount = 0; - - if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) - { - /* Don't use the regular PLT for DF_BIND_NOW. */ - h->plt.offset = (bfd_vma) -1; - - /* Use the GOT PLT. */ - h->got.refcount = 1; - eh->plt_got.refcount = 1; - } - - use_plt_got = eh->plt_got.refcount > 0; - - /* Make sure this symbol is output as a dynamic symbol. - Undefined weak syms won't yet be marked as dynamic. */ - if (h->dynindx == -1 - && !h->forced_local - && !resolved_to_zero) - { - if (! bfd_elf_link_record_dynamic_symbol (info, h)) - return FALSE; - } - - if (bfd_link_pic (info) - || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) - { - asection *s = htab->elf.splt; - asection *bnd_s = htab->plt_bnd; - asection *got_s = htab->plt_got; - - /* If this is the first .plt entry, make room for the special - first entry. The .plt section is used by prelink to undo - prelinking for dynamic relocations. */ - if (s->size == 0) - s->size = plt_entry_size; - - if (use_plt_got) - eh->plt_got.offset = got_s->size; - else - { - h->plt.offset = s->size; - if (bnd_s) - eh->plt_bnd.offset = bnd_s->size; - } - - /* If this symbol is not defined in a regular file, and we are - not generating a shared library, then set the symbol to this - location in the .plt. This is required to make function - pointers compare as equal between the normal executable and - the shared library. */ - if (! bfd_link_pic (info) - && !h->def_regular) - { - if (use_plt_got) - { - /* We need to make a call to the entry of the GOT PLT - instead of regular PLT entry. */ - h->root.u.def.section = got_s; - h->root.u.def.value = eh->plt_got.offset; - } - else - { - if (bnd_s) - { - /* We need to make a call to the entry of the second - PLT instead of regular PLT entry. */ - h->root.u.def.section = bnd_s; - h->root.u.def.value = eh->plt_bnd.offset; - } - else - { - h->root.u.def.section = s; - h->root.u.def.value = h->plt.offset; - } - } - } - - /* Make room for this entry. */ - if (use_plt_got) - got_s->size += sizeof (elf_x86_64_legacy_plt2_entry); - else - { - s->size += plt_entry_size; - if (bnd_s) - bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry); - - /* We also need to make an entry in the .got.plt section, - which will be placed in the .got section by the linker - script. */ - htab->elf.sgotplt->size += GOT_ENTRY_SIZE; - - /* There should be no PLT relocation against resolved - undefined weak symbol in executable. */ - if (!resolved_to_zero) - { - /* We also need to make an entry in the .rela.plt - section. */ - htab->elf.srelplt->size += bed->s->sizeof_rela; - htab->elf.srelplt->reloc_count++; - } - } - } - else - { - eh->plt_got.offset = (bfd_vma) -1; - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - } - else - { - eh->plt_got.offset = (bfd_vma) -1; - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - - eh->tlsdesc_got = (bfd_vma) -1; - - /* If R_X86_64_GOTTPOFF symbol is now local to the binary, - make it a R_X86_64_TPOFF32 requiring no GOT entry. 
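   In that case the initial-exec sequence, e.g. "movq foo@gottpoff(%rip), %reg", can be relaxed to load the constant TP offset directly, so neither a GOT slot nor a dynamic relocation is needed.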
*/ - if (h->got.refcount > 0 - && bfd_link_executable (info) - && h->dynindx == -1 - && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE) - { - h->got.offset = (bfd_vma) -1; - } - else if (h->got.refcount > 0) - { - asection *s; - bfd_boolean dyn; - int tls_type = elf_x86_64_hash_entry (h)->tls_type; - - /* Make sure this symbol is output as a dynamic symbol. - Undefined weak syms won't yet be marked as dynamic. */ - if (h->dynindx == -1 - && !h->forced_local - && !resolved_to_zero) - { - if (! bfd_elf_link_record_dynamic_symbol (info, h)) - return FALSE; - } - - if (GOT_TLS_GDESC_P (tls_type)) - { - eh->tlsdesc_got = htab->elf.sgotplt->size - - elf_x86_64_compute_jump_table_size (htab); - htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE; - h->got.offset = (bfd_vma) -2; - } - if (! GOT_TLS_GDESC_P (tls_type) - || GOT_TLS_GD_P (tls_type)) - { - s = htab->elf.sgot; - h->got.offset = s->size; - s->size += GOT_ENTRY_SIZE; - if (GOT_TLS_GD_P (tls_type)) - s->size += GOT_ENTRY_SIZE; - } - dyn = htab->elf.dynamic_sections_created; - /* R_X86_64_TLSGD needs one dynamic relocation if local symbol - and two if global. R_X86_64_GOTTPOFF needs one dynamic - relocation. No dynamic relocation against resolved undefined - weak symbol in executable. */ - if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1) - || tls_type == GOT_TLS_IE) - htab->elf.srelgot->size += bed->s->sizeof_rela; - else if (GOT_TLS_GD_P (tls_type)) - htab->elf.srelgot->size += 2 * bed->s->sizeof_rela; - else if (! GOT_TLS_GDESC_P (tls_type) - && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT - && !resolved_to_zero) - || h->root.type != bfd_link_hash_undefweak) - && (bfd_link_pic (info) - || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) - htab->elf.srelgot->size += bed->s->sizeof_rela; - if (GOT_TLS_GDESC_P (tls_type)) - { - htab->elf.srelplt->size += bed->s->sizeof_rela; - htab->tlsdesc_plt = (bfd_vma) -1; - } - } - else - h->got.offset = (bfd_vma) -1; - - if (eh->dyn_relocs == NULL) - return TRUE; - - /* In the shared -Bsymbolic case, discard space allocated for - dynamic pc-relative relocs against symbols which turn out to be - defined in regular objects. For the normal shared case, discard - space for pc-relative relocs that have become local due to symbol - visibility changes. */ - - if (bfd_link_pic (info)) - { - /* Relocs that use pc_count are those that appear on a call - insn, or certain REL relocs that can generated via assembly. - We want calls to protected symbols to resolve directly to the - function rather than going via the plt. If people want - function pointer comparisons to work as expected then they - should avoid writing weird assembly. */ - if (SYMBOL_CALLS_LOCAL (info, h)) - { - struct elf_dyn_relocs **pp; - - for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) - { - p->count -= p->pc_count; - p->pc_count = 0; - if (p->count == 0) - *pp = p->next; - else - pp = &p->next; - } - } - - /* Also discard relocs on undefined weak syms with non-default - visibility or in PIE. */ - if (eh->dyn_relocs != NULL) - { - if (h->root.type == bfd_link_hash_undefweak) - { - /* Undefined weak symbol is never bound locally in shared - library. */ - if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT - || resolved_to_zero) - eh->dyn_relocs = NULL; - else if (h->dynindx == -1 - && ! h->forced_local - && ! bfd_elf_link_record_dynamic_symbol (info, h)) - return FALSE; - } - /* For PIE, discard space for pc-relative relocs against - symbols which turn out to need copy relocs. 
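   Once the copy relocation places the definition in the executable itself, PC-relative references bind to that local copy at link time and need no run-time fixup.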
*/ - else if (bfd_link_executable (info) - && (h->needs_copy || eh->needs_copy) - && h->def_dynamic - && !h->def_regular) - { - struct elf_dyn_relocs **pp; - - for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) - { - if (p->pc_count != 0) - *pp = p->next; - else - pp = &p->next; - } - } - } - } - else if (ELIMINATE_COPY_RELOCS) - { - /* For the non-shared case, discard space for relocs against - symbols which turn out to need copy relocs or are not - dynamic. Keep dynamic relocations for run-time function - pointer initialization. */ - - if ((!h->non_got_ref - || eh->func_pointer_refcount > 0 - || (h->root.type == bfd_link_hash_undefweak - && !resolved_to_zero)) - && ((h->def_dynamic - && !h->def_regular) - || (htab->elf.dynamic_sections_created - && (h->root.type == bfd_link_hash_undefweak - || h->root.type == bfd_link_hash_undefined)))) - { - /* Make sure this symbol is output as a dynamic symbol. - Undefined weak syms won't yet be marked as dynamic. */ - if (h->dynindx == -1 - && ! h->forced_local - && ! resolved_to_zero - && ! bfd_elf_link_record_dynamic_symbol (info, h)) - return FALSE; - - /* If that succeeded, we know we'll be keeping all the - relocs. */ - if (h->dynindx != -1) - goto keep; - } - - eh->dyn_relocs = NULL; - eh->func_pointer_refcount = 0; - - keep: ; - } - - /* Finally, allocate space. */ - for (p = eh->dyn_relocs; p != NULL; p = p->next) - { - asection * sreloc; - - sreloc = elf_section_data (p->sec)->sreloc; - - BFD_ASSERT (sreloc != NULL); - - sreloc->size += p->count * bed->s->sizeof_rela; - } - - return TRUE; -} - -/* Allocate space in .plt, .got and associated reloc sections for - local dynamic relocs. */ - -static bfd_boolean -elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf) -{ - struct elf_link_hash_entry *h - = (struct elf_link_hash_entry *) *slot; - - if (h->type != STT_GNU_IFUNC - || !h->def_regular - || !h->ref_regular - || !h->forced_local - || h->root.type != bfd_link_hash_defined) - abort (); - - return elf_x86_64_allocate_dynrelocs (h, inf); -} - -/* Find any dynamic relocs that apply to read-only sections. */ - -static bfd_boolean -elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h, - void * inf) -{ - struct elf_x86_64_link_hash_entry *eh; - struct elf_dyn_relocs *p; - - /* Skip local IFUNC symbols. */ - if (h->forced_local && h->type == STT_GNU_IFUNC) - return TRUE; - - eh = (struct elf_x86_64_link_hash_entry *) h; - for (p = eh->dyn_relocs; p != NULL; p = p->next) - { - asection *s = p->sec->output_section; - - if (s != NULL && (s->flags & SEC_READONLY) != 0) - { - struct bfd_link_info *info = (struct bfd_link_info *) inf; - - info->flags |= DF_TEXTREL; - - if ((info->warn_shared_textrel && bfd_link_pic (info)) - || info->error_textrel) - info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"), - p->sec->owner, h->root.root.string, - p->sec); - - /* Not an error, just cut short the traversal. */ - return FALSE; - } - } - return TRUE; -} - -/* Convert load via the GOT slot to load immediate. */ - -static bfd_boolean -elf_x86_64_convert_load (bfd *abfd, asection *sec, - struct bfd_link_info *link_info) -{ - Elf_Internal_Shdr *symtab_hdr; - Elf_Internal_Rela *internal_relocs; - Elf_Internal_Rela *irel, *irelend; - bfd_byte *contents; - struct elf_x86_64_link_hash_table *htab; - bfd_boolean changed; - bfd_signed_vma *local_got_refcounts; - - /* Don't even try to convert non-ELF outputs. 
*/ - if (!is_elf_hash_table (link_info->hash)) - return FALSE; - - /* Nothing to do if there is no need or no output. */ - if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC) - || sec->need_convert_load == 0 - || bfd_is_abs_section (sec->output_section)) - return TRUE; - - symtab_hdr = &elf_tdata (abfd)->symtab_hdr; - - /* Load the relocations for this section. */ - internal_relocs = (_bfd_elf_link_read_relocs - (abfd, sec, NULL, (Elf_Internal_Rela *) NULL, - link_info->keep_memory)); - if (internal_relocs == NULL) - return FALSE; - - changed = FALSE; - htab = elf_x86_64_hash_table (link_info); - local_got_refcounts = elf_local_got_refcounts (abfd); - - /* Get the section contents. */ - if (elf_section_data (sec)->this_hdr.contents != NULL) - contents = elf_section_data (sec)->this_hdr.contents; - else - { - if (!bfd_malloc_and_get_section (abfd, sec, &contents)) - goto error_return; - } - - irelend = internal_relocs + sec->reloc_count; - for (irel = internal_relocs; irel < irelend; irel++) - { - unsigned int r_type = ELF32_R_TYPE (irel->r_info); - unsigned int r_symndx; - struct elf_link_hash_entry *h; - bfd_boolean converted; - - if (r_type != R_X86_64_GOTPCRELX - && r_type != R_X86_64_REX_GOTPCRELX - && r_type != R_X86_64_GOTPCREL) - continue; - - r_symndx = htab->r_sym (irel->r_info); - if (r_symndx < symtab_hdr->sh_info) - h = elf_x86_64_get_local_sym_hash (htab, sec->owner, - (const Elf_Internal_Rela *) irel, - FALSE); - else - { - h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info]; - while (h->root.type == bfd_link_hash_indirect - || h->root.type == bfd_link_hash_warning) - h = (struct elf_link_hash_entry *) h->root.u.i.link; - } - - /* STT_GNU_IFUNC must keep GOTPCREL relocations. */ - if (h != NULL && h->type == STT_GNU_IFUNC) - continue; - - converted = FALSE; - if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h, - &converted, link_info)) - goto error_return; - - if (converted) - { - changed = converted; - if (h) - { - if (h->got.refcount > 0) - h->got.refcount -= 1; - } - else - { - if (local_got_refcounts != NULL - && local_got_refcounts[r_symndx] > 0) - local_got_refcounts[r_symndx] -= 1; - } - } - } - - if (contents != NULL - && elf_section_data (sec)->this_hdr.contents != contents) - { - if (!changed && !link_info->keep_memory) - free (contents); - else - { - /* Cache the section contents for elf_link_input_bfd. */ - elf_section_data (sec)->this_hdr.contents = contents; - } - } - - if (elf_section_data (sec)->relocs != internal_relocs) - { - if (!changed) - free (internal_relocs); - else - elf_section_data (sec)->relocs = internal_relocs; - } - - return TRUE; - - error_return: - if (contents != NULL - && elf_section_data (sec)->this_hdr.contents != contents) - free (contents); - if (internal_relocs != NULL - && elf_section_data (sec)->relocs != internal_relocs) - free (internal_relocs); - return FALSE; -} - -/* Set the sizes of the dynamic sections. */ - -static bfd_boolean -elf_x86_64_size_dynamic_sections (bfd *output_bfd, - struct bfd_link_info *info) -{ - struct elf_x86_64_link_hash_table *htab; - bfd *dynobj; - asection *s; - bfd_boolean relocs; - bfd *ibfd; - const struct elf_backend_data *bed; - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - bed = get_elf_backend_data (output_bfd); - - dynobj = htab->elf.dynobj; - if (dynobj == NULL) - abort (); - - /* Set up .got offsets for local syms, and space for local dynamic - relocs. 
*/ - for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) - { - bfd_signed_vma *local_got; - bfd_signed_vma *end_local_got; - char *local_tls_type; - bfd_vma *local_tlsdesc_gotent; - bfd_size_type locsymcount; - Elf_Internal_Shdr *symtab_hdr; - asection *srel; - - if (! is_x86_64_elf (ibfd)) - continue; - - for (s = ibfd->sections; s != NULL; s = s->next) - { - struct elf_dyn_relocs *p; - - if (!elf_x86_64_convert_load (ibfd, s, info)) - return FALSE; - - for (p = (struct elf_dyn_relocs *) - (elf_section_data (s)->local_dynrel); - p != NULL; - p = p->next) - { - if (!bfd_is_abs_section (p->sec) - && bfd_is_abs_section (p->sec->output_section)) - { - /* Input section has been discarded, either because - it is a copy of a linkonce section or due to - linker script /DISCARD/, so we'll be discarding - the relocs too. */ - } - else if (p->count != 0) - { - srel = elf_section_data (p->sec)->sreloc; - srel->size += p->count * bed->s->sizeof_rela; - if ((p->sec->output_section->flags & SEC_READONLY) != 0 - && (info->flags & DF_TEXTREL) == 0) - { - info->flags |= DF_TEXTREL; - if ((info->warn_shared_textrel && bfd_link_pic (info)) - || info->error_textrel) - info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"), - p->sec->owner, p->sec); - } - } - } - } - - local_got = elf_local_got_refcounts (ibfd); - if (!local_got) - continue; - - symtab_hdr = &elf_symtab_hdr (ibfd); - locsymcount = symtab_hdr->sh_info; - end_local_got = local_got + locsymcount; - local_tls_type = elf_x86_64_local_got_tls_type (ibfd); - local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd); - s = htab->elf.sgot; - srel = htab->elf.srelgot; - for (; local_got < end_local_got; - ++local_got, ++local_tls_type, ++local_tlsdesc_gotent) - { - *local_tlsdesc_gotent = (bfd_vma) -1; - if (*local_got > 0) - { - if (GOT_TLS_GDESC_P (*local_tls_type)) - { - *local_tlsdesc_gotent = htab->elf.sgotplt->size - - elf_x86_64_compute_jump_table_size (htab); - htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE; - *local_got = (bfd_vma) -2; - } - if (! GOT_TLS_GDESC_P (*local_tls_type) - || GOT_TLS_GD_P (*local_tls_type)) - { - *local_got = s->size; - s->size += GOT_ENTRY_SIZE; - if (GOT_TLS_GD_P (*local_tls_type)) - s->size += GOT_ENTRY_SIZE; - } - if (bfd_link_pic (info) - || GOT_TLS_GD_ANY_P (*local_tls_type) - || *local_tls_type == GOT_TLS_IE) - { - if (GOT_TLS_GDESC_P (*local_tls_type)) - { - htab->elf.srelplt->size - += bed->s->sizeof_rela; - htab->tlsdesc_plt = (bfd_vma) -1; - } - if (! GOT_TLS_GDESC_P (*local_tls_type) - || GOT_TLS_GD_P (*local_tls_type)) - srel->size += bed->s->sizeof_rela; - } - } - else - *local_got = (bfd_vma) -1; - } - } - - if (htab->tls_ld_got.refcount > 0) - { - /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD - relocs. */ - htab->tls_ld_got.offset = htab->elf.sgot->size; - htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE; - htab->elf.srelgot->size += bed->s->sizeof_rela; - } - else - htab->tls_ld_got.offset = -1; - - /* Allocate global sym .plt and .got entries, and space for global - sym dynamic relocs. */ - elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs, - info); - - /* Allocate .plt and .got entries, and space for local symbols. */ - htab_traverse (htab->loc_hash_table, - elf_x86_64_allocate_local_dynrelocs, - info); - - /* For every jump slot reserved in the sgotplt, reloc_count is - incremented. 
However, when we reserve space for TLS descriptors, - it's not incremented, so in order to compute the space reserved - for them, it suffices to multiply the reloc count by the jump - slot size. - - PR ld/13302: We start next_irelative_index at the end of .rela.plt - so that R_X86_64_IRELATIVE entries come last. */ - if (htab->elf.srelplt) - { - htab->sgotplt_jump_table_size - = elf_x86_64_compute_jump_table_size (htab); - htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1; - } - else if (htab->elf.irelplt) - htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1; - - if (htab->tlsdesc_plt) - { - /* If we're not using lazy TLS relocations, don't generate the - PLT and GOT entries they require. */ - if ((info->flags & DF_BIND_NOW)) - htab->tlsdesc_plt = 0; - else - { - htab->tlsdesc_got = htab->elf.sgot->size; - htab->elf.sgot->size += GOT_ENTRY_SIZE; - /* Reserve room for the initial entry. - FIXME: we could probably do away with it in this case. */ - if (htab->elf.splt->size == 0) - htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); - htab->tlsdesc_plt = htab->elf.splt->size; - htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); - } - } - - if (htab->elf.sgotplt) - { - /* Don't allocate .got.plt section if there are no GOT nor PLT - entries and there is no refeence to _GLOBAL_OFFSET_TABLE_. */ - if ((htab->elf.hgot == NULL - || !htab->elf.hgot->ref_regular_nonweak) - && (htab->elf.sgotplt->size - == get_elf_backend_data (output_bfd)->got_header_size) - && (htab->elf.splt == NULL - || htab->elf.splt->size == 0) - && (htab->elf.sgot == NULL - || htab->elf.sgot->size == 0) - && (htab->elf.iplt == NULL - || htab->elf.iplt->size == 0) - && (htab->elf.igotplt == NULL - || htab->elf.igotplt->size == 0)) - htab->elf.sgotplt->size = 0; - } - - if (htab->plt_eh_frame != NULL - && htab->elf.splt != NULL - && htab->elf.splt->size != 0 - && !bfd_is_abs_section (htab->elf.splt->output_section) - && _bfd_elf_eh_frame_present (info)) - { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; - } - - /* We now have determined the sizes of the various dynamic sections. - Allocate memory for them. */ - relocs = FALSE; - for (s = dynobj->sections; s != NULL; s = s->next) - { - if ((s->flags & SEC_LINKER_CREATED) == 0) - continue; - - if (s == htab->elf.splt - || s == htab->elf.sgot - || s == htab->elf.sgotplt - || s == htab->elf.iplt - || s == htab->elf.igotplt - || s == htab->plt_bnd - || s == htab->plt_got - || s == htab->plt_eh_frame - || s == htab->sdynbss) - { - /* Strip this section if we don't need it; see the - comment below. */ - } - else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela")) - { - if (s->size != 0 && s != htab->elf.srelplt) - relocs = TRUE; - - /* We use the reloc_count field as a counter if we need - to copy relocs into the output file. */ - if (s != htab->elf.srelplt) - s->reloc_count = 0; - } - else - { - /* It's not one of our sections, so don't allocate space. */ - continue; - } - - if (s->size == 0) - { - /* If we don't need this section, strip it from the - output file. This is mostly to handle .rela.bss and - .rela.plt. We must create both sections in - create_dynamic_sections, because they must be created - before the linker maps input sections to output - sections. The linker does that before - adjust_dynamic_symbol is called, and it is that - function which decides whether anything needs to go - into these sections. 
*/ - - s->flags |= SEC_EXCLUDE; - continue; - } - - if ((s->flags & SEC_HAS_CONTENTS) == 0) - continue; - - /* Allocate memory for the section contents. We use bfd_zalloc - here in case unused entries are not reclaimed before the - section's contents are written out. This should not happen, - but this way if it does, we get a R_X86_64_NONE reloc instead - of garbage. */ - s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size); - if (s->contents == NULL) - return FALSE; - } - - if (htab->plt_eh_frame != NULL - && htab->plt_eh_frame->contents != NULL) - { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - - memcpy (htab->plt_eh_frame->contents, - arch_data->eh_frame_plt, htab->plt_eh_frame->size); - bfd_put_32 (dynobj, htab->elf.splt->size, - htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET); - } - - if (htab->elf.dynamic_sections_created) - { - /* Add some entries to the .dynamic section. We fill in the - values later, in elf_x86_64_finish_dynamic_sections, but we - must add the entries now so that we get the correct size for - the .dynamic section. The DT_DEBUG entry is filled in by the - dynamic linker and used by the debugger. */ -#define add_dynamic_entry(TAG, VAL) \ - _bfd_elf_add_dynamic_entry (info, TAG, VAL) - - if (bfd_link_executable (info)) - { - if (!add_dynamic_entry (DT_DEBUG, 0)) - return FALSE; - } - - if (htab->elf.splt->size != 0) - { - /* DT_PLTGOT is used by prelink even if there is no PLT - relocation. */ - if (!add_dynamic_entry (DT_PLTGOT, 0)) - return FALSE; - - if (htab->elf.srelplt->size != 0) - { - if (!add_dynamic_entry (DT_PLTRELSZ, 0) - || !add_dynamic_entry (DT_PLTREL, DT_RELA) - || !add_dynamic_entry (DT_JMPREL, 0)) - return FALSE; - } - - if (htab->tlsdesc_plt - && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) - || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) - return FALSE; - } - - if (relocs) - { - if (!add_dynamic_entry (DT_RELA, 0) - || !add_dynamic_entry (DT_RELASZ, 0) - || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela)) - return FALSE; - - /* If any dynamic relocs apply to a read-only section, - then we need a DT_TEXTREL entry. 
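   DT_TEXTREL tells the dynamic linker that it will have to write to otherwise read-only segments in order to apply those relocations, which defeats page sharing, hence the diagnostics emitted before the entry is added.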
*/ - if ((info->flags & DF_TEXTREL) == 0) - elf_link_hash_traverse (&htab->elf, - elf_x86_64_readonly_dynrelocs, - info); - - if ((info->flags & DF_TEXTREL) != 0) - { - if (htab->readonly_dynrelocs_against_ifunc) - { - info->callbacks->einfo - (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n")); - bfd_set_error (bfd_error_bad_value); - return FALSE; - } - - if (!add_dynamic_entry (DT_TEXTREL, 0)) - return FALSE; - } - } - } -#undef add_dynamic_entry - - return TRUE; -} - -static bfd_boolean -elf_x86_64_always_size_sections (bfd *output_bfd, - struct bfd_link_info *info) -{ - asection *tls_sec = elf_hash_table (info)->tls_sec; - - if (tls_sec) - { - struct elf_link_hash_entry *tlsbase; - - tlsbase = elf_link_hash_lookup (elf_hash_table (info), - "_TLS_MODULE_BASE_", - FALSE, FALSE, FALSE); - - if (tlsbase && tlsbase->type == STT_TLS) - { - struct elf_x86_64_link_hash_table *htab; - struct bfd_link_hash_entry *bh = NULL; - const struct elf_backend_data *bed - = get_elf_backend_data (output_bfd); - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - if (!(_bfd_generic_link_add_one_symbol - (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, - tls_sec, 0, NULL, FALSE, - bed->collect, &bh))) - return FALSE; - - htab->tls_module_base = bh; - - tlsbase = (struct elf_link_hash_entry *)bh; - tlsbase->def_regular = 1; - tlsbase->other = STV_HIDDEN; - tlsbase->root.linker_def = 1; - (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); - } - } - - return TRUE; -} - -/* _TLS_MODULE_BASE_ needs to be treated especially when linking - executables. Rather than setting it to the beginning of the TLS - section, we have to set it to the end. This function may be called - multiple times, it is idempotent. */ - -static void -elf_x86_64_set_tls_module_base (struct bfd_link_info *info) -{ - struct elf_x86_64_link_hash_table *htab; - struct bfd_link_hash_entry *base; - - if (!bfd_link_executable (info)) - return; - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return; - - base = htab->tls_module_base; - if (base == NULL) - return; + /* Cache the section contents for elf_link_input_bfd if any + load is converted or --no-keep-memory isn't used. */ + elf_section_data (sec)->this_hdr.contents = contents; + } + } - base->u.def.value = htab->elf.tls_size; -} + /* Cache relocations if any load is converted. */ + if (elf_section_data (sec)->relocs != relocs && converted) + elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs; -/* Return the base VMA address which should be subtracted from real addresses - when resolving @dtpoff relocation. - This is PT_TLS segment p_vaddr. */ + return TRUE; -static bfd_vma -elf_x86_64_dtpoff_base (struct bfd_link_info *info) -{ - /* If tls_sec is NULL, we should have signalled an error already. */ - if (elf_hash_table (info)->tls_sec == NULL) - return 0; - return elf_hash_table (info)->tls_sec->vma; + error_return: + if (elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + sec->check_relocs_failed = 1; + return FALSE; } /* Return the relocation value for @tpoff relocation @@ -4027,24 +2425,6 @@ elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address) return address - static_tls_size - htab->tls_sec->vma; } -/* Is the instruction before OFFSET in CONTENTS a 32bit relative - branch? 
*/ - -static bfd_boolean -is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset) -{ - /* Opcode Instruction - 0xe8 call - 0xe9 jump - 0x0f 0x8x conditional jump */ - return ((offset > 0 - && (contents [offset - 1] == 0xe8 - || contents [offset - 1] == 0xe9)) - || (offset > 1 - && contents [offset - 2] == 0x0f - && (contents [offset - 1] & 0xf0) == 0x80)); -} - /* Relocate an x86_64 ELF section. */ static bfd_boolean @@ -4057,7 +2437,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, Elf_Internal_Sym *local_syms, asection **local_sections) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; bfd_vma *local_got_offsets; @@ -4065,33 +2445,39 @@ elf_x86_64_relocate_section (bfd *output_bfd, Elf_Internal_Rela *rel; Elf_Internal_Rela *wrel; Elf_Internal_Rela *relend; - const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); - - BFD_ASSERT (is_x86_64_elf (input_bfd)); + unsigned int plt_entry_size; /* Skip if check_relocs failed. */ if (input_section->check_relocs_failed) return FALSE; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; + + if (!is_x86_elf (input_bfd, htab)) + { + bfd_set_error (bfd_error_wrong_format); + return FALSE; + } + + plt_entry_size = htab->plt.plt_entry_size; symtab_hdr = &elf_symtab_hdr (input_bfd); sym_hashes = elf_sym_hashes (input_bfd); local_got_offsets = elf_local_got_offsets (input_bfd); - local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd); + local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd); - elf_x86_64_set_tls_module_base (info); + _bfd_x86_elf_set_tls_module_base (info); rel = wrel = relocs; relend = relocs + input_section->reloc_count; for (; rel < relend; wrel++, rel++) { - unsigned int r_type; + unsigned int r_type, r_type_tls; reloc_howto_type *howto; unsigned long r_symndx; struct elf_link_hash_entry *h; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_entry *eh; Elf_Internal_Sym *sym; asection *sec; bfd_vma off, offplt, plt_offset; @@ -4102,6 +2488,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, asection *base_got, *resolved_plt; bfd_vma st_size; bfd_boolean resolved_to_zero; + bfd_boolean relative_reloc; + bfd_boolean converted_reloc; + bfd_boolean need_copy_reloc_in_pie; + bfd_boolean no_copyreloc_p; r_type = ELF32_R_TYPE (rel->r_info); if (r_type == (int) R_X86_64_GNU_VTINHERIT @@ -4112,22 +2502,18 @@ elf_x86_64_relocate_section (bfd *output_bfd, continue; } - if (r_type >= (int) R_X86_64_standard) + r_symndx = htab->r_sym (rel->r_info); + converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0; + if (converted_reloc) { - (*_bfd_error_handler) - (_("%B: unrecognized relocation (0x%x) in section `%A'"), - input_bfd, input_section, r_type); - bfd_set_error (bfd_error_bad_value); - return FALSE; + r_type &= ~R_X86_64_converted_reloc_bit; + rel->r_info = htab->r_info (r_symndx, r_type); } - if (r_type != (int) R_X86_64_32 - || ABI_64_P (output_bfd)) - howto = x86_64_elf_howto_table + r_type; - else - howto = (x86_64_elf_howto_table - + ARRAY_SIZE (x86_64_elf_howto_table) - 1); - r_symndx = htab->r_sym (rel->r_info); + howto = elf_x86_64_rtype_to_howto (input_bfd, r_type); + if (howto == NULL) + return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); + h = NULL; sym = NULL; sec = NULL; @@ -4145,8 +2531,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (!bfd_link_relocatable (info) 
&& ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) { - h = elf_x86_64_get_local_sym_hash (htab, input_bfd, - rel, FALSE); + h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd, + rel, FALSE); if (h == NULL) abort (); @@ -4170,7 +2556,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (sec != NULL && discarded_section (sec)) { _bfd_clear_contents (howto, input_bfd, input_section, - contents + rel->r_offset); + contents, rel->r_offset); wrel->r_offset = rel->r_offset; wrel->r_info = 0; wrel->r_addend = 0; @@ -4210,7 +2596,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, } } - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it here if it is defined in a non-shared object. */ @@ -4223,6 +2609,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, if ((input_section->flags & SEC_ALLOC) == 0) { + /* If this is a SHT_NOTE section without SHF_ALLOC, treat + STT_GNU_IFUNC symbol as STT_FUNC. */ + if (elf_section_type (input_section) == SHT_NOTE) + goto skip_ifunc; /* Dynamic relocs are not propagated for SEC_DEBUGGING sections because such sections are not SEC_ALLOC and thus ld.so will not process them. */ @@ -4230,16 +2620,93 @@ elf_x86_64_relocate_section (bfd *output_bfd, continue; abort (); } - else if (h->plt.offset == (bfd_vma) -1) - abort (); + + switch (r_type) + { + default: + break; + + case R_X86_64_GOTPCREL: + case R_X86_64_GOTPCRELX: + case R_X86_64_REX_GOTPCRELX: + case R_X86_64_GOTPCREL64: + base_got = htab->elf.sgot; + off = h->got.offset; + + if (base_got == NULL) + abort (); + + if (off == (bfd_vma) -1) + { + /* We can't use h->got.offset here to save state, or + even just remember the offset, as finish_dynamic_symbol + would use that as offset into .got. */ + + if (h->plt.offset == (bfd_vma) -1) + abort (); + + if (htab->elf.splt != NULL) + { + plt_index = (h->plt.offset / plt_entry_size + - htab->plt.has_plt0); + off = (plt_index + 3) * GOT_ENTRY_SIZE; + base_got = htab->elf.sgotplt; + } + else + { + plt_index = h->plt.offset / plt_entry_size; + off = plt_index * GOT_ENTRY_SIZE; + base_got = htab->elf.igotplt; + } + + if (h->dynindx == -1 + || h->forced_local + || info->symbolic) + { + /* This references the local defitionion. We must + initialize this entry in the global offset table. + Since the offset must always be a multiple of 8, + we use the least significant bit to record + whether we have initialized it already. + + When doing a dynamic link, we create a .rela.got + relocation entry to initialize the value. This + is done in the finish_dynamic_symbol routine. */ + if ((off & 1) != 0) + off &= ~1; + else + { + bfd_put_64 (output_bfd, relocation, + base_got->contents + off); + /* Note that this is harmless for the GOTPLT64 + case, as -1 | 1 still is -1. */ + h->got.offset |= 1; + } + } + } + + relocation = (base_got->output_section->vma + + base_got->output_offset + off); + + goto do_relocation; + } + + if (h->plt.offset == (bfd_vma) -1) + { + /* Handle static pointers of STT_GNU_IFUNC symbols. */ + if (r_type == htab->pointer_r_type + && (input_section->flags & SEC_CODE) == 0) + goto do_ifunc_pointer; + goto bad_ifunc_reloc; + } /* STT_GNU_IFUNC symbol must go through PLT. 
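	     Until the IFUNC resolver has run, the PLT slot is the only
	     link-time-stable address for the function, so the cases
	     below resolve references to the PLT entry rather than to
	     the symbol value.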
*/ if (htab->elf.splt != NULL) { - if (htab->plt_bnd != NULL) + if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4259,15 +2726,17 @@ elf_x86_64_relocate_section (bfd *output_bfd, switch (r_type) { default: + bad_ifunc_reloc: if (h->root.root.string) name = h->root.root.string; else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) - (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' isn't handled by %s"), input_bfd, - howto->name, name, __FUNCTION__); + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: relocation %s against STT_GNU_IFUNC " + "symbol `%s' isn't supported"), input_bfd, + howto->name, name); bfd_set_error (bfd_error_bad_value); return FALSE; @@ -4281,6 +2750,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, goto do_relocation; /* FALLTHROUGH */ case R_X86_64_64: + do_ifunc_pointer: if (rel->r_addend != 0) { if (h->root.root.string) @@ -4288,17 +2758,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) - (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' has non-zero addend: %d"), - input_bfd, howto->name, name, rel->r_addend); + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: relocation %s against STT_GNU_IFUNC " + "symbol `%s' has non-zero addend: %" PRId64), + input_bfd, howto->name, name, (int64_t) rel->r_addend); bfd_set_error (bfd_error_bad_value); return FALSE; } /* Generate dynamic relcoation only when there is a - non-GOT reference in a shared object. */ - if (bfd_link_pic (info) && h->non_got_ref) + non-GOT reference in a shared object or there is no + PLT. */ + if ((bfd_link_pic (info) && h->non_got_ref) + || h->plt.offset == (bfd_vma) -1) { Elf_Internal_Rela outrel; asection *sreloc; @@ -4316,10 +2789,12 @@ elf_x86_64_relocate_section (bfd *output_bfd, outrel.r_offset += (input_section->output_section->vma + input_section->output_offset); - if (h->dynindx == -1 - || h->forced_local - || bfd_link_executable (info)) + if (POINTER_LOCAL_IFUNC_P (info, h)) { + info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), + h->root.root.string, + h->root.u.def.section->owner); + /* This symbol is resolved locally. */ outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE); outrel.r_addend = (h->root.u.def.value @@ -4332,7 +2807,16 @@ elf_x86_64_relocate_section (bfd *output_bfd, outrel.r_addend = 0; } - sreloc = htab->elf.irelifunc; + /* Dynamic relocations are stored in + 1. .rela.ifunc section in PIC object. + 2. .rela.got section in dynamic executable. + 3. .rela.iplt section in static executable. */ + if (bfd_link_pic (info)) + sreloc = htab->elf.irelifunc; + else if (htab->elf.splt != NULL) + sreloc = htab->elf.srelgot; + else + sreloc = htab->elf.irelplt; elf_append_rela (output_bfd, sreloc, &outrel); /* If this reloc is against an external symbol, we @@ -4349,73 +2833,12 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PLT32: case R_X86_64_PLT32_BND: goto do_relocation; - - case R_X86_64_GOTPCREL: - case R_X86_64_GOTPCRELX: - case R_X86_64_REX_GOTPCRELX: - case R_X86_64_GOTPCREL64: - base_got = htab->elf.sgot; - off = h->got.offset; - - if (base_got == NULL) - abort (); - - if (off == (bfd_vma) -1) - { - /* We can't use h->got.offset here to save state, or - even just remember the offset, as finish_dynamic_symbol - would use that as offset into .got. 
*/ - - if (htab->elf.splt != NULL) - { - plt_index = h->plt.offset / plt_entry_size - 1; - off = (plt_index + 3) * GOT_ENTRY_SIZE; - base_got = htab->elf.sgotplt; - } - else - { - plt_index = h->plt.offset / plt_entry_size; - off = plt_index * GOT_ENTRY_SIZE; - base_got = htab->elf.igotplt; - } - - if (h->dynindx == -1 - || h->forced_local - || info->symbolic) - { - /* This references the local defitionion. We must - initialize this entry in the global offset table. - Since the offset must always be a multiple of 8, - we use the least significant bit to record - whether we have initialized it already. - - When doing a dynamic link, we create a .rela.got - relocation entry to initialize the value. This - is done in the finish_dynamic_symbol routine. */ - if ((off & 1) != 0) - off &= ~1; - else - { - bfd_put_64 (output_bfd, relocation, - base_got->contents + off); - /* Note that this is harmless for the GOTPLT64 - case, as -1 | 1 still is -1. */ - h->got.offset |= 1; - } - } - } - - relocation = (base_got->output_section->vma - + base_got->output_offset + off); - - goto do_relocation; } } + skip_ifunc: resolved_to_zero = (eh != NULL - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, - eh->has_got_reloc, - eh)); + && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh)); /* When generating a shared object, the relocations handled here are copied into the output file to be resolved at run time. */ @@ -4431,16 +2854,15 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_GOTPCREL64: /* Use global offset table entry as symbol value. */ case R_X86_64_GOTPLT64: - /* This is obsolete and treated the the same as GOT64. */ + /* This is obsolete and treated the same as GOT64. */ base_got = htab->elf.sgot; if (htab->elf.sgot == NULL) abort (); + relative_reloc = FALSE; if (h != NULL) { - bfd_boolean dyn; - off = h->got.offset; if (h->needs_plt && h->plt.offset != (bfd_vma)-1 @@ -4450,26 +2872,18 @@ elf_x86_64_relocate_section (bfd *output_bfd, state, or even just remember the offset, as finish_dynamic_symbol would use that as offset into .got. */ - bfd_vma plt_index = h->plt.offset / plt_entry_size - 1; + bfd_vma plt_index = (h->plt.offset / plt_entry_size + - htab->plt.has_plt0); off = (plt_index + 3) * GOT_ENTRY_SIZE; base_got = htab->elf.sgotplt; } - dyn = htab->elf.dynamic_sections_created; - - if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h) - || (bfd_link_pic (info) - && SYMBOL_REFERENCES_LOCAL (info, h)) - || (ELF_ST_VISIBILITY (h->other) - && h->root.type == bfd_link_hash_undefweak)) + if (RESOLVED_LOCALLY_P (info, h, htab)) { - /* This is actually a static link, or it is a -Bsymbolic - link and the symbol is defined locally, or the symbol - was forced to be local because of a version file. We - must initialize this entry in the global offset table. - Since the offset must always be a multiple of 8, we - use the least significant bit to record whether we - have initialized it already. + /* We must initialize this entry in the global offset + table. Since the offset must always be a multiple + of 8, we use the least significant bit to record + whether we have initialized it already. When doing a dynamic link, we create a .rela.got relocation entry to initialize the value. This is @@ -4483,6 +2897,14 @@ elf_x86_64_relocate_section (bfd *output_bfd, /* Note that this is harmless for the GOTPLT64 case, as -1 | 1 still is -1. */ h->got.offset |= 1; + + if (GENERATE_RELATIVE_RELOC_P (info, h)) + { + /* If this symbol isn't dynamic in PIC, + generate R_X86_64_RELATIVE here. 
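+			 (ld.so applies R_X86_64_RELATIVE without any
+			 symbol lookup, roughly
+			   *(base + r_offset) = base + r_addend,
+			 so the link-time value stored as the addend
+			 below is all it needs.)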
*/ + eh->no_finish_dynamic_symbol = 1; + relative_reloc = TRUE; + } } } else @@ -4504,30 +2926,39 @@ elf_x86_64_relocate_section (bfd *output_bfd, { bfd_put_64 (output_bfd, relocation, base_got->contents + off); - - if (bfd_link_pic (info)) - { - asection *s; - Elf_Internal_Rela outrel; - - /* We need to generate a R_X86_64_RELATIVE reloc - for the dynamic linker. */ - s = htab->elf.srelgot; - if (s == NULL) - abort (); - - outrel.r_offset = (base_got->output_section->vma - + base_got->output_offset - + off); - outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); - outrel.r_addend = relocation; - elf_append_rela (output_bfd, s, &outrel); - } - local_got_offsets[r_symndx] |= 1; + + /* NB: GOTPCREL relocations against local absolute + symbol store relocation value in the GOT slot + without relative relocation. */ + if (bfd_link_pic (info) + && !(sym->st_shndx == SHN_ABS + && (r_type == R_X86_64_GOTPCREL + || r_type == R_X86_64_GOTPCRELX + || r_type == R_X86_64_REX_GOTPCRELX))) + relative_reloc = TRUE; } } + if (relative_reloc) + { + asection *s; + Elf_Internal_Rela outrel; + + /* We need to generate a R_X86_64_RELATIVE reloc + for the dynamic linker. */ + s = htab->elf.srelgot; + if (s == NULL) + abort (); + + outrel.r_offset = (base_got->output_section->vma + + base_got->output_offset + + off); + outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); + outrel.r_addend = relocation; + elf_append_rela (output_bfd, s, &outrel); + } + if (off >= (bfd_vma) -2) abort (); @@ -4572,25 +3003,29 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; } - (*_bfd_error_handler) - (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s" + " `%s' can not be used when making a shared object"), input_bfd, v, h->root.root.string); bfd_set_error (bfd_error_bad_value); return FALSE; } else if (!bfd_link_executable (info) - && !SYMBOL_REFERENCES_LOCAL (info, h) + && !SYMBOL_REFERENCES_LOCAL_P (info, h) && (h->type == STT_FUNC || h->type == STT_OBJECT) && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) { - (*_bfd_error_handler) - (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s" + " `%s' can not be used when making a shared object"), input_bfd, h->type == STT_FUNC ? "function" : "data", h->root.root.string); bfd_set_error (bfd_error_bad_value); - return FALSE; + return FALSE; } } @@ -4616,13 +3051,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, symbols it's the symbol itself relative to GOT. */ if (h != NULL /* See PLT32 handling. */ - && h->plt.offset != (bfd_vma) -1 + && (h->plt.offset != (bfd_vma) -1 + || eh->plt_got.offset != (bfd_vma) -1) && htab->elf.splt != NULL) { - if (htab->plt_bnd != NULL) + if (eh->plt_got.offset != (bfd_vma) -1) + { + /* Use the GOT PLT. 
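+		     A .plt.got entry is essentially a single
+		       jmp *sym@GOTPCREL(%rip)
+		     through the symbol's regular GOT slot, so its
+		     address can stand in for the function without a
+		     lazy PLT slot.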
*/ + resolved_plt = htab->plt_got; + plt_offset = eh->plt_got.offset; + } + else if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4660,12 +3102,13 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; } + use_plt: if (h->plt.offset != (bfd_vma) -1) { - if (htab->plt_bnd != NULL) + if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4697,43 +3140,73 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PC32: case R_X86_64_PC32_BND: /* Don't complain about -fPIC if the symbol is undefined when - building executable unless it is unresolved weak symbol. */ - if ((input_section->flags & SEC_ALLOC) != 0 + building executable unless it is unresolved weak symbol, + references a dynamic definition in PIE or -z nocopyreloc + is used. */ + no_copyreloc_p + = (info->nocopyreloc + || (h != NULL + && !h->root.linker_def + && !h->root.ldscript_def + && eh->def_protected + && elf_has_no_copy_on_protected (h->root.u.def.section->owner))); + + if ((input_section->flags & SEC_ALLOC) != 0 && (input_section->flags & SEC_READONLY) != 0 && h != NULL && ((bfd_link_executable (info) - && h->root.type == bfd_link_hash_undefweak - && !resolved_to_zero) - || (bfd_link_pic (info) - && !(bfd_link_pie (info) - && h->root.type == bfd_link_hash_undefined)))) + && ((h->root.type == bfd_link_hash_undefweak + && (eh == NULL + || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + eh))) + || (bfd_link_pie (info) + && !SYMBOL_DEFINED_NON_SHARED_P (h) + && h->def_dynamic) + || (no_copyreloc_p + && h->def_dynamic + && !(h->root.u.def.section->flags & SEC_CODE)))) + || bfd_link_dll (info))) { bfd_boolean fail = FALSE; - bfd_boolean branch - = ((r_type == R_X86_64_PC32 - || r_type == R_X86_64_PC32_BND) - && is_32bit_relative_branch (contents, rel->r_offset)); - - if (SYMBOL_REFERENCES_LOCAL (info, h)) + if (SYMBOL_REFERENCES_LOCAL_P (info, h)) { /* Symbol is referenced locally. Make sure it is - defined locally or for a branch. */ - fail = !h->def_regular && !branch; + defined locally. */ + fail = !SYMBOL_DEFINED_NON_SHARED_P (h); + } + else if (bfd_link_pie (info)) + { + /* We can only use PC-relative relocations in PIE + from non-code sections. */ + if (h->type == STT_FUNC + && (sec->flags & SEC_CODE) != 0) + fail = TRUE; } - else if (!(bfd_link_pie (info) - && (h->needs_copy || eh->needs_copy))) + else if (no_copyreloc_p || bfd_link_dll (info)) { - /* Symbol doesn't need copy reloc and isn't referenced - locally. We only allow branch to symbol with - non-default visibility. */ - fail = (!branch - || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT); + /* Symbol doesn't need copy reloc and isn't + referenced locally. Don't allow PC-relative + relocations against default and protected + symbols since address of protected function + and location of protected data may not be in + the shared object. */ + fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED); } if (fail) - return elf_x86_64_need_pic (input_bfd, input_section, + return elf_x86_64_need_pic (info, input_bfd, input_section, h, NULL, NULL, howto); } + /* Since x86-64 has PC-relative PLT, we can use PLT in PIE + as function address. 
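+	     A PLT entry is reached through a %rip-relative branch, so
+	     its address is position-independent and can stand in for
+	     the address of a function defined in a shared object when
+	     a non-code section of the PIE refers to it.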
*/ + else if (h != NULL + && (input_section->flags & SEC_CODE) == 0 + && bfd_link_pie (info) + && h->type == STT_FUNC + && !h->def_regular + && h->def_dynamic) + goto use_plt; /* Fall through. */ case R_X86_64_8: @@ -4744,42 +3217,22 @@ elf_x86_64_relocate_section (bfd *output_bfd, /* FIXME: The ABI says the linker should make sure the value is the same when it's zeroextended to 64 bit. */ -direct: + direct: if ((input_section->flags & SEC_ALLOC) == 0) break; - /* Don't copy a pc-relative relocation into the output file - if the symbol needs copy reloc or the symbol is undefined - when building executable. Copy dynamic function pointer - relocations. Don't generate dynamic relocations against - resolved undefined weak symbols in PIE. */ - if ((bfd_link_pic (info) - && !(bfd_link_pie (info) - && h != NULL - && (h->needs_copy - || eh->needs_copy - || h->root.type == bfd_link_hash_undefined) - && IS_X86_64_PCREL_TYPE (r_type)) - && (h == NULL - || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT - && !resolved_to_zero) - || h->root.type != bfd_link_hash_undefweak)) - && ((! IS_X86_64_PCREL_TYPE (r_type) - && r_type != R_X86_64_SIZE32 - && r_type != R_X86_64_SIZE64) - || ! SYMBOL_CALLS_LOCAL (info, h))) - || (ELIMINATE_COPY_RELOCS - && !bfd_link_pic (info) - && h != NULL - && h->dynindx != -1 - && (!h->non_got_ref - || eh->func_pointer_refcount > 0 - || (h->root.type == bfd_link_hash_undefweak - && !resolved_to_zero)) - && ((h->def_dynamic && !h->def_regular) - /* Undefined weak symbol is bound locally when - PIC is false. */ - || h->root.type == bfd_link_hash_undefined))) + need_copy_reloc_in_pie = (bfd_link_pie (info) + && h != NULL + && (h->needs_copy + || eh->needs_copy + || (h->root.type + == bfd_link_hash_undefined)) + && (X86_PCREL_TYPE_P (r_type) + || X86_SIZE_TYPE_P (r_type))); + + if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec, + need_copy_reloc_in_pie, + resolved_to_zero, FALSE)) { Elf_Internal_Rela outrel; bfd_boolean skip, relocate; @@ -4805,14 +3258,7 @@ direct: if (skip) memset (&outrel, 0, sizeof outrel); - /* h->dynindx may be -1 if this symbol was marked to - become local. */ - else if (h != NULL - && h->dynindx != -1 - && (IS_X86_64_PCREL_TYPE (r_type) - || !(bfd_link_executable (info) - || SYMBOLIC_BIND (info, h)) - || ! h->def_regular)) + else if (COPY_INPUT_RELOC_P (info, h, r_type)) { outrel.r_info = htab->r_info (h->dynindx, r_type); outrel.r_addend = rel->r_addend; @@ -4824,7 +3270,7 @@ direct: convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */ if (r_type == htab->pointer_r_type || (r_type == R_X86_64_32 - && info->no_reloc_overflow_check)) + && htab->params->no_reloc_overflow_check)) { relocate = TRUE; outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); @@ -4848,22 +3294,14 @@ direct: else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - if (addend < 0) - (*_bfd_error_handler) - (_("%B: addend -0x%x in relocation %s against " - "symbol `%s' at 0x%lx in section `%A' is " - "out of range"), - input_bfd, input_section, addend, - howto->name, name, - (unsigned long) rel->r_offset); - else - (*_bfd_error_handler) - (_("%B: addend 0x%x in relocation %s against " - "symbol `%s' at 0x%lx in section `%A' is " - "out of range"), - input_bfd, input_section, addend, - howto->name, name, - (unsigned long) rel->r_offset); + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB: addend %s%#x in relocation %s against " + "symbol `%s' at %#" PRIx64 + " in section `%pA' is out of range"), + input_bfd, addend < 0 ? 
"-" : "", addend, + howto->name, name, (uint64_t) rel->r_offset, + input_section); bfd_set_error (bfd_error_bad_value); return FALSE; } @@ -4929,24 +3367,25 @@ direct: case R_X86_64_GOTTPOFF: tls_type = GOT_UNKNOWN; if (h == NULL && local_got_offsets) - tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx]; + tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx]; else if (h != NULL) - tls_type = elf_x86_64_hash_entry (h)->tls_type; + tls_type = elf_x86_hash_entry (h)->tls_type; + r_type_tls = r_type; if (! elf_x86_64_tls_transition (info, input_bfd, input_section, contents, symtab_hdr, sym_hashes, - &r_type, tls_type, rel, + &r_type_tls, tls_type, rel, relend, h, r_symndx, TRUE)) return FALSE; - if (r_type == R_X86_64_TPOFF32) + if (r_type_tls == R_X86_64_TPOFF32) { bfd_vma roff = rel->r_offset; BFD_ASSERT (! unresolved_reloc); - if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) + if (r_type == R_X86_64_TLSGD) { /* GD->LE transition. For 64bit, change .byte 0x66; leaq foo@tlsgd(%rip), %rdi @@ -4986,20 +3425,39 @@ direct: { if (contents[roff + 5] == 0xb8) { + if (roff < 3 + || (roff - 3 + 22) > input_section->size) + { + corrupt_input: + info->callbacks->einfo + (_("%F%P: corrupt input: %pB\n"), + input_bfd); + return FALSE; + } memcpy (contents + roff - 3, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" "\0\0\0\0\x66\x0f\x1f\x44\0", 22); largepic = 1; } else - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", - 16); + { + if (roff < 4 + || (roff - 4 + 16) > input_section->size) + goto corrupt_input; + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", + 16); + } } else - memcpy (contents + roff - 3, - "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", - 15); + { + if (roff < 3 + || (roff - 3 + 15) > input_section->size) + goto corrupt_input; + memcpy (contents + roff - 3, + "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", + 15); + } bfd_put_32 (output_bfd, elf_x86_64_tpoff (info, relocation), contents + roff + 8 + largepic); @@ -5009,20 +3467,26 @@ direct: wrel++; continue; } - else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC) + else if (r_type == R_X86_64_GOTPC32_TLSDESC) { /* GDesc -> LE transition. It's originally something like: - leaq x@tlsdesc(%rip), %rax + leaq x@tlsdesc(%rip), %rax <--- LP64 mode. + rex leal x@tlsdesc(%rip), %eax <--- X32 mode. Change it to: - movl $x@tpoff, %rax. */ + movq $x@tpoff, %rax <--- LP64 mode. + rex movl $x@tpoff, %eax <--- X32 mode. + */ unsigned int val, type; + if (roff < 3) + goto corrupt_input; type = bfd_get_8 (input_bfd, contents + roff - 3); val = bfd_get_8 (input_bfd, contents + roff - 1); - bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1), + bfd_put_8 (output_bfd, + (type & 0x48) | ((type >> 2) & 1), contents + roff - 3); bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7), @@ -5032,18 +3496,37 @@ direct: contents + roff); continue; } - else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL) + else if (r_type == R_X86_64_TLSDESC_CALL) { /* GDesc -> LE transition. It's originally: - call *(%rax) + call *(%rax) <--- LP64 mode. + call *(%eax) <--- X32 mode. Turn it into: - xchg %ax,%ax. */ - bfd_put_8 (output_bfd, 0x66, contents + roff); - bfd_put_8 (output_bfd, 0x90, contents + roff + 1); + xchg %ax,%ax <-- LP64 mode. + nopl (%rax) <-- X32 mode. + */ + unsigned int prefix = 0; + if (!ABI_64_P (input_bfd)) + { + /* Check for call *x@tlsdesc(%eax). 
*/ + if (contents[roff] == 0x67) + prefix = 1; + } + if (prefix) + { + bfd_put_8 (output_bfd, 0x0f, contents + roff); + bfd_put_8 (output_bfd, 0x1f, contents + roff + 1); + bfd_put_8 (output_bfd, 0x00, contents + roff + 2); + } + else + { + bfd_put_8 (output_bfd, 0x66, contents + roff); + bfd_put_8 (output_bfd, 0x90, contents + roff + 1); + } continue; } - else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF) + else if (r_type == R_X86_64_GOTTPOFF) { /* IE->LE transition: For 64bit, originally it can be one of: @@ -5066,7 +3549,11 @@ direct: if (roff >= 3) val = bfd_get_8 (input_bfd, contents + roff - 3); else - val = 0; + { + if (roff < 2) + goto corrupt_input; + val = 0; + } type = bfd_get_8 (input_bfd, contents + roff - 2); reg = bfd_get_8 (input_bfd, contents + roff - 1); reg >>= 3; @@ -5074,11 +3561,19 @@ direct: { /* movq */ if (val == 0x4c) - bfd_put_8 (output_bfd, 0x49, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x49, + contents + roff - 3); + } else if (!ABI_64_P (output_bfd) && val == 0x44) - bfd_put_8 (output_bfd, 0x41, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x41, + contents + roff - 3); + } bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); bfd_put_8 (output_bfd, 0xc0 | reg, @@ -5089,11 +3584,19 @@ direct: /* addq/addl -> addq/addl - addressing with %rsp/%r12 is special */ if (val == 0x4c) - bfd_put_8 (output_bfd, 0x49, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x49, + contents + roff - 3); + } else if (!ABI_64_P (output_bfd) && val == 0x44) - bfd_put_8 (output_bfd, 0x41, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x41, + contents + roff - 3); + } bfd_put_8 (output_bfd, 0x81, contents + roff - 2); bfd_put_8 (output_bfd, 0xc0 | reg, @@ -5103,11 +3606,19 @@ direct: { /* addq/addl -> leaq/leal */ if (val == 0x4c) - bfd_put_8 (output_bfd, 0x4d, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x4d, + contents + roff - 3); + } else if (!ABI_64_P (output_bfd) && val == 0x44) - bfd_put_8 (output_bfd, 0x45, - contents + roff - 3); + { + if (roff < 3) + goto corrupt_input; + bfd_put_8 (output_bfd, 0x45, + contents + roff - 3); + } bfd_put_8 (output_bfd, 0x8d, contents + roff - 2); bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3), @@ -5128,7 +3639,7 @@ direct: if (h != NULL) { off = h->got.offset; - offplt = elf_x86_64_hash_entry (h)->tlsdesc_got; + offplt = elf_x86_hash_entry (h)->tlsdesc_got; } else { @@ -5163,7 +3674,7 @@ direct: + htab->sgotplt_jump_table_size); sreloc = htab->elf.srelplt; if (indx == 0) - outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); + outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); else outrel.r_addend = 0; elf_append_rela (output_bfd, sreloc, &outrel); @@ -5185,7 +3696,7 @@ direct: outrel.r_addend = 0; if ((dr_type == R_X86_64_TPOFF64 || dr_type == R_X86_64_TLSDESC) && indx == 0) - outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); + outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); outrel.r_info = htab->r_info (indx, dr_type); elf_append_rela (output_bfd, sreloc, &outrel); @@ -5196,7 +3707,7 @@ direct: { BFD_ASSERT (! unresolved_reloc); bfd_put_64 (output_bfd, - relocation - elf_x86_64_dtpoff_base (info), + relocation - _bfd_x86_elf_dtpoff_base (info), htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); } else @@ -5221,7 +3732,7 @@ direct: if (off >= (bfd_vma) -2 && ! 
GOT_TLS_GDESC_P (tls_type)) abort (); - if (r_type == ELF32_R_TYPE (rel->r_info)) + if (r_type_tls == r_type) { if (r_type == R_X86_64_GOTPC32_TLSDESC || r_type == R_X86_64_TLSDESC_CALL) @@ -5237,7 +3748,7 @@ direct: { bfd_vma roff = rel->r_offset; - if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) + if (r_type == R_X86_64_TLSGD) { /* GD->IE transition. For 64bit, change .byte 0x66; leaq foo@tlsgd(%rip), %rdi @@ -5277,20 +3788,33 @@ direct: { if (contents[roff + 5] == 0xb8) { + if (roff < 3 + || (roff - 3 + 22) > input_section->size) + goto corrupt_input; memcpy (contents + roff - 3, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" "\0\0\0\0\x66\x0f\x1f\x44\0", 22); largepic = 1; } else - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", - 16); + { + if (roff < 4 + || (roff - 4 + 16) > input_section->size) + goto corrupt_input; + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", + 16); + } } else - memcpy (contents + roff - 3, - "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", - 15); + { + if (roff < 3 + || (roff - 3 + 15) > input_section->size) + goto corrupt_input; + memcpy (contents + roff - 3, + "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", + 15); + } relocation = (htab->elf.sgot->output_section->vma + htab->elf.sgot->output_offset + off @@ -5306,19 +3830,26 @@ direct: wrel++; continue; } - else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC) + else if (r_type == R_X86_64_GOTPC32_TLSDESC) { /* GDesc -> IE transition. It's originally something like: - leaq x@tlsdesc(%rip), %rax + leaq x@tlsdesc(%rip), %rax <--- LP64 mode. + rex leal x@tlsdesc(%rip), %eax <--- X32 mode. Change it to: - movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */ + # before xchg %ax,%ax in LP64 mode. + movq x@gottpoff(%rip), %rax + # before nopl (%rax) in X32 mode. + rex movl x@gottpoff(%rip), %eax + */ /* Now modify the instruction as appropriate. To - turn a leaq into a movq in the form we use it, it + turn a lea into a mov in the form we use it, it suffices to change the second byte from 0x8d to 0x8b. */ + if (roff < 2) + goto corrupt_input; bfd_put_8 (output_bfd, 0x8b, contents + roff - 2); bfd_put_32 (output_bfd, @@ -5331,17 +3862,36 @@ direct: contents + roff); continue; } - else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL) + else if (r_type == R_X86_64_TLSDESC_CALL) { /* GDesc -> IE transition. It's originally: - call *(%rax) + call *(%rax) <--- LP64 mode. + call *(%eax) <--- X32 mode. Change it to: - xchg %ax, %ax. */ + xchg %ax, %ax <-- LP64 mode. + nopl (%rax) <-- X32 mode. + */ - bfd_put_8 (output_bfd, 0x66, contents + roff); - bfd_put_8 (output_bfd, 0x90, contents + roff + 1); + unsigned int prefix = 0; + if (!ABI_64_P (input_bfd)) + { + /* Check for call *x@tlsdesc(%eax). 
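+		     In X32 the call carries a 0x67 address-size
+		     prefix (67 ff 10), so the three-byte
+		       nopl (%rax)    (0f 1f 00)
+		     is written instead of the two-byte
+		       xchg %ax,%ax   (66 90).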
*/ + if (contents[roff] == 0x67) + prefix = 1; + } + if (prefix) + { + bfd_put_8 (output_bfd, 0x0f, contents + roff); + bfd_put_8 (output_bfd, 0x1f, contents + roff + 1); + bfd_put_8 (output_bfd, 0x00, contents + roff + 2); + } + else + { + bfd_put_8 (output_bfd, 0x66, contents + roff); + bfd_put_8 (output_bfd, 0x90, contents + roff + 1); + } continue; } else @@ -5387,28 +3937,58 @@ direct: BFD_ASSERT (r_type == R_X86_64_TPOFF32); if (ABI_64_P (output_bfd)) { + if ((rel->r_offset + 5) >= input_section->size) + goto corrupt_input; if (contents[rel->r_offset + 5] == 0xb8) - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" - "\x64\x48\x8b\x04\x25\0\0\0", 22); + { + if (rel->r_offset < 3 + || (rel->r_offset - 3 + 22) > input_section->size) + goto corrupt_input; + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" + "\x64\x48\x8b\x04\x25\0\0\0", 22); + } else if (contents[rel->r_offset + 4] == 0xff || contents[rel->r_offset + 4] == 0x67) - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", - 13); + { + if (rel->r_offset < 3 + || (rel->r_offset - 3 + 13) > input_section->size) + goto corrupt_input; + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", + 13); + + } else - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + { + if (rel->r_offset < 3 + || (rel->r_offset - 3 + 12) > input_section->size) + goto corrupt_input; + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + } } else { + if ((rel->r_offset + 4) >= input_section->size) + goto corrupt_input; if (contents[rel->r_offset + 4] == 0xff) - memcpy (contents + rel->r_offset - 3, - "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", - 13); + { + if (rel->r_offset < 3 + || (rel->r_offset - 3 + 13) > input_section->size) + goto corrupt_input; + memcpy (contents + rel->r_offset - 3, + "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", + 13); + } else - memcpy (contents + rel->r_offset - 3, - "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); + { + if (rel->r_offset < 3 + || (rel->r_offset - 3 + 12) > input_section->size) + goto corrupt_input; + memcpy (contents + rel->r_offset - 3, + "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); + } } /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. 
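	     The call to __tls_get_addr that carried these relocations
	     was overwritten by the memcpy above, so they must not be
	     applied to the rewritten bytes.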
*/ @@ -5420,7 +4000,7 @@ direct: if (htab->elf.sgot == NULL) abort (); - off = htab->tls_ld_got.offset; + off = htab->tls_ld_or_ldm_got.offset; if (off & 1) off &= ~1; else @@ -5441,7 +4021,7 @@ direct: outrel.r_addend = 0; elf_append_rela (output_bfd, htab->elf.srelgot, &outrel); - htab->tls_ld_got.offset |= 1; + htab->tls_ld_or_ldm_got.offset |= 1; } relocation = htab->elf.sgot->output_section->vma + htab->elf.sgot->output_offset + off; @@ -5451,7 +4031,7 @@ direct: case R_X86_64_DTPOFF32: if (!bfd_link_executable (info) || (input_section->flags & SEC_CODE) == 0) - relocation -= elf_x86_64_dtpoff_base (info); + relocation -= _bfd_x86_elf_dtpoff_base (info); else relocation = elf_x86_64_tpoff (info, relocation); break; @@ -5464,7 +4044,7 @@ direct: case R_X86_64_DTPOFF64: BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); - relocation -= elf_x86_64_dtpoff_base (info); + relocation -= _bfd_x86_elf_dtpoff_base (info); break; default: @@ -5480,22 +4060,38 @@ direct: && _bfd_elf_section_offset (output_bfd, info, input_section, rel->r_offset) != (bfd_vma) -1) { - (*_bfd_error_handler) - (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), - input_bfd, - input_section, - (long) rel->r_offset, - howto->name, - h->root.root.string); - return FALSE; + switch (r_type) + { + case R_X86_64_32S: + sec = h->root.u.def.section; + if ((info->nocopyreloc + || (eh->def_protected + && elf_has_no_copy_on_protected (h->root.u.def.section->owner))) + && !(h->root.u.def.section->flags & SEC_CODE)) + return elf_x86_64_need_pic (info, input_bfd, input_section, + h, NULL, NULL, howto); + /* Fall through. */ + + default: + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB(%pA+%#" PRIx64 "): " + "unresolvable %s relocation against symbol `%s'"), + input_bfd, + input_section, + (uint64_t) rel->r_offset, + howto->name, + h->root.root.string); + return FALSE; + } } -do_relocation: + do_relocation: r = _bfd_final_link_relocate (howto, input_bfd, input_section, contents, rel->r_offset, relocation, rel->r_addend); -check_relocation_error: + check_relocation_error: if (r != bfd_reloc_ok) { const char *name; @@ -5510,19 +4106,28 @@ check_relocation_error: if (name == NULL) return FALSE; if (*name == '\0') - name = bfd_section_name (input_bfd, sec); + name = bfd_section_name (sec); + } + + if (r == bfd_reloc_overflow) + { + if (converted_reloc) + { + info->callbacks->einfo + (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n")); + return FALSE; + } + (*info->callbacks->reloc_overflow) + (info, (h ? &h->root : NULL), name, howto->name, + (bfd_vma) 0, input_bfd, input_section, rel->r_offset); } - - if (r == bfd_reloc_overflow) - (*info->callbacks->reloc_overflow) - (info, (h ? 
&h->root : NULL), name, howto->name, - (bfd_vma) 0, input_bfd, input_section, rel->r_offset); else { - (*_bfd_error_handler) - (_("%B(%A+0x%lx): reloc against `%s': error %d"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"), input_bfd, input_section, - (long) rel->r_offset, name, (int) r); + (uint64_t) rel->r_offset, name, (int) r); return FALSE; } } @@ -5563,37 +4168,31 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { - struct elf_x86_64_link_hash_table *htab; - const struct elf_x86_64_backend_data *abed; - bfd_boolean use_plt_bnd; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_table *htab; + bfd_boolean use_plt_second; + struct elf_x86_link_hash_entry *eh; bfd_boolean local_undefweak; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; - /* Use MPX backend data in case of BND relocation. Use .plt_bnd - section only if there is .plt section. */ - use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL; - abed = (use_plt_bnd - ? &elf_x86_64_bnd_arch_bed - : get_elf_x86_64_backend_data (output_bfd)); + /* Use the second PLT section only if there is .plt section. */ + use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL; - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; + if (eh->no_finish_dynamic_symbol) + abort (); /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for resolved undefined weak symbols in executable so that their references have value 0 at run-time. */ - local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, - eh->has_got_reloc, - eh); + local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh); if (h->plt.offset != (bfd_vma) -1) { bfd_vma plt_index; - bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset; - bfd_vma plt_plt_insn_end, plt_got_insn_size; + bfd_vma got_offset, plt_offset; Elf_Internal_Rela rela; bfd_byte *loc; asection *plt, *gotplt, *relplt, *resolved_plt; @@ -5615,17 +4214,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, relplt = htab->elf.irelplt; } - /* This symbol has an entry in the procedure linkage table. Set - it up. */ - if ((h->dynindx == -1 - && !local_undefweak - && !((h->forced_local || bfd_link_executable (info)) - && h->def_regular - && h->type == STT_GNU_IFUNC)) - || plt == NULL - || gotplt == NULL - || relplt == NULL) - abort (); + VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak) /* Get the index in the procedure linkage table which corresponds to this symbol. This is the index of this symbol @@ -5640,60 +4229,30 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, if (plt == htab->elf.splt) { - got_offset = h->plt.offset / abed->plt_entry_size - 1; + got_offset = (h->plt.offset / htab->plt.plt_entry_size + - htab->plt.has_plt0); got_offset = (got_offset + 3) * GOT_ENTRY_SIZE; } else { - got_offset = h->plt.offset / abed->plt_entry_size; + got_offset = h->plt.offset / htab->plt.plt_entry_size; got_offset = got_offset * GOT_ENTRY_SIZE; } - plt_plt_insn_end = abed->plt_plt_insn_end; - plt_plt_offset = abed->plt_plt_offset; - plt_got_insn_size = abed->plt_got_insn_size; - plt_got_offset = abed->plt_got_offset; - if (use_plt_bnd) + /* Fill in the entry in the procedure linkage table. 
*/ + memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry, + htab->plt.plt_entry_size); + if (use_plt_second) { - /* Use the second PLT with BND relocations. */ - const bfd_byte *plt_entry, *plt2_entry; - - if (eh->has_bnd_reloc) - { - plt_entry = elf_x86_64_bnd_plt_entry; - plt2_entry = elf_x86_64_bnd_plt2_entry; - } - else - { - plt_entry = elf_x86_64_legacy_plt_entry; - plt2_entry = elf_x86_64_legacy_plt2_entry; - - /* Subtract 1 since there is no BND prefix. */ - plt_plt_insn_end -= 1; - plt_plt_offset -= 1; - plt_got_insn_size -= 1; - plt_got_offset -= 1; - } - - BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry) - == sizeof (elf_x86_64_legacy_plt_entry)); + memcpy (htab->plt_second->contents + eh->plt_second.offset, + htab->non_lazy_plt->plt_entry, + htab->non_lazy_plt->plt_entry_size); - /* Fill in the entry in the procedure linkage table. */ - memcpy (plt->contents + h->plt.offset, - plt_entry, sizeof (elf_x86_64_legacy_plt_entry)); - /* Fill in the entry in the second PLT. */ - memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset, - plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry)); - - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { - /* Fill in the entry in the procedure linkage table. */ - memcpy (plt->contents + h->plt.offset, abed->plt_entry, - abed->plt_entry_size); - resolved_plt = plt; plt_offset = h->plt.offset; } @@ -5708,15 +4267,17 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, - resolved_plt->output_section->vma - resolved_plt->output_offset - plt_offset - - plt_got_insn_size); + - htab->plt.plt_got_insn_size); /* Check PC-relative offset overflow in PLT entry. */ if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) - info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"), + /* xgettext:c-format */ + info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, plt_got_pcrel_offset, - resolved_plt->contents + plt_offset + plt_got_offset); + (resolved_plt->contents + plt_offset + + htab->plt.plt_got_offset)); /* Fill in the entry in the global offset table, initially this points to the second part of the PLT entry. Leave the entry @@ -5724,22 +4285,23 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, against undefined weak symbol in PIE. */ if (!local_undefweak) { - bfd_put_64 (output_bfd, (plt->output_section->vma - + plt->output_offset - + h->plt.offset - + abed->plt_lazy_offset), - gotplt->contents + got_offset); + if (htab->plt.has_plt0) + bfd_put_64 (output_bfd, (plt->output_section->vma + + plt->output_offset + + h->plt.offset + + htab->lazy_plt->plt_lazy_offset), + gotplt->contents + got_offset); /* Fill in the entry in the .rela.plt section. */ rela.r_offset = (gotplt->output_section->vma + gotplt->output_offset + got_offset); - if (h->dynindx == -1 - || ((bfd_link_executable (info) - || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) - && h->def_regular - && h->type == STT_GNU_IFUNC)) + if (PLT_LOCAL_IFUNC_P (info, h)) { + info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), + h->root.root.string, + h->root.u.def.section->owner); + /* If an STT_GNU_IFUNC symbol is locally defined, generate R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. 
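	     For R_X86_64_IRELATIVE, ld.so calls the resolver found at
	     load_base + r_addend and stores the returned address in
	     the GOT slot, so no dynamic symbol entry is needed for a
	     locally defined IFUNC.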
*/ rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE); @@ -5756,24 +4318,28 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, plt_index = htab->next_jump_slot_index++; } - /* Don't fill PLT entry for static executables. */ - if (plt == htab->elf.splt) + /* Don't fill the second and third slots in PLT entry for + static executables nor without PLT0. */ + if (plt == htab->elf.splt && htab->plt.has_plt0) { - bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end; + bfd_vma plt0_offset + = h->plt.offset + htab->lazy_plt->plt_plt_insn_end; /* Put relocation index. */ bfd_put_32 (output_bfd, plt_index, (plt->contents + h->plt.offset - + abed->plt_reloc_offset)); + + htab->lazy_plt->plt_reloc_offset)); /* Put offset for jmp .PLT0 and check for overflow. We don't check relocation index for overflow since branch displacement will overflow first. */ if (plt0_offset > 0x80000000) - info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"), + /* xgettext:c-format */ + info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, - plt0_offset, - plt->contents + h->plt.offset + plt_plt_offset); + (plt->contents + h->plt.offset + + htab->lazy_plt->plt_plt_offset)); } bed = get_elf_backend_data (output_bfd); @@ -5783,11 +4349,10 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, } else if (eh->plt_got.offset != (bfd_vma) -1) { - bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size; + bfd_vma got_offset, plt_offset; asection *plt, *got; bfd_boolean got_after_plt; int32_t got_pcrel_offset; - const bfd_byte *got_plt_entry; /* Set the entry in the GOT procedure linkage table. */ plt = htab->plt_got; @@ -5795,30 +4360,18 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, got_offset = h->got.offset; if (got_offset == (bfd_vma) -1 - || h->type == STT_GNU_IFUNC + || (h->type == STT_GNU_IFUNC && h->def_regular) || plt == NULL || got == NULL) abort (); - /* Use the second PLT entry template for the GOT PLT since they + /* Use the non-lazy PLT entry template for the GOT PLT since they are the identical. */ - plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size; - plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset; - if (eh->has_bnd_reloc) - got_plt_entry = elf_x86_64_bnd_plt2_entry; - else - { - got_plt_entry = elf_x86_64_legacy_plt2_entry; - - /* Subtract 1 since there is no BND prefix. */ - plt_got_insn_size -= 1; - plt_got_offset -= 1; - } - /* Fill in the entry in the GOT procedure linkage table. */ plt_offset = eh->plt_got.offset; memcpy (plt->contents + plt_offset, - got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry)); + htab->non_lazy_plt->plt_entry, + htab->non_lazy_plt->plt_entry_size); /* Put offset the PC-relative instruction referring to the GOT entry, subtracting the size of that instruction. */ @@ -5828,17 +4381,19 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, - plt->output_section->vma - plt->output_offset - plt_offset - - plt_got_insn_size); + - htab->non_lazy_plt->plt_got_insn_size); /* Check PC-relative offset overflow in GOT PLT entry. 
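	     The jmp uses a signed 32-bit displacement, so the GOT slot
	     must lie within 2GB of the end of the instruction; a sign
	     that disagrees with the relative placement of the two
	     sections means the offset wrapped around.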
*/ got_after_plt = got->output_section->vma > plt->output_section->vma; if ((got_after_plt && got_pcrel_offset < 0) || (!got_after_plt && got_pcrel_offset > 0)) - info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"), + /* xgettext:c-format */ + info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, got_pcrel_offset, - plt->contents + plt_offset + plt_got_offset); + (plt->contents + plt_offset + + htab->non_lazy_plt->plt_got_offset)); } if (!local_undefweak @@ -5859,14 +4414,17 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, sym->st_value = 0; } + _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym); + /* Don't generate dynamic GOT relocation against undefined weak symbol in executable. */ if (h->got.offset != (bfd_vma) -1 - && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type) - && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE + && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type) + && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE && !local_undefweak) { Elf_Internal_Rela rela; + asection *relgot = htab->elf.srelgot; /* This symbol has an entry in the global offset table. Set it up. */ @@ -5885,7 +4443,31 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, if (h->def_regular && h->type == STT_GNU_IFUNC) { - if (bfd_link_pic (info)) + if (h->plt.offset == (bfd_vma) -1) + { + /* STT_GNU_IFUNC is referenced without PLT. */ + if (htab->elf.splt == NULL) + { + /* use .rel[a].iplt section to store .got relocations + in static executable. */ + relgot = htab->elf.irelplt; + } + if (SYMBOL_REFERENCES_LOCAL_P (info, h)) + { + info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), + h->root.root.string, + h->root.u.def.section->owner); + + rela.r_info = htab->r_info (0, + R_X86_64_IRELATIVE); + rela.r_addend = (h->root.u.def.value + + h->root.u.def.section->output_section->vma + + h->root.u.def.section->output_offset); + } + else + goto do_glob_dat; + } + else if (bfd_link_pic (info)) { /* Generate R_X86_64_GLOB_DAT. */ goto do_glob_dat; @@ -5893,6 +4475,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, else { asection *plt; + bfd_vma plt_offset; if (!h->pointer_equality_needed) abort (); @@ -5900,18 +4483,27 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, /* For non-shared object, we can't use .got.plt, which contains the real function addres if we need pointer equality. We load the GOT entry with the PLT entry. */ - plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; + if (htab->plt_second != NULL) + { + plt = htab->plt_second; + plt_offset = eh->plt_second.offset; + } + else + { + plt = htab->elf.splt ? 
htab->elf.splt : htab->elf.iplt; + plt_offset = h->plt.offset; + } bfd_put_64 (output_bfd, (plt->output_section->vma + plt->output_offset - + h->plt.offset), + + plt_offset), htab->elf.sgot->contents + h->got.offset); return TRUE; } } else if (bfd_link_pic (info) - && SYMBOL_REFERENCES_LOCAL (info, h)) + && SYMBOL_REFERENCES_LOCAL_P (info, h)) { - if (!h->def_regular) + if (!SYMBOL_DEFINED_NON_SHARED_P (h)) return FALSE; BFD_ASSERT((h->got.offset & 1) != 0); rela.r_info = htab->r_info (0, R_X86_64_RELATIVE); @@ -5922,34 +4514,34 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, else { BFD_ASSERT((h->got.offset & 1) == 0); -do_glob_dat: + do_glob_dat: bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgot->contents + h->got.offset); rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT); rela.r_addend = 0; } - elf_append_rela (output_bfd, htab->elf.srelgot, &rela); + elf_append_rela (output_bfd, relgot, &rela); } if (h->needs_copy) { Elf_Internal_Rela rela; + asection *s; /* This symbol needs a copy reloc. Set it up. */ - - if (h->dynindx == -1 - || (h->root.type != bfd_link_hash_defined - && h->root.type != bfd_link_hash_defweak) - || htab->srelbss == NULL) - abort (); + VERIFY_COPY_RELOC (h, htab) rela.r_offset = (h->root.u.def.value + h->root.u.def.section->output_section->vma + h->root.u.def.section->output_offset); rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); rela.r_addend = 0; - elf_append_rela (output_bfd, htab->srelbss, &rela); + if (h->root.u.def.section == htab->elf.sdynrelro) + s = htab->elf.sreldynrelro; + else + s = htab->elf.srelbss; + elf_append_rela (output_bfd, s, &rela); } return TRUE; @@ -5967,7 +4559,7 @@ elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) = (struct bfd_link_info *) inf; return elf_x86_64_finish_dynamic_symbol (info->output_bfd, - info, h, NULL); + info, h, NULL); } /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry @@ -5986,7 +4578,7 @@ elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh, return TRUE; return elf_x86_64_finish_dynamic_symbol (info->output_bfd, - info, h, NULL); + info, h, NULL); } /* Used to decide how to sort relocs in an optimal manner for the @@ -5999,16 +4591,14 @@ elf_x86_64_reloc_type_class (const struct bfd_link_info *info, { bfd *abfd = info->output_bfd; const struct elf_backend_data *bed = get_elf_backend_data (abfd); - struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info); - - if ((int) ELF32_R_TYPE (rela->r_info) == R_X86_64_IRELATIVE) - return reloc_class_ifunc; + struct elf_x86_link_hash_table *htab + = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab->elf.dynsym != NULL && htab->elf.dynsym->contents != NULL) { /* Check relocation against STT_GNU_IFUNC symbol if there are - dynamic symbols. */ + dynamic symbols. 
*/ unsigned long r_symndx = htab->r_sym (rela->r_info); if (r_symndx != STN_UNDEF) { @@ -6026,6 +4616,8 @@ elf_x86_64_reloc_type_class (const struct bfd_link_info *info, switch ((int) ELF32_R_TYPE (rela->r_info)) { + case R_X86_64_IRELATIVE: + return reloc_class_ifunc; case R_X86_64_RELATIVE: case R_X86_64_RELATIVE64: return reloc_class_relative; @@ -6044,100 +4636,27 @@ static bfd_boolean elf_x86_64_finish_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { - struct elf_x86_64_link_hash_table *htab; - bfd *dynobj; - asection *sdyn; - const struct elf_x86_64_backend_data *abed; + struct elf_x86_link_hash_table *htab; - htab = elf_x86_64_hash_table (info); + htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info); if (htab == NULL) return FALSE; - /* Use MPX backend data in case of BND relocation. Use .plt_bnd - section only if there is .plt section. */ - abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL - ? &elf_x86_64_bnd_arch_bed - : get_elf_x86_64_backend_data (output_bfd)); - - dynobj = htab->elf.dynobj; - sdyn = bfd_get_linker_section (dynobj, ".dynamic"); + if (! htab->elf.dynamic_sections_created) + return TRUE; - if (htab->elf.dynamic_sections_created) + if (htab->elf.splt && htab->elf.splt->size > 0) { - bfd_byte *dyncon, *dynconend; - const struct elf_backend_data *bed; - bfd_size_type sizeof_dyn; - - if (sdyn == NULL || htab->elf.sgot == NULL) - abort (); - - bed = get_elf_backend_data (dynobj); - sizeof_dyn = bed->s->sizeof_dyn; - dyncon = sdyn->contents; - dynconend = sdyn->contents + sdyn->size; - for (; dyncon < dynconend; dyncon += sizeof_dyn) - { - Elf_Internal_Dyn dyn; - asection *s; - - (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn); - - switch (dyn.d_tag) - { - default: - continue; - - case DT_PLTGOT: - s = htab->elf.sgotplt; - dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; - break; - - case DT_JMPREL: - dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma; - break; - - case DT_PLTRELSZ: - s = htab->elf.srelplt->output_section; - dyn.d_un.d_val = s->size; - break; - - case DT_RELASZ: - /* The procedure linkage table relocs (DT_JMPREL) should - not be included in the overall relocs (DT_RELA). - Therefore, we override the DT_RELASZ entry here to - make it not include the JMPREL relocs. Since the - linker script arranges for .rela.plt to follow all - other relocation sections, we don't have to worry - about changing the DT_RELA entry. */ - if (htab->elf.srelplt != NULL) - { - s = htab->elf.srelplt->output_section; - dyn.d_un.d_val -= s->size; - } - break; + elf_section_data (htab->elf.splt->output_section) + ->this_hdr.sh_entsize = htab->plt.plt_entry_size; - case DT_TLSDESC_PLT: - s = htab->elf.splt; - dyn.d_un.d_ptr = s->output_section->vma + s->output_offset - + htab->tlsdesc_plt; - break; - - case DT_TLSDESC_GOT: - s = htab->elf.sgot; - dyn.d_un.d_ptr = s->output_section->vma + s->output_offset - + htab->tlsdesc_got; - break; - } - - (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon); - } - - /* Fill in the special first entry in the procedure linkage table. */ - if (htab->elf.splt && htab->elf.splt->size > 0) + if (htab->plt.has_plt0) { - /* Fill in the first entry in the procedure linkage table. */ + /* Fill in the special first entry in the procedure linkage + table. */ memcpy (htab->elf.splt->contents, - abed->plt0_entry, abed->plt_entry_size); + htab->lazy_plt->plt0_entry, + htab->lazy_plt->plt0_entry_size); /* Add offset for pushq GOT+8(%rip), since the instruction uses 6 bytes subtract this value. 
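	     That is, the stored value is roughly
	       (GOT + 8) - (address of the pushq + 6),
	     the %rip-relative displacement once the 6-byte
	     "ff 35 disp32" encoding has been fetched.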
*/ bfd_put_32 (output_bfd, @@ -6147,256 +4666,296 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, - htab->elf.splt->output_section->vma - htab->elf.splt->output_offset - 6), - htab->elf.splt->contents + abed->plt0_got1_offset); - /* Add offset for the PC-relative instruction accessing GOT+16, - subtracting the offset to the end of that instruction. */ + (htab->elf.splt->contents + + htab->lazy_plt->plt0_got1_offset)); + /* Add offset for the PC-relative instruction accessing + GOT+16, subtracting the offset to the end of that + instruction. */ bfd_put_32 (output_bfd, (htab->elf.sgotplt->output_section->vma + htab->elf.sgotplt->output_offset + 16 - htab->elf.splt->output_section->vma - htab->elf.splt->output_offset - - abed->plt0_got2_insn_end), - htab->elf.splt->contents + abed->plt0_got2_offset); + - htab->lazy_plt->plt0_got2_insn_end), + (htab->elf.splt->contents + + htab->lazy_plt->plt0_got2_offset)); + } - elf_section_data (htab->elf.splt->output_section) - ->this_hdr.sh_entsize = abed->plt_entry_size; + if (htab->tlsdesc_plt) + { + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->elf.sgot->contents + htab->tlsdesc_got); - if (htab->tlsdesc_plt) - { - bfd_put_64 (output_bfd, (bfd_vma) 0, - htab->elf.sgot->contents + htab->tlsdesc_got); - - memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, - abed->plt0_entry, abed->plt_entry_size); - - /* Add offset for pushq GOT+8(%rip), since the - instruction uses 6 bytes subtract this value. */ - bfd_put_32 (output_bfd, - (htab->elf.sgotplt->output_section->vma - + htab->elf.sgotplt->output_offset - + 8 - - htab->elf.splt->output_section->vma - - htab->elf.splt->output_offset - - htab->tlsdesc_plt - - 6), - htab->elf.splt->contents - + htab->tlsdesc_plt + abed->plt0_got1_offset); - /* Add offset for the PC-relative instruction accessing GOT+TDG, - where TGD stands for htab->tlsdesc_got, subtracting the offset + memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, + htab->lazy_plt->plt_tlsdesc_entry, + htab->lazy_plt->plt_tlsdesc_entry_size); + + /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4 + bytes and the instruction uses 6 bytes, subtract these + values. */ + bfd_put_32 (output_bfd, + (htab->elf.sgotplt->output_section->vma + + htab->elf.sgotplt->output_offset + + 8 + - htab->elf.splt->output_section->vma + - htab->elf.splt->output_offset + - htab->tlsdesc_plt + - htab->lazy_plt->plt_tlsdesc_got1_insn_end), + (htab->elf.splt->contents + + htab->tlsdesc_plt + + htab->lazy_plt->plt_tlsdesc_got1_offset)); + /* Add offset for indirect branch via GOT+TDG, where TDG + stands for htab->tlsdesc_got, subtracting the offset to the end of that instruction. 
*/ - bfd_put_32 (output_bfd, - (htab->elf.sgot->output_section->vma - + htab->elf.sgot->output_offset - + htab->tlsdesc_got - - htab->elf.splt->output_section->vma - - htab->elf.splt->output_offset - - htab->tlsdesc_plt - - abed->plt0_got2_insn_end), - htab->elf.splt->contents - + htab->tlsdesc_plt + abed->plt0_got2_offset); - } + bfd_put_32 (output_bfd, + (htab->elf.sgot->output_section->vma + + htab->elf.sgot->output_offset + + htab->tlsdesc_got + - htab->elf.splt->output_section->vma + - htab->elf.splt->output_offset + - htab->tlsdesc_plt + - htab->lazy_plt->plt_tlsdesc_got2_insn_end), + (htab->elf.splt->contents + + htab->tlsdesc_plt + + htab->lazy_plt->plt_tlsdesc_got2_offset)); } } - if (htab->plt_bnd != NULL) - elf_section_data (htab->plt_bnd->output_section) - ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry); - - if (htab->elf.sgotplt) - { - if (bfd_is_abs_section (htab->elf.sgotplt->output_section)) - { - (*_bfd_error_handler) - (_("discarded output section: `%A'"), htab->elf.sgotplt); - return FALSE; - } - - /* Fill in the first three entries in the global offset table. */ - if (htab->elf.sgotplt->size > 0) - { - /* Set the first entry in the global offset table to the address of - the dynamic section. */ - if (sdyn == NULL) - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents); - else - bfd_put_64 (output_bfd, - sdyn->output_section->vma + sdyn->output_offset, - htab->elf.sgotplt->contents); - /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE); - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2); - } + /* Fill PLT entries for undefined weak symbols in PIE. */ + if (bfd_link_pie (info)) + bfd_hash_traverse (&info->hash->table, + elf_x86_64_pie_finish_undefweak_symbol, + info); - elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize = - GOT_ENTRY_SIZE; - } + return TRUE; +} - /* Adjust .eh_frame for .plt section. */ - if (htab->plt_eh_frame != NULL - && htab->plt_eh_frame->contents != NULL) - { - if (htab->elf.splt != NULL - && htab->elf.splt->size != 0 - && (htab->elf.splt->flags & SEC_EXCLUDE) == 0 - && htab->elf.splt->output_section != NULL - && htab->plt_eh_frame->output_section != NULL) - { - bfd_vma plt_start = htab->elf.splt->output_section->vma; - bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma - + htab->plt_eh_frame->output_offset - + PLT_FDE_START_OFFSET; - bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, - htab->plt_eh_frame->contents - + PLT_FDE_START_OFFSET); - } - if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME) - { - if (! _bfd_elf_write_section_eh_frame (output_bfd, info, - htab->plt_eh_frame, - htab->plt_eh_frame->contents)) - return FALSE; - } - } +/* Fill PLT/GOT entries and allocate dynamic relocations for local + STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table. + It has to be done before elf_link_sort_relocs is called so that + dynamic relocations are properly sorted. 
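/* A minimal sketch of the reserved .got.plt header that the deleted
   block above used to emit (the work now happens in the shared
   elfxx-x86 code): slot 0 holds the link-time address of _DYNAMIC and
   slots 1 and 2 are left zero for the dynamic linker to fill in at
   run time.  put_le64 and the local entry-size constant exist only in
   this sketch.  */
#include <stdint.h>
#include <string.h>

#define SKETCH_GOT_ENTRY_SIZE 8

static void
put_le64 (unsigned char *p, uint64_t v)
{
  for (int i = 0; i < 8; i++)
    p[i] = (unsigned char) (v >> (8 * i));
}

void
write_gotplt_header (unsigned char *gotplt, uint64_t dynamic_vma)
{
  put_le64 (gotplt, dynamic_vma);		/* GOT[0] = _DYNAMIC */
  memset (gotplt + SKETCH_GOT_ENTRY_SIZE, 0,	/* GOT[1], GOT[2] */
          2 * SKETCH_GOT_ENTRY_SIZE);
}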
*/ - if (htab->elf.sgot && htab->elf.sgot->size > 0) - elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize - = GOT_ENTRY_SIZE; +static bfd_boolean +elf_x86_64_output_arch_local_syms + (bfd *output_bfd ATTRIBUTE_UNUSED, + struct bfd_link_info *info, + void *flaginfo ATTRIBUTE_UNUSED, + int (*func) (void *, const char *, + Elf_Internal_Sym *, + asection *, + struct elf_link_hash_entry *) ATTRIBUTE_UNUSED) +{ + struct elf_x86_link_hash_table *htab + = elf_x86_hash_table (info, X86_64_ELF_DATA); + if (htab == NULL) + return FALSE; /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ htab_traverse (htab->loc_hash_table, elf_x86_64_finish_local_dynamic_symbol, info); - /* Fill PLT entries for undefined weak symbols in PIE. */ - if (bfd_link_pie (info)) - bfd_hash_traverse (&info->hash->table, - elf_x86_64_pie_finish_undefweak_symbol, - info); - return TRUE; } -/* Return an array of PLT entry symbol values. */ +/* Forward declaration. */ +static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt; + +/* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all + dynamic relocations. */ -static bfd_vma * -elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt, - asection *relplt) +static long +elf_x86_64_get_synthetic_symtab (bfd *abfd, + long symcount ATTRIBUTE_UNUSED, + asymbol **syms ATTRIBUTE_UNUSED, + long dynsymcount, + asymbol **dynsyms, + asymbol **ret) { - bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); - arelent *p; - long count, i; - bfd_vma *plt_sym_val; - bfd_vma plt_offset; + long count, i, n; + int j; bfd_byte *plt_contents; - const struct elf_x86_64_backend_data *bed; - Elf_Internal_Shdr *hdr; - asection *plt_bnd; - - /* Get the .plt section contents. PLT passed down may point to the - .plt.bnd section. Make sure that PLT always points to the .plt - section. */ - plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd"); - if (plt_bnd) - { - if (plt != plt_bnd) - abort (); - plt = bfd_get_section_by_name (abfd, ".plt"); - if (plt == NULL) - abort (); - bed = &elf_x86_64_bnd_arch_bed; - } - else - bed = get_elf_x86_64_backend_data (abfd); - - plt_contents = (bfd_byte *) bfd_malloc (plt->size); - if (plt_contents == NULL) - return NULL; - if (!bfd_get_section_contents (abfd, (asection *) plt, - plt_contents, 0, plt->size)) + long relsize; + const struct elf_x86_lazy_plt_layout *lazy_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_plt; + const struct elf_x86_lazy_plt_layout *lazy_bnd_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt; + const struct elf_x86_lazy_plt_layout *lazy_ibt_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt; + asection *plt; + enum elf_x86_plt_type plt_type; + struct elf_x86_plt plts[] = { -bad_return: - free (plt_contents); - return NULL; - } + { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 }, + { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }, + { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 }, + { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 }, + { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 } + }; - slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; - if (! 
(*slurp_relocs) (abfd, relplt, dynsyms, TRUE)) - goto bad_return; + *ret = NULL; - hdr = &elf_section_data (relplt)->this_hdr; - count = relplt->size / hdr->sh_entsize; + if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) + return 0; - plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count); - if (plt_sym_val == NULL) - goto bad_return; + if (dynsymcount <= 0) + return 0; - for (i = 0; i < count; i++) - plt_sym_val[i] = -1; + relsize = bfd_get_dynamic_reloc_upper_bound (abfd); + if (relsize <= 0) + return -1; - plt_offset = bed->plt_entry_size; - p = relplt->relocation; - for (i = 0; i < count; i++, p++) + if (get_elf_x86_backend_data (abfd)->target_os != is_nacl) + { + lazy_plt = &elf_x86_64_lazy_plt; + non_lazy_plt = &elf_x86_64_non_lazy_plt; + lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt; + non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt; + if (ABI_64_P (abfd)) + { + lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; + non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; + } + else + { + lazy_ibt_plt = &elf_x32_lazy_ibt_plt; + non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; + } + } + else { - long reloc_index; + lazy_plt = &elf_x86_64_nacl_plt; + non_lazy_plt = NULL; + lazy_bnd_plt = NULL; + non_lazy_bnd_plt = NULL; + lazy_ibt_plt = NULL; + non_lazy_ibt_plt = NULL; + } - /* Skip unknown relocation. */ - if (p->howto == NULL) + count = 0; + for (j = 0; plts[j].name != NULL; j++) + { + plt = bfd_get_section_by_name (abfd, plts[j].name); + if (plt == NULL || plt->size == 0) continue; - if (p->howto->type != R_X86_64_JUMP_SLOT - && p->howto->type != R_X86_64_IRELATIVE) - continue; + /* Get the PLT section contents. */ + plt_contents = (bfd_byte *) bfd_malloc (plt->size); + if (plt_contents == NULL) + break; + if (!bfd_get_section_contents (abfd, (asection *) plt, + plt_contents, 0, plt->size)) + { + free (plt_contents); + break; + } - reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset - + bed->plt_reloc_offset)); - if (reloc_index < count) + /* Check what kind of PLT it is. */ + plt_type = plt_unknown; + if (plts[j].type == plt_unknown + && (plt->size >= (lazy_plt->plt_entry_size + + lazy_plt->plt_entry_size))) { - if (plt_bnd) + /* Match lazy PLT first. Need to check the first two + instructions. */ + if ((memcmp (plt_contents, lazy_plt->plt0_entry, + lazy_plt->plt0_got1_offset) == 0) + && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6, + 2) == 0)) + plt_type = plt_lazy; + else if (lazy_bnd_plt != NULL + && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry, + lazy_bnd_plt->plt0_got1_offset) == 0) + && (memcmp (plt_contents + 6, + lazy_bnd_plt->plt0_entry + 6, 3) == 0)) { - /* This is the index in .plt section. */ - long plt_index = plt_offset / bed->plt_entry_size; - /* Store VMA + the offset in .plt.bnd section. */ - plt_sym_val[reloc_index] = - (plt_bnd->vma - + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry)); + plt_type = plt_lazy | plt_second; + /* The fist entry in the lazy IBT PLT is the same as the + lazy BND PLT. */ + if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size, + lazy_ibt_plt->plt_entry, + lazy_ibt_plt->plt_got_offset) == 0)) + lazy_plt = lazy_ibt_plt; + else + lazy_plt = lazy_bnd_plt; } - else - plt_sym_val[reloc_index] = plt->vma + plt_offset; } - plt_offset += bed->plt_entry_size; - /* PR binutils/18437: Skip extra relocations in the .rela.plt - section. */ - if (plt_offset >= plt->size) - break; - } + if (non_lazy_plt != NULL + && (plt_type == plt_unknown || plt_type == plt_non_lazy) + && plt->size >= non_lazy_plt->plt_entry_size) + { + /* Match non-lazy PLT. 
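/* A minimal sketch of the template matching used above: a PLT flavour
   is recognised by comparing the opcode bytes of the section's first
   entry against the known PLT0 byte patterns, skipping the 32-bit
   displacement fields that the linker patches.  The template bytes are
   the plain lazy PLT0 from earlier in this file; the helper name is
   hypothetical.  */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static const uint8_t lazy_plt0_template[16] =
{
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)  */
  0xff, 0x25, 16, 0, 0, 0,	/* jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0x40, 0x00	/* nopl 0(%rax)       */
};

/* Return 1 if CONTENTS looks like a plain lazy .plt section.  */
int
looks_like_lazy_plt (const uint8_t *contents, size_t size)
{
  if (size < 2 * sizeof lazy_plt0_template)
    return 0;
  /* Compare the pushq opcode (bytes 0-1, up to plt0_got1_offset) and
     the jmpq opcode (bytes 6-7); the displacements in between differ
     from link to link.  */
  return memcmp (contents, lazy_plt0_template, 2) == 0
         && memcmp (contents + 6, lazy_plt0_template + 6, 2) == 0;
}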
*/ + if (memcmp (plt_contents, non_lazy_plt->plt_entry, + non_lazy_plt->plt_got_offset) == 0) + plt_type = plt_non_lazy; + } + + if (plt_type == plt_unknown || plt_type == plt_second) + { + if (non_lazy_bnd_plt != NULL + && plt->size >= non_lazy_bnd_plt->plt_entry_size + && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry, + non_lazy_bnd_plt->plt_got_offset) == 0)) + { + /* Match BND PLT. */ + plt_type = plt_second; + non_lazy_plt = non_lazy_bnd_plt; + } + else if (non_lazy_ibt_plt != NULL + && plt->size >= non_lazy_ibt_plt->plt_entry_size + && (memcmp (plt_contents, + non_lazy_ibt_plt->plt_entry, + non_lazy_ibt_plt->plt_got_offset) == 0)) + { + /* Match IBT PLT. */ + plt_type = plt_second; + non_lazy_plt = non_lazy_ibt_plt; + } + } - free (plt_contents); + if (plt_type == plt_unknown) + { + free (plt_contents); + continue; + } - return plt_sym_val; -} + plts[j].sec = plt; + plts[j].type = plt_type; -/* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section - support. */ + if ((plt_type & plt_lazy)) + { + plts[j].plt_got_offset = lazy_plt->plt_got_offset; + plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size; + plts[j].plt_entry_size = lazy_plt->plt_entry_size; + /* Skip PLT0 in lazy PLT. */ + i = 1; + } + else + { + plts[j].plt_got_offset = non_lazy_plt->plt_got_offset; + plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size; + plts[j].plt_entry_size = non_lazy_plt->plt_entry_size; + i = 0; + } -static long -elf_x86_64_get_synthetic_symtab (bfd *abfd, - long symcount, - asymbol **syms, - long dynsymcount, - asymbol **dynsyms, - asymbol **ret) -{ - /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab - as PLT if it exists. */ - asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd"); - if (plt == NULL) - plt = bfd_get_section_by_name (abfd, ".plt"); - return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms, - dynsymcount, dynsyms, ret, - plt, - elf_x86_64_get_plt_sym_val); + /* Skip lazy PLT when the second PLT is used. */ + if (plt_type == (plt_lazy | plt_second)) + plts[j].count = 0; + else + { + n = plt->size / plts[j].plt_entry_size; + plts[j].count = n; + count += n - i; + } + + plts[j].contents = plt_contents; + } + + return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize, + (bfd_vma) 0, plts, dynsyms, + ret); } /* Handle an x86-64 specific section when reading an object file. This @@ -6568,19 +5127,6 @@ elf_x86_64_additional_program_headers (bfd *abfd, return count; } -/* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */ - -static bfd_boolean -elf_x86_64_hash_symbol (struct elf_link_hash_entry *h) -{ - if (h->plt.offset != (bfd_vma) -1 - && !h->def_regular - && !h->pointer_equality_needed) - return FALSE; - - return _bfd_elf_hash_symbol (h); -} - /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */ static bfd_boolean @@ -6592,16 +5138,87 @@ elf_x86_64_relocs_compatible (const bfd_target *input, && _bfd_elf_relocs_compatible (input, output)); } +/* Set up x86-64 GNU properties. Return the first relocatable ELF input + with GNU properties if found. Otherwise, return NULL. 
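/* A minimal sketch of the per-section entry counting above: a lazy PLT
   reserves its first slot for PLT0, so it contributes
   size / entry_size - 1 synthetic symbols, a non-lazy or second PLT
   contributes size / entry_size, and a lazy PLT that is paired with a
   second PLT contributes none because its symbols are reported from
   the second PLT instead.  The helper name is hypothetical.  */
#include <stddef.h>

long
plt_synthetic_count (size_t plt_size, size_t entry_size,
                     int is_lazy, int paired_with_second_plt)
{
  if (entry_size == 0 || plt_size < entry_size)
    return 0;
  if (is_lazy && paired_with_second_plt)
    return 0;
  long n = (long) (plt_size / entry_size);
  return is_lazy ? n - 1 : n;
}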
*/ + +static bfd * +elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info) +{ + struct elf_x86_init_table init_table; + + if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit + || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit + || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit) + != (int) R_X86_64_GNU_VTINHERIT) + || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit) + != (int) R_X86_64_GNU_VTENTRY)) + abort (); + + /* This is unused for x86-64. */ + init_table.plt0_pad_byte = 0x90; + + if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl) + { + const struct elf_backend_data *bed + = get_elf_backend_data (info->output_bfd); + struct elf_x86_link_hash_table *htab + = elf_x86_hash_table (info, bed->target_id); + if (!htab) + abort (); + if (htab->params->bndplt) + { + init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt; + init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt; + } + else + { + init_table.lazy_plt = &elf_x86_64_lazy_plt; + init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt; + } + + if (ABI_64_P (info->output_bfd)) + { + init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; + init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; + } + else + { + init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt; + init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; + } + } + else + { + init_table.lazy_plt = &elf_x86_64_nacl_plt; + init_table.non_lazy_plt = NULL; + init_table.lazy_ibt_plt = NULL; + init_table.non_lazy_ibt_plt = NULL; + } + + if (ABI_64_P (info->output_bfd)) + { + init_table.r_info = elf64_r_info; + init_table.r_sym = elf64_r_sym; + } + else + { + init_table.r_info = elf32_r_info; + init_table.r_sym = elf32_r_sym; + } + + return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table); +} + static const struct bfd_elf_special_section - elf_x86_64_special_sections[]= +elf_x86_64_special_sections[]= { { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE}, - { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, + { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, - { NULL, 0, 0, 0, 0 } + { NULL, 0, 0, 0, 0 } }; #define TARGET_LITTLE_SYM x86_64_elf64_vec @@ -6609,7 +5226,11 @@ static const struct bfd_elf_special_section #define ELF_ARCH bfd_arch_i386 #define ELF_TARGET_ID X86_64_ELF_DATA #define ELF_MACHINE_CODE EM_X86_64 -#define ELF_MAXPAGESIZE 0x200000 +#if DEFAULT_LD_Z_SEPARATE_CODE +# define ELF_MAXPAGESIZE 0x1000 +#else +# define ELF_MAXPAGESIZE 0x200000 +#endif #define ELF_MINPAGESIZE 0x1000 #define ELF_COMMONPAGESIZE 0x1000 @@ -6620,26 +5241,24 @@ static const struct bfd_elf_special_section #define elf_backend_want_plt_sym 0 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3) #define elf_backend_rela_normal 1 -#define elf_backend_plt_alignment 4 +#define elf_backend_plt_alignment 4 #define elf_backend_extern_protected_data 1 #define elf_backend_caches_rawsize 1 +#define elf_backend_dtrel_excludes_plt 1 +#define elf_backend_want_dynrelro 1 #define elf_info_to_howto elf_x86_64_info_to_howto -#define 
bfd_elf64_bfd_link_hash_table_create \ - elf_x86_64_link_hash_table_create #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup #define bfd_elf64_bfd_reloc_name_lookup \ elf_x86_64_reloc_name_lookup -#define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible #define elf_backend_check_relocs elf_x86_64_check_relocs -#define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol -#define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections +#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol -#define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook +#define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo #ifdef CORE_HEADER @@ -6647,11 +5266,8 @@ static const struct bfd_elf_special_section #endif #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class #define elf_backend_relocate_section elf_x86_64_relocate_section -#define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections -#define elf_backend_always_size_sections elf_x86_64_always_size_sections #define elf_backend_init_index_section _bfd_elf_init_1_index_section #define elf_backend_object_p elf64_x86_64_elf_object_p -#define bfd_elf64_mkobject elf_x86_64_mkobject #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab #define elf_backend_section_from_shdr \ @@ -6675,41 +5291,42 @@ static const struct bfd_elf_special_section elf_x86_64_special_sections #define elf_backend_additional_program_headers \ elf_x86_64_additional_program_headers -#define elf_backend_hash_symbol \ - elf_x86_64_hash_symbol -#define elf_backend_omit_section_dynsym \ - ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true) -#define elf_backend_fixup_symbol \ - elf_x86_64_fixup_symbol +#define elf_backend_setup_gnu_properties \ + elf_x86_64_link_setup_gnu_properties +#define elf_backend_hide_symbol \ + _bfd_x86_elf_hide_symbol + +#undef elf64_bed +#define elf64_bed elf64_x86_64_bed #include "elf64-target.h" /* CloudABI support. */ -#undef TARGET_LITTLE_SYM +#undef TARGET_LITTLE_SYM #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec -#undef TARGET_LITTLE_NAME +#undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi" #undef ELF_OSABI #define ELF_OSABI ELFOSABI_CLOUDABI -#undef elf64_bed +#undef elf64_bed #define elf64_bed elf64_x86_64_cloudabi_bed #include "elf64-target.h" /* FreeBSD support. 
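/* A minimal sketch of the ABI-dependent r_info packing selected in
   elf_x86_64_link_setup_gnu_properties above: 64-bit output uses the
   ELF64 layout with the symbol index in the high 32 bits, while x32
   uses the ELF32 layout with an 8-bit relocation type.  These are the
   standard ELF formulas; the real callbacks are elf64_r_info and
   elf32_r_info, and the names below are sketch-only.  */
#include <stdint.h>

uint64_t
sketch_elf64_r_info (uint64_t sym, uint64_t type)
{
  return (sym << 32) + (type & 0xffffffff);	/* ELF64_R_INFO */
}

uint32_t
sketch_elf32_r_info (uint32_t sym, uint32_t type)
{
  return (sym << 8) + (type & 0xff);		/* ELF32_R_INFO */
}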
*/ -#undef TARGET_LITTLE_SYM +#undef TARGET_LITTLE_SYM #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec -#undef TARGET_LITTLE_NAME +#undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd" #undef ELF_OSABI #define ELF_OSABI ELFOSABI_FREEBSD -#undef elf64_bed +#undef elf64_bed #define elf64_bed elf64_x86_64_fbsd_bed #include "elf64-target.h" @@ -6721,6 +5338,14 @@ static const struct bfd_elf_special_section #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-sol2" +static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed = + { + is_solaris /* os */ + }; + +#undef elf_backend_arch_data +#define elf_backend_arch_data &elf_x86_64_solaris_arch_bed + /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE objects won't be recognized. */ #undef ELF_OSABI @@ -6800,11 +5425,11 @@ elf64_x86_64_nacl_elf_object_p (bfd *abfd) static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = { - 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ + 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */ - 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ - 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ - 0x41, 0xff, 0xe3, /* jmpq *%r11 */ + 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ + 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ + 0x41, 0xff, 0xe3, /* jmpq *%r11 */ /* 9-byte nop sequence to pad out to the next 32-byte boundary. */ 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ @@ -6814,49 +5439,49 @@ static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x66, /* excess data16 prefix */ - 0x90 /* nop */ + 0x66, /* excess data16 prefix */ + 0x90 /* nop */ }; static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = { 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */ - 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ - 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ - 0x41, 0xff, 0xe3, /* jmpq *%r11 */ + 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ + 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ + 0x41, 0xff, 0xe3, /* jmpq *%r11 */ /* 15-byte nop sequence to pad out to the next 32-byte boundary. */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ /* Lazy GOT entries point here (32-byte aligned). */ - 0x68, /* pushq immediate */ - 0, 0, 0, 0, /* replaced with index into relocation table. */ - 0xe9, /* jmp relative */ - 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ + 0x68, /* pushq immediate */ + 0, 0, 0, 0, /* replaced with index into relocation table. */ + 0xe9, /* jmp relative */ + 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ - /* 22 bytes of nop to pad out to the standard size. */ + /* 22 bytes of nop to pad out to the standard size. */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ + 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ }; /* .eh_frame covering the .plt section. 
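/* A minimal sketch of the indirect-branch sandboxing encoded in the
   NaCl PLT templates above: "and $-32, %r11d" truncates the target to
   32 bits and aligns it down to a 32-byte bundle boundary, and
   "add %r15, %r11" rebases it on the sandbox base kept in %r15 before
   the "jmpq *%r11".  Plain integers stand in for the registers; the
   helper name is hypothetical.  */
#include <stdint.h>

uint64_t
nacl_sandboxed_target (uint64_t r11, uint64_t r15_sandbox_base)
{
  uint32_t masked = (uint32_t) r11 & ~UINT32_C (31);	/* and $-32, %r11d */
  return r15_sandbox_base + masked;			/* add %r15, %r11  */
}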
*/ static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] = { -#if (PLT_CIE_LENGTH != 20 \ - || PLT_FDE_LENGTH != 36 \ - || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \ +#if (PLT_CIE_LENGTH != 20 \ + || PLT_FDE_LENGTH != 36 \ + || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \ || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12) -# error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!" +# error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!" #endif PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 0, 0, 0, 0, /* CIE ID */ 1, /* CIE version */ - 'z', 'R', 0, /* Augmentation string */ + 'z', 'R', 0, /* Augmentation string */ 1, /* Code alignment factor */ - 0x78, /* Data alignment factor */ + 0x78, /* Data alignment factor */ 16, /* Return address column */ 1, /* Augmentation size */ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ @@ -6882,22 +5507,36 @@ static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] = DW_CFA_nop, DW_CFA_nop }; -static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = +static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt = + { + elf_x86_64_nacl_plt0_entry, /* plt0_entry */ + NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_nacl_plt_entry, /* plt_entry */ + NACL_PLT_ENTRY_SIZE, /* plt_entry_size */ + elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */ + NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ + 2, /* plt_tlsdesc_got1_offset */ + 9, /* plt_tlsdesc_got2_offset */ + 6, /* plt_tlsdesc_got1_insn_end */ + 13, /* plt_tlsdesc_got2_insn_end */ + 2, /* plt0_got1_offset */ + 9, /* plt0_got2_offset */ + 13, /* plt0_got2_insn_end */ + 3, /* plt_got_offset */ + 33, /* plt_reloc_offset */ + 38, /* plt_plt_offset */ + 7, /* plt_got_insn_size */ + 42, /* plt_plt_insn_end */ + 32, /* plt_lazy_offset */ + elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_nacl_plt_entry, /* pic_plt_entry */ + elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed = { - elf_x86_64_nacl_plt0_entry, /* plt0_entry */ - elf_x86_64_nacl_plt_entry, /* plt_entry */ - NACL_PLT_ENTRY_SIZE, /* plt_entry_size */ - 2, /* plt0_got1_offset */ - 9, /* plt0_got2_offset */ - 13, /* plt0_got2_insn_end */ - 3, /* plt_got_offset */ - 33, /* plt_reloc_offset */ - 38, /* plt_plt_offset */ - 7, /* plt_got_insn_size */ - 42, /* plt_plt_insn_end */ - 32, /* plt_lazy_offset */ - elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */ + is_nacl /* os */ }; #undef elf_backend_arch_data @@ -6907,8 +5546,8 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p #undef elf_backend_modify_segment_map #define elf_backend_modify_segment_map nacl_modify_segment_map -#undef elf_backend_modify_program_headers -#define elf_backend_modify_program_headers nacl_modify_program_headers +#undef elf_backend_modify_headers +#define elf_backend_modify_headers nacl_modify_headers #undef elf_backend_final_write_processing #define elf_backend_final_write_processing nacl_final_write_processing @@ -6924,21 +5563,17 @@ elf32_x86_64_nacl_elf_object_p (bfd *abfd) return TRUE; } -#undef TARGET_LITTLE_SYM +#undef TARGET_LITTLE_SYM #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec -#undef TARGET_LITTLE_NAME +#undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf32-x86-64-nacl" #undef elf32_bed #define elf32_bed 
elf32_x86_64_nacl_bed -#define bfd_elf32_bfd_link_hash_table_create \ - elf_x86_64_link_hash_table_create #define bfd_elf32_bfd_reloc_type_lookup \ elf_x86_64_reloc_type_lookup #define bfd_elf32_bfd_reloc_name_lookup \ elf_x86_64_reloc_name_lookup -#define bfd_elf32_mkobject \ - elf_x86_64_mkobject #define bfd_elf32_get_synthetic_symtab \ elf_x86_64_get_synthetic_symtab @@ -6954,6 +5589,9 @@ elf32_x86_64_nacl_elf_object_p (bfd *abfd) #define elf_backend_size_info \ _bfd_elf32_size_info +#undef elf32_bed +#define elf32_bed elf32_x86_64_bed + #include "elf32-target.h" /* Restore defaults. */ @@ -6962,7 +5600,7 @@ elf32_x86_64_nacl_elf_object_p (bfd *abfd) #undef elf_backend_bfd_from_remote_memory #undef elf_backend_size_info #undef elf_backend_modify_segment_map -#undef elf_backend_modify_program_headers +#undef elf_backend_modify_headers #undef elf_backend_final_write_processing /* Intel L1OM support. */ @@ -6997,7 +5635,11 @@ elf64_l1om_elf_object_p (bfd *abfd) #undef ELF_MAXPAGESIZE #undef ELF_MINPAGESIZE #undef ELF_COMMONPAGESIZE -#define ELF_MAXPAGESIZE 0x200000 +#if DEFAULT_LD_Z_SEPARATE_CODE +# define ELF_MAXPAGESIZE 0x1000 +#else +# define ELF_MAXPAGESIZE 0x200000 +#endif #define ELF_MINPAGESIZE 0x1000 #define ELF_COMMONPAGESIZE 0x1000 #undef elf_backend_plt_alignment
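/* A minimal sketch of what the conditional ELF_MAXPAGESIZE above
   affects: the linker aligns PT_LOAD segments to the maximum page
   size by default, so a build configured with
   DEFAULT_LD_Z_SEPARATE_CODE drops from the traditional 2 MiB value
   to 4 KiB and so reduces the alignment padding between segments.
   The helper and the sample address are hypothetical.  */
#include <stdint.h>
#include <stdio.h>

static uint64_t
align_up (uint64_t addr, uint64_t pagesize)
{
  /* PAGESIZE is assumed to be a power of two.  */
  return (addr + pagesize - 1) & ~(pagesize - 1);
}

int
main (void)
{
  uint64_t end_of_text = 0x401234;

  printf ("next segment: %#llx with 0x200000 pages, %#llx with 0x1000 pages\n",
          (unsigned long long) align_up (end_of_text, 0x200000),
          (unsigned long long) align_up (end_of_text, 0x1000));
  return 0;
}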