/* X86-64 specific support for ELF
- Copyright (C) 2000-2016 Free Software Foundation, Inc.
+ Copyright (C) 2000-2017 Free Software Foundation, Inc.
Contributed by Jan Hubicka <jh@suse.cz>.
This file is part of BFD, the Binary File Descriptor library.
{
if (r_type >= (unsigned int) R_X86_64_standard)
{
- (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
- abfd, (int) r_type);
+ /* xgettext:c-format */
+ _bfd_error_handler (_("%B: invalid relocation type %d"),
+ abfd, (int) r_type);
r_type = R_X86_64_NONE;
}
i = r_type;
#define GOT_ENTRY_SIZE 8
-/* The size in bytes of an entry in the procedure linkage table. */
+/* The size in bytes of an entry in the lazy procedure linkage table. */
-#define PLT_ENTRY_SIZE 16
+#define LAZY_PLT_ENTRY_SIZE 16
-/* The first entry in a procedure linkage table looks like this. See the
- SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
+/* The size in bytes of an entry in the non-lazy procedure linkage
+ table. */
-static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
+#define NON_LAZY_PLT_ENTRY_SIZE 8
+
+/* The first entry in a lazy procedure linkage table looks like this.
+ See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
+ works. */
+
+static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
};
-/* Subsequent entries in a procedure linkage table look like this. */
+/* Subsequent entries in a lazy procedure linkage table look like this. */
-static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
+static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
0, 0, 0, 0 /* replaced with offset to start of .plt0. */
};
-/* The first entry in a procedure linkage table with BND relocations
+/* The first entry in a lazy procedure linkage table with BND prefix looks
like this. */
-static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
+static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
0x0f, 0x1f, 0 /* nopl (%rax) */
};
-/* Subsequent entries for legacy branches in a procedure linkage table
- with BND relocations look like this. */
+/* Subsequent entries for branches with BND prefix in a lazy procedure
+ linkage table look like this. */
-static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
+static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
0x68, 0, 0, 0, 0, /* pushq immediate */
- 0xe9, 0, 0, 0, 0, /* jmpq relative */
- 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
+ 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
+ 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
};
-/* Subsequent entries for branches with BND prefx in a procedure linkage
- table with BND relocations look like this. */
+/* The first entry in the IBT-enabled lazy procedure linkage table is
+   the same as the lazy PLT with BND prefix so that bound registers are
+   preserved when control is passed to the dynamic linker.  Subsequent
+   entries for an IBT-enabled lazy procedure linkage table look like
+   this.  */
-static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
+static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
0x68, 0, 0, 0, 0, /* pushq immediate */
0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
- 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
+ 0x90 /* nop */
+};
+
+/* The first entry in the x32 IBT-enabled lazy procedure linkage table
+ is the same as the normal lazy PLT. Subsequent entries for an
+ x32 IBT-enabled lazy procedure linkage table look like this. */
+
+static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xe9, 0, 0, 0, 0, /* jmpq relative */
+ 0x66, 0x90 /* xchg %ax,%ax */
};
-/* Entries for legacy branches in the second procedure linkage table
- look like this. */
+/* Entries in the non-lazy procedure linkage table look like this.  */
-static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
+static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
- 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
- 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x66, 0x90 /* xchg %ax,%ax */
+ 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x66, 0x90 /* xchg %ax,%ax */
};
-/* Entries for branches with BND prefix in the second procedure linkage
- table look like this. */
+/* Entries for branches with BND prefix in the non-lazy procedure
+ linkage table look like this. */
-static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
+static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
- 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
- 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x90 /* nop */
+ 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x90 /* nop */
};
-/* .eh_frame covering the .plt section. */
+/* Entries for branches with IBT enabled in the non-lazy procedure
+ linkage table look like this. They have the same size as the lazy
+ PLT entry. */
+
+static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
+};
-static const bfd_byte elf_x86_64_eh_frame_plt[] =
+/* Entries for branches with IBT enabled in the x32 non-lazy procedure
+ linkage table look like this. They have the same size as the lazy
+ PLT entry. */
+
+static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
+};
+
+/* .eh_frame covering the lazy .plt section. */
+
+static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
{
#define PLT_CIE_LENGTH 20
#define PLT_FDE_LENGTH 36
DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
-/* Architecture-specific backend data for x86-64. */
+/* .eh_frame covering the lazy BND .plt section. */
-struct elf_x86_64_backend_data
+static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
+{
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
+ 0, 0, 0, 0, /* .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
+ DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
+ DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
+ DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
+ DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
+ 11, /* Block length */
+ DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
+ DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
+ DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
+ DW_OP_lit3, DW_OP_shl, DW_OP_plus,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
+/* .eh_frame covering the lazy .plt section with IBT-enabled. */
+
+static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
+{
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
+ 0, 0, 0, 0, /* .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
+ DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
+ DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
+ DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
+ DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
+ 11, /* Block length */
+ DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
+ DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
+ DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
+ DW_OP_lit3, DW_OP_shl, DW_OP_plus,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
+/* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
+
+static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
+{
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
+ 0, 0, 0, 0, /* .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
+ DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
+ DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
+ DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
+ DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
+ 11, /* Block length */
+ DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
+ DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
+ DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
+ DW_OP_lit3, DW_OP_shl, DW_OP_plus,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
+/* .eh_frame covering the non-lazy .plt section. */
+
+static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
+{
+#define PLT_GOT_FDE_LENGTH 20
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
+ 0, 0, 0, 0, /* non-lazy .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
+struct elf_x86_64_lazy_plt_layout
{
/* Templates for the initial PLT entry and for subsequent entries. */
const bfd_byte *plt0_entry;
/* Offset into plt_entry where the initial value of the GOT entry points. */
unsigned int plt_lazy_offset;
+ /* .eh_frame covering the lazy .plt section. */
+ const bfd_byte *eh_frame_plt;
+ unsigned int eh_frame_plt_size;
+};
+
+struct elf_x86_64_non_lazy_plt_layout
+{
+ /* Template for the lazy PLT entries. */
+ const bfd_byte *plt_entry;
+ unsigned int plt_entry_size; /* Size of each PLT entry. */
+
+ /* Offsets into plt_entry that are to be replaced with... */
+ unsigned int plt_got_offset; /* ... address of this symbol in .got. */
+
+ /* Length of the PC-relative instruction containing plt_got_offset. */
+ unsigned int plt_got_insn_size;
+
+ /* .eh_frame covering the non-lazy .plt section. */
+ const bfd_byte *eh_frame_plt;
+ unsigned int eh_frame_plt_size;
+};
+
+struct elf_x86_64_plt_layout
+{
+ /* Template for the PLT entries. */
+ const bfd_byte *plt_entry;
+ unsigned int plt_entry_size; /* Size of each PLT entry. */
+
+ /* 1 has PLT0. */
+ unsigned int has_plt0;
+
+ /* Offsets into plt_entry that are to be replaced with... */
+ unsigned int plt_got_offset; /* ... address of this symbol in .got. */
+
+ /* Length of the PC-relative instruction containing plt_got_offset. */
+ unsigned int plt_got_insn_size;
+
/* .eh_frame covering the .plt section. */
const bfd_byte *eh_frame_plt;
unsigned int eh_frame_plt_size;
};
+/* Architecture-specific backend data for x86-64. */
+
+struct elf_x86_64_backend_data
+{
+ /* Target system. */
+ enum
+ {
+ is_normal,
+ is_nacl
+ } os;
+};
+
#define get_elf_x86_64_arch_data(bed) \
((const struct elf_x86_64_backend_data *) (bed)->arch_data)
#define get_elf_x86_64_backend_data(abfd) \
get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
-#define GET_PLT_ENTRY_SIZE(abfd) \
- get_elf_x86_64_backend_data (abfd)->plt_entry_size
-
/* These are the standard parameters. */
-static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_lazy_plt =
{
- elf_x86_64_plt0_entry, /* plt0_entry */
- elf_x86_64_plt_entry, /* plt_entry */
- sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
+ elf_x86_64_lazy_plt0_entry, /* plt0_entry */
+ elf_x86_64_lazy_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
2, /* plt0_got1_offset */
8, /* plt0_got2_offset */
12, /* plt0_got2_insn_end */
7, /* plt_reloc_offset */
12, /* plt_plt_offset */
6, /* plt_got_insn_size */
- PLT_ENTRY_SIZE, /* plt_plt_insn_end */
+ LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
6, /* plt_lazy_offset */
- elf_x86_64_eh_frame_plt, /* eh_frame_plt */
- sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
+ elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
+ {
+ elf_x86_64_non_lazy_plt_entry, /* plt_entry */
+ NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt_got_offset */
+ 6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
-static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
{
- elf_x86_64_bnd_plt0_entry, /* plt0_entry */
- elf_x86_64_bnd_plt_entry, /* plt_entry */
- sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
+ elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
+ elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
2, /* plt0_got1_offset */
1+8, /* plt0_got2_offset */
1+12, /* plt0_got2_insn_end */
1+6, /* plt_got_insn_size */
11, /* plt_plt_insn_end */
0, /* plt_lazy_offset */
- elf_x86_64_eh_frame_plt, /* eh_frame_plt */
- sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
+ elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
+ {
+ elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
+ NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 1+2, /* plt_got_offset */
+ 1+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
+ {
+ elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
+ elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt0_got1_offset */
+ 1+8, /* plt0_got2_offset */
+ 1+12, /* plt0_got2_insn_end */
+ 4+1+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+1+6, /* plt_plt_offset */
+ 4+1+6, /* plt_got_insn_size */
+ 4+1+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_lazy_plt_layout elf_x32_lazy_ibt_plt =
+ {
+ elf_x86_64_lazy_plt0_entry, /* plt0_entry */
+ elf_x32_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt0_got1_offset */
+ 8, /* plt0_got2_offset */
+ 12, /* plt0_got2_insn_end */
+ 4+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+6, /* plt_plt_offset */
+ 4+6, /* plt_got_insn_size */
+ 4+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
+ {
+ elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+1+2, /* plt_got_offset */
+ 4+1+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
+ {
+ elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+2, /* plt_got_offset */
+ 4+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
+ {
+ is_normal /* os */
};
#define elf_backend_arch_data &elf_x86_64_arch_bed
it isn't dynamic and
1. Has non-GOT/non-PLT relocations in text section. Or
2. Has no GOT/PLT relocation.
+   A local undefined weak symbol is always resolved to 0.
*/
#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
((EH)->elf.root.type == bfd_link_hash_undefweak \
- && bfd_link_executable (INFO) \
- && (elf_x86_64_hash_table (INFO)->interp == NULL \
- || !(GOT_RELOC) \
- || (EH)->has_non_got_reloc \
- || !(INFO)->dynamic_undefined_weak))
+ && ((EH)->elf.forced_local \
+ || (bfd_link_executable (INFO) \
+ && (elf_x86_64_hash_table (INFO)->interp == NULL \
+ || !(GOT_RELOC) \
+ || (EH)->has_non_got_reloc \
+ || !(INFO)->dynamic_undefined_weak))))
/* x86-64 ELF linker hash entry. */
real definition and check it when allowing copy reloc in PIE. */
unsigned int needs_copy : 1;
- /* TRUE if symbol has at least one BND relocation. */
- unsigned int has_bnd_reloc : 1;
-
/* TRUE if symbol has GOT or PLT relocations. */
unsigned int has_got_reloc : 1;
/* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
unsigned int has_non_got_reloc : 1;
- /* 0: symbol isn't __tls_get_addr.
- 1: symbol is __tls_get_addr.
- 2: symbol is unknown. */
- unsigned int tls_get_addr : 2;
+ /* Don't call finish_dynamic_symbol on this symbol. */
+ unsigned int no_finish_dynamic_symbol : 1;
+
+ /* TRUE if symbol is __tls_get_addr. */
+ unsigned int tls_get_addr : 1;
/* Reference count of C/C++ function pointer relocations in read-write
section which can be resolved at run-time. */
GOT and PLT relocations against the same function. */
union gotplt_union plt_got;
- /* Information about the second PLT entry. Filled when has_bnd_reloc is
- set. */
- union gotplt_union plt_bnd;
+ /* Information about the second PLT entry. */
+ union gotplt_union plt_second;
/* Offset of the GOTPLT entry reserved for the TLS descriptor,
starting at the end of the jump table. */
/* Short-cuts to get to dynamic linker sections. */
asection *interp;
- asection *sdynbss;
- asection *srelbss;
asection *plt_eh_frame;
- asection *plt_bnd;
+ asection *plt_second;
+ asection *plt_second_eh_frame;
asection *plt_got;
+ asection *plt_got_eh_frame;
+
+ /* Parameters describing PLT generation, lazy or non-lazy. */
+ struct elf_x86_64_plt_layout plt;
+
+ /* Parameters describing lazy PLT generation. */
+ const struct elf_x86_64_lazy_plt_layout *lazy_plt;
+
+ /* Parameters describing non-lazy PLT generation. */
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt;
union
{
eh->dyn_relocs = NULL;
eh->tls_type = GOT_UNKNOWN;
eh->needs_copy = 0;
- eh->has_bnd_reloc = 0;
eh->has_got_reloc = 0;
eh->has_non_got_reloc = 0;
- eh->tls_get_addr = 2;
+ eh->no_finish_dynamic_symbol = 0;
+ eh->tls_get_addr = 0;
eh->func_pointer_refcount = 0;
- eh->plt_bnd.offset = (bfd_vma) -1;
+ eh->plt_second.offset = (bfd_vma) -1;
eh->plt_got.offset = (bfd_vma) -1;
eh->tlsdesc_got = (bfd_vma) -1;
}
return &ret->elf.root;
}
-/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
- .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
- hash table. */
-
-static bfd_boolean
-elf_x86_64_create_dynamic_sections (bfd *dynobj,
- struct bfd_link_info *info)
-{
- struct elf_x86_64_link_hash_table *htab;
-
- if (!_bfd_elf_create_dynamic_sections (dynobj, info))
- return FALSE;
-
- htab = elf_x86_64_hash_table (info);
- if (htab == NULL)
- return FALSE;
-
- /* Set the contents of the .interp section to the interpreter. */
- if (bfd_link_executable (info) && !info->nointerp)
- {
- asection *s = bfd_get_linker_section (dynobj, ".interp");
- if (s == NULL)
- abort ();
- s->size = htab->dynamic_interpreter_size;
- s->contents = (unsigned char *) htab->dynamic_interpreter;
- htab->interp = s;
- }
-
- htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
- if (!htab->sdynbss)
- abort ();
-
- if (bfd_link_executable (info))
- {
- /* Always allow copy relocs for building executables. */
- asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
- if (s == NULL)
- {
- const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
- s = bfd_make_section_anyway_with_flags (dynobj,
- ".rela.bss",
- (bed->dynamic_sec_flags
- | SEC_READONLY));
- if (s == NULL
- || ! bfd_set_section_alignment (dynobj, s,
- bed->s->log_file_align))
- return FALSE;
- }
- htab->srelbss = s;
- }
-
- if (!info->no_ld_generated_unwind_info
- && htab->plt_eh_frame == NULL
- && htab->elf.splt != NULL)
- {
- flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
- | SEC_HAS_CONTENTS | SEC_IN_MEMORY
- | SEC_LINKER_CREATED);
- htab->plt_eh_frame
- = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
- if (htab->plt_eh_frame == NULL
- || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
- return FALSE;
- }
- return TRUE;
-}
-
/* Copy the extra info we tack onto an elf_link_hash_entry. */
static void
edir = (struct elf_x86_64_link_hash_entry *) dir;
eind = (struct elf_x86_64_link_hash_entry *) ind;
- if (!edir->has_bnd_reloc)
- edir->has_bnd_reloc = eind->has_bnd_reloc;
-
- if (!edir->has_got_reloc)
- edir->has_got_reloc = eind->has_got_reloc;
-
- if (!edir->has_non_got_reloc)
- edir->has_non_got_reloc = eind->has_non_got_reloc;
+ edir->has_got_reloc |= eind->has_got_reloc;
+ edir->has_non_got_reloc |= eind->has_non_got_reloc;
if (eind->dyn_relocs != NULL)
{
/* If called to transfer flags for a weakdef during processing
of elf_adjust_dynamic_symbol, don't copy non_got_ref.
We clear it ourselves for ELIMINATE_COPY_RELOCS. */
- dir->ref_dynamic |= ind->ref_dynamic;
+ if (dir->versioned != versioned_hidden)
+ dir->ref_dynamic |= ind->ref_dynamic;
dir->ref_regular |= ind->ref_regular;
dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
dir->needs_plt |= ind->needs_plt;
bfd_vma offset;
struct elf_x86_64_link_hash_table *htab;
bfd_byte *call;
- bfd_boolean indirect_call, tls_get_addr;
+ bfd_boolean indirect_call;
htab = elf_x86_64_hash_table (info);
offset = rel->r_offset;
if (r_symndx < symtab_hdr->sh_info)
return FALSE;
- tls_get_addr = FALSE;
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
- if (h != NULL && h->root.root.string != NULL)
- {
- struct elf_x86_64_link_hash_entry *eh
- = (struct elf_x86_64_link_hash_entry *) h;
- tls_get_addr = eh->tls_get_addr == 1;
- if (eh->tls_get_addr > 1)
- {
- /* Use strncmp to check __tls_get_addr since
- __tls_get_addr may be versioned. */
- if (strncmp (h->root.root.string, "__tls_get_addr", 14)
- == 0)
- {
- eh->tls_get_addr = 1;
- tls_get_addr = TRUE;
- }
- else
- eh->tls_get_addr = 0;
- }
- }
-
- if (!tls_get_addr)
+ if (h == NULL
+ || !((struct elf_x86_64_link_hash_entry *) h)->tls_get_addr)
return FALSE;
else if (largepic)
return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
}
}
- (*_bfd_error_handler)
- (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
"in section `%A' failed"),
- abfd, sec, from->name, to->name, name,
- (unsigned long) rel->r_offset);
+ abfd, from->name, to->name, name, rel->r_offset, sec);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
#define check_relocs_failed sec_flg1
static bfd_boolean
-elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
+elf_x86_64_need_pic (struct bfd_link_info *info,
+ bfd *input_bfd, asection *sec,
struct elf_link_hash_entry *h,
Elf_Internal_Shdr *symtab_hdr,
Elf_Internal_Sym *isym,
const char *v = "";
const char *und = "";
const char *pic = "";
+ const char *object;
const char *name;
if (h)
pic = _("; recompile with -fPIC");
}
- (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can "
- "not be used when making a shared object%s"),
- input_bfd, howto->name, und, v, name, pic);
+ if (bfd_link_dll (info))
+ object = _("a shared object");
+ else if (bfd_link_pie (info))
+ object = _("a PIE object");
+ else
+ object = _("a PDE object");
+
+ /* xgettext:c-format */
+ _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
+ "not be used when making %s%s"),
+ input_bfd, howto->name, und, v, name,
+ object, pic);
bfd_set_error (bfd_error_bad_value);
sec->check_relocs_failed = 1;
return FALSE;
}
/* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
ld.so may use its link-time address. */
- else if ((h->def_regular
- || h->root.type == bfd_link_hash_defined
- || h->root.type == bfd_link_hash_defweak)
- && h != htab->elf.hdynamic
- && SYMBOL_REFERENCES_LOCAL (link_info, h))
+ else if (h->start_stop
+ || ((h->def_regular
+ || h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h != htab->elf.hdynamic
+ && SYMBOL_REFERENCES_LOCAL (link_info, h)))
{
/* bfd_link_hash_new or bfd_link_hash_undefined is
set by an assignment in a linker script in
- bfd_elf_record_link_assignment. */
- if (h->def_regular
- && (h->root.type == bfd_link_hash_new
- || h->root.type == bfd_link_hash_undefined))
+ bfd_elf_record_link_assignment. start_stop is set
+ on __start_SECNAME/__stop_SECNAME which mark section
+ SECNAME. */
+ if (h->start_stop
+ || (h->def_regular
+ && (h->root.type == bfd_link_hash_new
+ || h->root.type == bfd_link_hash_undefined
+ || ((h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h->root.u.def.section == bfd_und_section_ptr))))
{
/* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
if (require_reloc_pc32)
modrm = 0xe8;
/* To support TLS optimization, always use addr32 prefix for
"call *__tls_get_addr@GOTPCREL(%rip)". */
- if (eh && eh->tls_get_addr == 1)
+ if (eh && eh->tls_get_addr)
{
nop = 0x67;
nop_offset = irel->r_offset - 2;
const Elf_Internal_Rela *rel_end;
asection *sreloc;
bfd_byte *contents;
- bfd_boolean use_plt_got;
if (bfd_link_relocatable (info))
return TRUE;
+ /* Don't do anything special with non-loaded, non-alloced sections.
+ In particular, any relocs in such sections should not affect GOT
+ and PLT reference counting (ie. we don't allow them to create GOT
+ or PLT entries), there's no possibility or desire to optimize TLS
+ relocs, and there's not much point in propagating relocs to shared
+ libs that the dynamic linker won't relocate. */
+ if ((sec->flags & SEC_ALLOC) == 0)
+ return TRUE;
+
BFD_ASSERT (is_x86_64_elf (abfd));
htab = elf_x86_64_hash_table (info);
return FALSE;
}
- use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
-
symtab_hdr = &elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
for (rel = relocs; rel < rel_end; rel++)
{
unsigned int r_type;
- unsigned long r_symndx;
+ unsigned int r_symndx;
struct elf_link_hash_entry *h;
struct elf_x86_64_link_hash_entry *eh;
Elf_Internal_Sym *isym;
if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
{
- (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
- abfd, r_symndx);
+ /* xgettext:c-format */
+ _bfd_error_handler (_("%B: bad symbol index: %d"),
+ abfd, r_symndx);
goto error_return;
}
goto error_return;
/* Fake a STT_GNU_IFUNC symbol. */
+ h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
+ isym, NULL);
h->type = STT_GNU_IFUNC;
h->def_regular = 1;
h->ref_regular = 1;
else
name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
NULL);
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B: relocation %s against symbol `%s' isn't "
"supported in x32 mode"), abfd,
x86_64_elf_howto_table[r_type].name, name);
if (h != NULL)
{
- switch (r_type)
- {
- default:
- break;
-
- case R_X86_64_PC32_BND:
- case R_X86_64_PLT32_BND:
- case R_X86_64_PC32:
- case R_X86_64_PLT32:
- case R_X86_64_32:
- case R_X86_64_64:
- /* MPX PLT is supported only if elf_x86_64_arch_bed
- is used in 64-bit mode. */
- if (ABI_64_P (abfd)
- && info->bndplt
- && (get_elf_x86_64_backend_data (abfd)
- == &elf_x86_64_arch_bed))
- {
- elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
-
- /* Create the second PLT for Intel MPX support. */
- if (htab->plt_bnd == NULL)
- {
- unsigned int plt_bnd_align;
- const struct elf_backend_data *bed;
-
- bed = get_elf_backend_data (info->output_bfd);
- BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
- && (sizeof (elf_x86_64_bnd_plt2_entry)
- == sizeof (elf_x86_64_legacy_plt2_entry)));
- plt_bnd_align = 3;
-
- if (htab->elf.dynobj == NULL)
- htab->elf.dynobj = abfd;
- htab->plt_bnd
- = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
- ".plt.bnd",
- (bed->dynamic_sec_flags
- | SEC_ALLOC
- | SEC_CODE
- | SEC_LOAD
- | SEC_READONLY));
- if (htab->plt_bnd == NULL
- || !bfd_set_section_alignment (htab->elf.dynobj,
- htab->plt_bnd,
- plt_bnd_align))
- goto error_return;
- }
- }
-
- case R_X86_64_32S:
- case R_X86_64_PC64:
- case R_X86_64_GOTPCREL:
- case R_X86_64_GOTPCRELX:
- case R_X86_64_REX_GOTPCRELX:
- case R_X86_64_GOTPCREL64:
- if (htab->elf.dynobj == NULL)
- htab->elf.dynobj = abfd;
- /* Create the ifunc sections for static executables. */
- if (h->type == STT_GNU_IFUNC
- && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
- info))
- goto error_return;
- break;
- }
-
/* It is referenced by a non-shared object. */
h->ref_regular = 1;
- h->root.non_ir_ref = 1;
+ h->root.non_ir_ref_regular = 1;
if (h->type == STT_GNU_IFUNC)
elf_tdata (info->output_bfd)->has_gnu_symbols
case R_X86_64_TPOFF32:
if (!bfd_link_executable (info) && ABI_64_P (abfd))
- return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
+ return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
&x86_64_elf_howto_table[r_type]);
if (eh != NULL)
eh->has_got_reloc = 1;
else
name = bfd_elf_sym_name (abfd, symtab_hdr,
isym, NULL);
- (*_bfd_error_handler)
- (_("%B: '%s' accessed both as normal and thread local symbol"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: '%s' accessed both as normal and"
+ " thread local symbol"),
abfd, name);
bfd_set_error (bfd_error_bad_value);
goto error_return;
create_got:
if (eh != NULL)
eh->has_got_reloc = 1;
- if (htab->elf.sgot == NULL)
- {
- if (htab->elf.dynobj == NULL)
- htab->elf.dynobj = abfd;
- if (!_bfd_elf_create_got_section (htab->elf.dynobj,
- info))
- goto error_return;
- }
break;
case R_X86_64_PLT32:
case R_X86_64_32:
if (!ABI_64_P (abfd))
goto pointer;
+ /* Fall through. */
case R_X86_64_8:
case R_X86_64_16:
case R_X86_64_32S:
&& h != NULL
&& !h->def_regular
&& h->def_dynamic
- && (sec->flags & SEC_READONLY) == 0))
- && (sec->flags & SEC_ALLOC) != 0)
- return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
+ && (sec->flags & SEC_READONLY) == 0)))
+ return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
&x86_64_elf_howto_table[r_type]);
/* Fall through. */
pointer:
if (eh != NULL && (sec->flags & SEC_CODE) != 0)
eh->has_non_got_reloc = 1;
- /* STT_GNU_IFUNC symbol must go through PLT even if it is
- locally defined and undefined symbol may turn out to be
- a STT_GNU_IFUNC symbol later. */
+ /* We are called after all symbols have been resolved. Only
+ relocation against STT_GNU_IFUNC symbol must go through
+ PLT. */
if (h != NULL
&& (bfd_link_executable (info)
- || ((h->type == STT_GNU_IFUNC
- || h->root.type == bfd_link_hash_undefweak
- || h->root.type == bfd_link_hash_undefined)
- && SYMBOLIC_BIND (info, h))))
+ || h->type == STT_GNU_IFUNC))
{
/* If this reloc is in a read-only section, we might
need a copy reloc. We can't check reliably at this
adjust_dynamic_symbol. */
h->non_got_ref = 1;
- /* We may need a .plt entry if the function this reloc
- refers to is in a shared lib. */
- h->plt.refcount += 1;
+ /* We may need a .plt entry if the symbol is a function
+ defined in a shared lib or is a STT_GNU_IFUNC function
+ referenced from the code or read-only section. */
+ if (!h->def_regular
+ || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
+ h->plt.refcount += 1;
+
if (r_type == R_X86_64_PC32)
{
/* Since something like ".long foo - ." may be used
If on the other hand, we are creating an executable, we
may need to keep relocations for symbols satisfied by a
dynamic library if we manage to avoid copy relocs for the
- symbol. */
+ symbol.
+
+ Generate dynamic pointer relocation against STT_GNU_IFUNC
+ symbol in the non-code section. */
if ((bfd_link_pic (info)
- && (sec->flags & SEC_ALLOC) != 0
&& (! IS_X86_64_PCREL_TYPE (r_type)
|| (h != NULL
&& (! (bfd_link_pie (info)
|| SYMBOLIC_BIND (info, h))
|| h->root.type == bfd_link_hash_defweak
|| !h->def_regular))))
+ || (h != NULL
+ && h->type == STT_GNU_IFUNC
+ && r_type == htab->pointer_r_type
+ && (sec->flags & SEC_CODE) == 0)
|| (ELIMINATE_COPY_RELOCS
&& !bfd_link_pic (info)
- && (sec->flags & SEC_ALLOC) != 0
&& h != NULL
&& (h->root.type == bfd_link_hash_defweak
|| !h->def_regular)))
this reloc. */
if (sreloc == NULL)
{
- if (htab->elf.dynobj == NULL)
- htab->elf.dynobj = abfd;
-
sreloc = _bfd_elf_make_dynamic_reloc_section
(sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
abfd, /*rela?*/ TRUE);
break;
}
- if (use_plt_got
- && h != NULL
- && h->plt.refcount > 0
- && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
- || h->got.refcount > 0)
- && htab->plt_got == NULL)
- {
- /* Create the GOT procedure linkage table. */
- unsigned int plt_got_align;
- const struct elf_backend_data *bed;
-
- bed = get_elf_backend_data (info->output_bfd);
- BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
- && (sizeof (elf_x86_64_bnd_plt2_entry)
- == sizeof (elf_x86_64_legacy_plt2_entry)));
- plt_got_align = 3;
-
- if (htab->elf.dynobj == NULL)
- htab->elf.dynobj = abfd;
- htab->plt_got
- = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
- ".plt.got",
- (bed->dynamic_sec_flags
- | SEC_ALLOC
- | SEC_CODE
- | SEC_LOAD
- | SEC_READONLY));
- if (htab->plt_got == NULL
- || !bfd_set_section_alignment (htab->elf.dynobj,
- htab->plt_got,
- plt_got_align))
- goto error_return;
- }
-
if ((r_type == R_X86_64_GOTPCREL
|| r_type == R_X86_64_GOTPCRELX
|| r_type == R_X86_64_REX_GOTPCRELX)
struct elf_link_hash_entry *h)
{
struct elf_x86_64_link_hash_table *htab;
- asection *s;
+ asection *s, *srel;
struct elf_x86_64_link_hash_entry *eh;
struct elf_dyn_relocs *p;
if (pc_count || count)
{
- h->needs_plt = 1;
h->non_got_ref = 1;
- if (h->plt.refcount <= 0)
- h->plt.refcount = 1;
- else
- h->plt.refcount += 1;
+ if (pc_count)
+ {
+ /* Increment PLT reference count only for PC-relative
+ references. */
+ h->needs_plt = 1;
+ if (h->plt.refcount <= 0)
+ h->plt.refcount = 1;
+ else
+ h->plt.refcount += 1;
+ }
}
}
/* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
to copy the initial value out of the dynamic object and into the
runtime process image. */
+ if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
+ {
+ s = htab->elf.sdynrelro;
+ srel = htab->elf.sreldynrelro;
+ }
+ else
+ {
+ s = htab->elf.sdynbss;
+ srel = htab->elf.srelbss;
+ }
if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
{
const struct elf_backend_data *bed;
bed = get_elf_backend_data (info->output_bfd);
- htab->srelbss->size += bed->s->sizeof_rela;
+ srel->size += bed->s->sizeof_rela;
h->needs_copy = 1;
}
- s = htab->sdynbss;
-
return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
if (htab == NULL)
return FALSE;
bed = get_elf_backend_data (info->output_bfd);
- plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
+ plt_entry_size = htab->plt.plt_entry_size;
resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
eh->has_got_reloc,
&eh->dyn_relocs,
&htab->readonly_dynrelocs_against_ifunc,
plt_entry_size,
- plt_entry_size,
- GOT_ENTRY_SIZE))
+ (htab->plt.has_plt0
+ * plt_entry_size),
+ GOT_ENTRY_SIZE, TRUE))
{
- asection *s = htab->plt_bnd;
+ asection *s = htab->plt_second;
if (h->plt.offset != (bfd_vma) -1 && s != NULL)
{
- /* Use the .plt.bnd section if it is created. */
- eh->plt_bnd.offset = s->size;
+ /* Use the second PLT section if it is created. */
+ eh->plt_second.offset = s->size;
- /* Make room for this entry in the .plt.bnd section. */
- s->size += sizeof (elf_x86_64_legacy_plt2_entry);
+ /* Make room for this entry in the second PLT section. */
+ s->size += htab->non_lazy_plt->plt_entry_size;
}
return TRUE;
&& (h->plt.refcount > eh->func_pointer_refcount
|| eh->plt_got.refcount > 0))
{
- bfd_boolean use_plt_got;
+ bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
/* Clear the reference count of function pointer relocations
if PLT is used. */
eh->func_pointer_refcount = 0;
- if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
- {
- /* Don't use the regular PLT for DF_BIND_NOW. */
- h->plt.offset = (bfd_vma) -1;
-
- /* Use the GOT PLT. */
- h->got.refcount = 1;
- eh->plt_got.refcount = 1;
- }
-
- use_plt_got = eh->plt_got.refcount > 0;
-
/* Make sure this symbol is output as a dynamic symbol.
Undefined weak syms won't yet be marked as dynamic. */
if (h->dynindx == -1
&& !h->forced_local
- && !resolved_to_zero)
+ && !resolved_to_zero
+ && h->root.type == bfd_link_hash_undefweak)
{
if (! bfd_elf_link_record_dynamic_symbol (info, h))
return FALSE;
|| WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
{
asection *s = htab->elf.splt;
- asection *bnd_s = htab->plt_bnd;
+ asection *second_s = htab->plt_second;
asection *got_s = htab->plt_got;
/* If this is the first .plt entry, make room for the special
first entry. The .plt section is used by prelink to undo
prelinking for dynamic relocations. */
if (s->size == 0)
- s->size = plt_entry_size;
+ s->size = htab->plt.has_plt0 * plt_entry_size;
if (use_plt_got)
eh->plt_got.offset = got_s->size;
else
{
h->plt.offset = s->size;
- if (bnd_s)
- eh->plt_bnd.offset = bnd_s->size;
+ if (second_s)
+ eh->plt_second.offset = second_s->size;
}
/* If this symbol is not defined in a regular file, and we are
}
else
{
- if (bnd_s)
+ if (second_s)
{
- /* We need to make a call to the entry of the second
- PLT instead of regular PLT entry. */
- h->root.u.def.section = bnd_s;
- h->root.u.def.value = eh->plt_bnd.offset;
+ /* We need to make a call to the entry of the
+ second PLT instead of regular PLT entry. */
+ h->root.u.def.section = second_s;
+ h->root.u.def.value = eh->plt_second.offset;
}
else
{
/* Make room for this entry. */
if (use_plt_got)
- got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
+ got_s->size += htab->non_lazy_plt->plt_entry_size;
else
{
s->size += plt_entry_size;
- if (bnd_s)
- bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
+ if (second_s)
+ second_s->size += htab->non_lazy_plt->plt_entry_size;
/* We also need to make an entry in the .got.plt section,
which will be placed in the .got section by the linker
Undefined weak syms won't yet be marked as dynamic. */
if (h->dynindx == -1
&& !h->forced_local
- && !resolved_to_zero)
+ && !resolved_to_zero
+ && h->root.type == bfd_link_hash_undefweak)
{
if (! bfd_elf_link_record_dynamic_symbol (info, h))
return FALSE;
if (h->dynindx == -1
&& ! h->forced_local
&& ! resolved_to_zero
+ && h->root.type == bfd_link_hash_undefweak
&& ! bfd_elf_link_record_dynamic_symbol (info, h))
return FALSE;
if ((info->warn_shared_textrel && bfd_link_pic (info))
|| info->error_textrel)
+ /* xgettext:c-format */
info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
p->sec->owner, h->root.root.string,
p->sec);
info->flags |= DF_TEXTREL;
if ((info->warn_shared_textrel && bfd_link_pic (info))
|| info->error_textrel)
+ /* xgettext:c-format */
info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
p->sec->owner, p->sec);
}
/* Reserve room for the initial entry.
FIXME: we could probably do away with it in this case. */
if (htab->elf.splt->size == 0)
- htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
+ htab->elf.splt->size = htab->plt.plt_entry_size;
htab->tlsdesc_plt = htab->elf.splt->size;
- htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
+ htab->elf.splt->size += htab->plt.plt_entry_size;
}
}
htab->elf.sgotplt->size = 0;
}
- if (htab->plt_eh_frame != NULL
- && htab->elf.splt != NULL
- && htab->elf.splt->size != 0
- && !bfd_is_abs_section (htab->elf.splt->output_section)
- && _bfd_elf_eh_frame_present (info))
+ if (_bfd_elf_eh_frame_present (info))
{
- const struct elf_x86_64_backend_data *arch_data
- = get_elf_x86_64_arch_data (bed);
- htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
+ if (htab->plt_eh_frame != NULL
+ && htab->elf.splt != NULL
+ && htab->elf.splt->size != 0
+ && !bfd_is_abs_section (htab->elf.splt->output_section))
+ htab->plt_eh_frame->size = htab->plt.eh_frame_plt_size;
+
+ if (htab->plt_got_eh_frame != NULL
+ && htab->plt_got != NULL
+ && htab->plt_got->size != 0
+ && !bfd_is_abs_section (htab->plt_got->output_section))
+ htab->plt_got_eh_frame->size
+ = htab->non_lazy_plt->eh_frame_plt_size;
+
+ /* Unwind info for the second PLT and .plt.got sections is
+ identical. */
+ if (htab->plt_second_eh_frame != NULL
+ && htab->plt_second != NULL
+ && htab->plt_second->size != 0
+ && !bfd_is_abs_section (htab->plt_second->output_section))
+ htab->plt_second_eh_frame->size
+ = htab->non_lazy_plt->eh_frame_plt_size;
}
/* We now have determined the sizes of the various dynamic sections.
|| s == htab->elf.sgotplt
|| s == htab->elf.iplt
|| s == htab->elf.igotplt
- || s == htab->plt_bnd
+ || s == htab->plt_second
|| s == htab->plt_got
|| s == htab->plt_eh_frame
- || s == htab->sdynbss)
+ || s == htab->plt_got_eh_frame
+ || s == htab->plt_second_eh_frame
+ || s == htab->elf.sdynbss
+ || s == htab->elf.sdynrelro)
{
/* Strip this section if we don't need it; see the
comment below. */
if (htab->plt_eh_frame != NULL
&& htab->plt_eh_frame->contents != NULL)
{
- const struct elf_x86_64_backend_data *arch_data
- = get_elf_x86_64_arch_data (bed);
-
memcpy (htab->plt_eh_frame->contents,
- arch_data->eh_frame_plt, htab->plt_eh_frame->size);
+ htab->plt.eh_frame_plt, htab->plt_eh_frame->size);
bfd_put_32 (dynobj, htab->elf.splt->size,
htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
}
+ if (htab->plt_got_eh_frame != NULL
+ && htab->plt_got_eh_frame->contents != NULL)
+ {
+ memcpy (htab->plt_got_eh_frame->contents,
+ htab->non_lazy_plt->eh_frame_plt,
+ htab->plt_got_eh_frame->size);
+ bfd_put_32 (dynobj, htab->plt_got->size,
+ (htab->plt_got_eh_frame->contents
+ + PLT_FDE_LEN_OFFSET));
+ }
+
+ if (htab->plt_second_eh_frame != NULL
+ && htab->plt_second_eh_frame->contents != NULL)
+ {
+ memcpy (htab->plt_second_eh_frame->contents,
+ htab->non_lazy_plt->eh_frame_plt,
+ htab->plt_second_eh_frame->size);
+ bfd_put_32 (dynobj, htab->plt_second->size,
+ (htab->plt_second_eh_frame->contents
+ + PLT_FDE_LEN_OFFSET));
+ }
+
if (htab->elf.dynamic_sections_created)
{
/* Add some entries to the .dynamic section. We fill in the
relocation. */
if (!add_dynamic_entry (DT_PLTGOT, 0))
return FALSE;
+ }
- if (htab->elf.srelplt->size != 0)
- {
- if (!add_dynamic_entry (DT_PLTRELSZ, 0)
- || !add_dynamic_entry (DT_PLTREL, DT_RELA)
- || !add_dynamic_entry (DT_JMPREL, 0))
- return FALSE;
- }
-
- if (htab->tlsdesc_plt
- && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
- || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
+ if (htab->elf.srelplt->size != 0)
+ {
+ if (!add_dynamic_entry (DT_PLTRELSZ, 0)
+ || !add_dynamic_entry (DT_PLTREL, DT_RELA)
+ || !add_dynamic_entry (DT_JMPREL, 0))
return FALSE;
}
+ if (htab->tlsdesc_plt
+ && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
+ || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
+ return FALSE;
+
if (relocs)
{
if (!add_dynamic_entry (DT_RELA, 0)
Elf_Internal_Rela *rel;
Elf_Internal_Rela *wrel;
Elf_Internal_Rela *relend;
- const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
+ unsigned int plt_entry_size;
BFD_ASSERT (is_x86_64_elf (input_bfd));
htab = elf_x86_64_hash_table (info);
if (htab == NULL)
return FALSE;
+ plt_entry_size = htab->plt.plt_entry_size;
symtab_hdr = &elf_symtab_hdr (input_bfd);
sym_hashes = elf_sym_hashes (input_bfd);
local_got_offsets = elf_local_got_offsets (input_bfd);
asection *base_got, *resolved_plt;
bfd_vma st_size;
bfd_boolean resolved_to_zero;
+ bfd_boolean relative_reloc;
r_type = ELF32_R_TYPE (rel->r_info);
if (r_type == (int) R_X86_64_GNU_VTINHERIT
}
if (r_type >= (int) R_X86_64_standard)
- {
- (*_bfd_error_handler)
- (_("%B: unrecognized relocation (0x%x) in section `%A'"),
- input_bfd, input_section, r_type);
- bfd_set_error (bfd_error_bad_value);
- return FALSE;
- }
+ return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
if (r_type != (int) R_X86_64_32
|| ABI_64_P (output_bfd))
continue;
abort ();
}
- else if (h->plt.offset == (bfd_vma) -1)
- abort ();
+
+ switch (r_type)
+ {
+ default:
+ break;
+
+ case R_X86_64_GOTPCREL:
+ case R_X86_64_GOTPCRELX:
+ case R_X86_64_REX_GOTPCRELX:
+ case R_X86_64_GOTPCREL64:
+ base_got = htab->elf.sgot;
+ off = h->got.offset;
+
+ if (base_got == NULL)
+ abort ();
+
+ if (off == (bfd_vma) -1)
+ {
+ /* We can't use h->got.offset here to save state, or
+ even just remember the offset, as finish_dynamic_symbol
+ would use that as offset into .got. */
+
+ if (h->plt.offset == (bfd_vma) -1)
+ abort ();
+
+ if (htab->elf.splt != NULL)
+ {
+ plt_index = (h->plt.offset / plt_entry_size
+ - htab->plt.has_plt0);
+ off = (plt_index + 3) * GOT_ENTRY_SIZE;
+ base_got = htab->elf.sgotplt;
+ }
+ else
+ {
+ plt_index = h->plt.offset / plt_entry_size;
+ off = plt_index * GOT_ENTRY_SIZE;
+ base_got = htab->elf.igotplt;
+ }
+
+ if (h->dynindx == -1
+ || h->forced_local
+ || info->symbolic)
+ {
+ /* This references the local definition. We must
+ initialize this entry in the global offset table.
+ Since the offset must always be a multiple of 8,
+ we use the least significant bit to record
+ whether we have initialized it already.
+
+ When doing a dynamic link, we create a .rela.got
+ relocation entry to initialize the value. This
+ is done in the finish_dynamic_symbol routine. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ bfd_put_64 (output_bfd, relocation,
+ base_got->contents + off);
+ /* Note that this is harmless for the GOTPLT64
+ case, as -1 | 1 still is -1. */
+ h->got.offset |= 1;
+ }
+ }
+ }
+
+ relocation = (base_got->output_section->vma
+ + base_got->output_offset + off);
+
+ goto do_relocation;
+ }
+
+ if (h->plt.offset == (bfd_vma) -1)
+ {
+ /* Handle static pointers of STT_GNU_IFUNC symbols. */
+ if (r_type == htab->pointer_r_type
+ && (input_section->flags & SEC_CODE) == 0)
+ goto do_ifunc_pointer;
+ goto bad_ifunc_reloc;
+ }
/* STT_GNU_IFUNC symbol must go through PLT. */
if (htab->elf.splt != NULL)
{
- if (htab->plt_bnd != NULL)
+ if (htab->plt_second != NULL)
{
- resolved_plt = htab->plt_bnd;
- plt_offset = eh->plt_bnd.offset;
+ resolved_plt = htab->plt_second;
+ plt_offset = eh->plt_second.offset;
}
else
{
switch (r_type)
{
default:
+bad_ifunc_reloc:
if (h->root.root.string)
name = h->root.root.string;
else
name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
NULL);
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B: relocation %s against STT_GNU_IFUNC "
- "symbol `%s' isn't handled by %s"), input_bfd,
- howto->name, name, __FUNCTION__);
+ "symbol `%s' isn't supported"), input_bfd,
+ howto->name, name);
bfd_set_error (bfd_error_bad_value);
return FALSE;
goto do_relocation;
/* FALLTHROUGH */
case R_X86_64_64:
+do_ifunc_pointer:
if (rel->r_addend != 0)
{
if (h->root.root.string)
else
name = bfd_elf_sym_name (input_bfd, symtab_hdr,
sym, NULL);
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B: relocation %s against STT_GNU_IFUNC "
- "symbol `%s' has non-zero addend: %d"),
+ "symbol `%s' has non-zero addend: %Ld"),
input_bfd, howto->name, name, rel->r_addend);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
/* Generate dynamic relcoation only when there is a
- non-GOT reference in a shared object. */
- if (bfd_link_pic (info) && h->non_got_ref)
+ non-GOT reference in a shared object or there is no
+ PLT. */
+ if ((bfd_link_pic (info) && h->non_got_ref)
+ || h->plt.offset == (bfd_vma) -1)
{
Elf_Internal_Rela outrel;
asection *sreloc;
|| h->forced_local
|| bfd_link_executable (info))
{
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ h->root.root.string,
+ h->root.u.def.section->owner);
+
/* This symbol is resolved locally. */
outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
outrel.r_addend = (h->root.u.def.value
outrel.r_addend = 0;
}
- sreloc = htab->elf.irelifunc;
+ /* Dynamic relocations are stored in
+ 1. .rela.ifunc section in PIC object.
+ 2. .rela.got section in dynamic executable.
+ 3. .rela.iplt section in static executable. */
+ if (bfd_link_pic (info))
+ sreloc = htab->elf.irelifunc;
+ else if (htab->elf.splt != NULL)
+ sreloc = htab->elf.srelgot;
+ else
+ sreloc = htab->elf.irelplt;
elf_append_rela (output_bfd, sreloc, &outrel);
/* If this reloc is against an external symbol, we
case R_X86_64_PLT32:
case R_X86_64_PLT32_BND:
goto do_relocation;
-
- case R_X86_64_GOTPCREL:
- case R_X86_64_GOTPCRELX:
- case R_X86_64_REX_GOTPCRELX:
- case R_X86_64_GOTPCREL64:
- base_got = htab->elf.sgot;
- off = h->got.offset;
-
- if (base_got == NULL)
- abort ();
-
- if (off == (bfd_vma) -1)
- {
- /* We can't use h->got.offset here to save state, or
- even just remember the offset, as finish_dynamic_symbol
- would use that as offset into .got. */
-
- if (htab->elf.splt != NULL)
- {
- plt_index = h->plt.offset / plt_entry_size - 1;
- off = (plt_index + 3) * GOT_ENTRY_SIZE;
- base_got = htab->elf.sgotplt;
- }
- else
- {
- plt_index = h->plt.offset / plt_entry_size;
- off = plt_index * GOT_ENTRY_SIZE;
- base_got = htab->elf.igotplt;
- }
-
- if (h->dynindx == -1
- || h->forced_local
- || info->symbolic)
- {
- /* This references the local defitionion. We must
- initialize this entry in the global offset table.
- Since the offset must always be a multiple of 8,
- we use the least significant bit to record
- whether we have initialized it already.
-
- When doing a dynamic link, we create a .rela.got
- relocation entry to initialize the value. This
- is done in the finish_dynamic_symbol routine. */
- if ((off & 1) != 0)
- off &= ~1;
- else
- {
- bfd_put_64 (output_bfd, relocation,
- base_got->contents + off);
- /* Note that this is harmless for the GOTPLT64
- case, as -1 | 1 still is -1. */
- h->got.offset |= 1;
- }
- }
- }
-
- relocation = (base_got->output_section->vma
- + base_got->output_offset + off);
-
- goto do_relocation;
}
}
case R_X86_64_GOTPCREL64:
/* Use global offset table entry as symbol value. */
case R_X86_64_GOTPLT64:
- /* This is obsolete and treated the the same as GOT64. */
+ /* This is obsolete and treated the same as GOT64. */
base_got = htab->elf.sgot;
if (htab->elf.sgot == NULL)
abort ();
+ relative_reloc = FALSE;
if (h != NULL)
{
bfd_boolean dyn;
state, or even just remember the offset, as
finish_dynamic_symbol would use that as offset into
.got. */
- bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
+ bfd_vma plt_index = (h->plt.offset / plt_entry_size
+ - htab->plt.has_plt0);
off = (plt_index + 3) * GOT_ENTRY_SIZE;
base_got = htab->elf.sgotplt;
}
/* Note that this is harmless for the GOTPLT64 case,
as -1 | 1 still is -1. */
h->got.offset |= 1;
+
+ if (h->dynindx == -1
+ && !h->forced_local
+ && h->root.type != bfd_link_hash_undefweak
+ && bfd_link_pic (info))
+ {
+ /* If this symbol isn't dynamic in PIC,
+ generate R_X86_64_RELATIVE here. */
+ eh->no_finish_dynamic_symbol = 1;
+ relative_reloc = TRUE;
+ }
}
}
else
{
bfd_put_64 (output_bfd, relocation,
base_got->contents + off);
+ local_got_offsets[r_symndx] |= 1;
if (bfd_link_pic (info))
- {
- asection *s;
- Elf_Internal_Rela outrel;
-
- /* We need to generate a R_X86_64_RELATIVE reloc
- for the dynamic linker. */
- s = htab->elf.srelgot;
- if (s == NULL)
- abort ();
-
- outrel.r_offset = (base_got->output_section->vma
- + base_got->output_offset
- + off);
- outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
- outrel.r_addend = relocation;
- elf_append_rela (output_bfd, s, &outrel);
- }
-
- local_got_offsets[r_symndx] |= 1;
+ relative_reloc = TRUE;
}
}
+ if (relative_reloc)
+ {
+ asection *s;
+ Elf_Internal_Rela outrel;
+
+ /* We need to generate a R_X86_64_RELATIVE reloc
+ for the dynamic linker. */
+ s = htab->elf.srelgot;
+ if (s == NULL)
+ abort ();
+
+ outrel.r_offset = (base_got->output_section->vma
+ + base_got->output_offset
+ + off);
+ outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
+ outrel.r_addend = relocation;
+ elf_append_rela (output_bfd, s, &outrel);
+ }
+
if (off >= (bfd_vma) -2)
abort ();
break;
}
- (*_bfd_error_handler)
- (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
+ " `%s' can not be used when making a shared object"),
input_bfd, v, h->root.root.string);
bfd_set_error (bfd_error_bad_value);
return FALSE;
|| h->type == STT_OBJECT)
&& ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
{
- (*_bfd_error_handler)
- (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
+ " `%s' can not be used when making a shared object"),
input_bfd,
h->type == STT_FUNC ? "function" : "data",
h->root.root.string);
symbols it's the symbol itself relative to GOT. */
if (h != NULL
/* See PLT32 handling. */
- && h->plt.offset != (bfd_vma) -1
+ && (h->plt.offset != (bfd_vma) -1
+ || eh->plt_got.offset != (bfd_vma) -1)
&& htab->elf.splt != NULL)
{
- if (htab->plt_bnd != NULL)
+ if (eh->plt_got.offset != (bfd_vma) -1)
+ {
+ /* Use the GOT PLT. */
+ resolved_plt = htab->plt_got;
+ plt_offset = eh->plt_got.offset;
+ }
+ else if (htab->plt_second != NULL)
{
- resolved_plt = htab->plt_bnd;
- plt_offset = eh->plt_bnd.offset;
+ resolved_plt = htab->plt_second;
+ plt_offset = eh->plt_second.offset;
}
else
{
if (h->plt.offset != (bfd_vma) -1)
{
- if (htab->plt_bnd != NULL)
+ if (htab->plt_second != NULL)
{
- resolved_plt = htab->plt_bnd;
- plt_offset = eh->plt_bnd.offset;
+ resolved_plt = htab->plt_second;
+ plt_offset = eh->plt_second.offset;
}
else
{
case R_X86_64_PC32:
case R_X86_64_PC32_BND:
/* Don't complain about -fPIC if the symbol is undefined when
- building executable unless it is unresolved weak symbol. */
+ building executable unless it is unresolved weak symbol or
+ -z nocopyreloc is used. */
if ((input_section->flags & SEC_ALLOC) != 0
&& (input_section->flags & SEC_READONLY) != 0
&& h != NULL
&& ((bfd_link_executable (info)
- && h->root.type == bfd_link_hash_undefweak
- && !resolved_to_zero)
- || (bfd_link_pic (info)
- && !(bfd_link_pie (info)
- && h->root.type == bfd_link_hash_undefined))))
+ && ((h->root.type == bfd_link_hash_undefweak
+ && !resolved_to_zero)
+ || (info->nocopyreloc
+ && h->def_dynamic
+ && !(h->root.u.def.section->flags & SEC_CODE))))
+ || bfd_link_dll (info)))
{
bfd_boolean fail = FALSE;
bfd_boolean branch
{
/* Symbol is referenced locally. Make sure it is
defined locally or for a branch. */
- fail = !h->def_regular && !branch;
+ fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
+ && !branch);
}
else if (!(bfd_link_pie (info)
&& (h->needs_copy || eh->needs_copy)))
}
if (fail)
- return elf_x86_64_need_pic (input_bfd, input_section,
+ return elf_x86_64_need_pic (info, input_bfd, input_section,
h, NULL, NULL, howto);
}
/* Fall through. */
&& (h->needs_copy
|| eh->needs_copy
|| h->root.type == bfd_link_hash_undefined)
- && IS_X86_64_PCREL_TYPE (r_type))
+ && (IS_X86_64_PCREL_TYPE (r_type)
+ || r_type == R_X86_64_SIZE32
+ || r_type == R_X86_64_SIZE64))
&& (h == NULL
|| ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
&& !resolved_to_zero)
else
name = bfd_elf_sym_name (input_bfd, symtab_hdr,
sym, NULL);
- if (addend < 0)
- (*_bfd_error_handler)
- (_("%B: addend -0x%x in relocation %s against "
- "symbol `%s' at 0x%lx in section `%A' is "
- "out of range"),
- input_bfd, input_section, addend,
- howto->name, name,
- (unsigned long) rel->r_offset);
- else
- (*_bfd_error_handler)
- (_("%B: addend 0x%x in relocation %s against "
- "symbol `%s' at 0x%lx in section `%A' is "
- "out of range"),
- input_bfd, input_section, addend,
- howto->name, name,
- (unsigned long) rel->r_offset);
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: addend %s%#x in relocation %s against "
+ "symbol `%s' at %#Lx in section `%A' is "
+ "out of range"),
+ input_bfd, addend < 0 ? "-" : "", addend,
+ howto->name, name, rel->r_offset, input_section);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
&& _bfd_elf_section_offset (output_bfd, info, input_section,
rel->r_offset) != (bfd_vma) -1)
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
- howto->name,
- h->root.root.string);
- return FALSE;
+ switch (r_type)
+ {
+ case R_X86_64_32S:
+ if (info->nocopyreloc
+ && !(h->root.u.def.section->flags & SEC_CODE))
+ return elf_x86_64_need_pic (info, input_bfd, input_section,
+ h, NULL, NULL, howto);
+ /* Fall through. */
+
+ default:
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
+ input_bfd,
+ input_section,
+ rel->r_offset,
+ howto->name,
+ h->root.root.string);
+ return FALSE;
+ }
}
do_relocation:
(bfd_vma) 0, input_bfd, input_section, rel->r_offset);
else
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): reloc against `%s': error %d"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+%#Lx): reloc against `%s': error %d"),
input_bfd, input_section,
- (long) rel->r_offset, name, (int) r);
+ rel->r_offset, name, (int) r);
return FALSE;
}
}
Elf_Internal_Sym *sym)
{
struct elf_x86_64_link_hash_table *htab;
- const struct elf_x86_64_backend_data *abed;
- bfd_boolean use_plt_bnd;
+ bfd_boolean use_plt_second;
struct elf_x86_64_link_hash_entry *eh;
bfd_boolean local_undefweak;
if (htab == NULL)
return FALSE;
- /* Use MPX backend data in case of BND relocation. Use .plt_bnd
- section only if there is .plt section. */
- use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
- abed = (use_plt_bnd
- ? &elf_x86_64_bnd_arch_bed
- : get_elf_x86_64_backend_data (output_bfd));
+ /* Use the second PLT section only if there is .plt section. */
+ use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
eh = (struct elf_x86_64_link_hash_entry *) h;
+ if (eh->no_finish_dynamic_symbol)
+ abort ();
/* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
resolved undefined weak symbols in executable so that their
if (h->plt.offset != (bfd_vma) -1)
{
bfd_vma plt_index;
- bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
- bfd_vma plt_plt_insn_end, plt_got_insn_size;
+ bfd_vma got_offset, plt_offset;
Elf_Internal_Rela rela;
bfd_byte *loc;
asection *plt, *gotplt, *relplt, *resolved_plt;
if (plt == htab->elf.splt)
{
- got_offset = h->plt.offset / abed->plt_entry_size - 1;
+ got_offset = (h->plt.offset / htab->plt.plt_entry_size
+ - htab->plt.has_plt0);
got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
}
else
{
- got_offset = h->plt.offset / abed->plt_entry_size;
+ got_offset = h->plt.offset / htab->plt.plt_entry_size;
got_offset = got_offset * GOT_ENTRY_SIZE;
}
- plt_plt_insn_end = abed->plt_plt_insn_end;
- plt_plt_offset = abed->plt_plt_offset;
- plt_got_insn_size = abed->plt_got_insn_size;
- plt_got_offset = abed->plt_got_offset;
- if (use_plt_bnd)
+ /* Fill in the entry in the procedure linkage table. */
+ memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
+ htab->plt.plt_entry_size);
+ if (use_plt_second)
{
- /* Use the second PLT with BND relocations. */
- const bfd_byte *plt_entry, *plt2_entry;
+ memcpy (htab->plt_second->contents + eh->plt_second.offset,
+ htab->non_lazy_plt->plt_entry,
+ htab->non_lazy_plt->plt_entry_size);
- if (eh->has_bnd_reloc)
- {
- plt_entry = elf_x86_64_bnd_plt_entry;
- plt2_entry = elf_x86_64_bnd_plt2_entry;
- }
- else
- {
- plt_entry = elf_x86_64_legacy_plt_entry;
- plt2_entry = elf_x86_64_legacy_plt2_entry;
-
- /* Subtract 1 since there is no BND prefix. */
- plt_plt_insn_end -= 1;
- plt_plt_offset -= 1;
- plt_got_insn_size -= 1;
- plt_got_offset -= 1;
- }
-
- BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
- == sizeof (elf_x86_64_legacy_plt_entry));
-
- /* Fill in the entry in the procedure linkage table. */
- memcpy (plt->contents + h->plt.offset,
- plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
- /* Fill in the entry in the second PLT. */
- memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
- plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
-
- resolved_plt = htab->plt_bnd;
- plt_offset = eh->plt_bnd.offset;
+ resolved_plt = htab->plt_second;
+ plt_offset = eh->plt_second.offset;
}
else
{
- /* Fill in the entry in the procedure linkage table. */
- memcpy (plt->contents + h->plt.offset, abed->plt_entry,
- abed->plt_entry_size);
-
resolved_plt = plt;
plt_offset = h->plt.offset;
}
- resolved_plt->output_section->vma
- resolved_plt->output_offset
- plt_offset
- - plt_got_insn_size);
+ - htab->plt.plt_got_insn_size);
/* Check PC-relative offset overflow in PLT entry. */
if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
+ /* xgettext:c-format */
info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, plt_got_pcrel_offset,
- resolved_plt->contents + plt_offset + plt_got_offset);
+ (resolved_plt->contents + plt_offset
+ + htab->plt.plt_got_offset));
/* Fill in the entry in the global offset table, initially this
points to the second part of the PLT entry. Leave the entry
against undefined weak symbol in PIE. */
if (!local_undefweak)
{
- bfd_put_64 (output_bfd, (plt->output_section->vma
- + plt->output_offset
- + h->plt.offset
- + abed->plt_lazy_offset),
- gotplt->contents + got_offset);
+ if (htab->plt.has_plt0)
+ bfd_put_64 (output_bfd, (plt->output_section->vma
+ + plt->output_offset
+ + h->plt.offset
+ + htab->lazy_plt->plt_lazy_offset),
+ gotplt->contents + got_offset);
/* Fill in the entry in the .rela.plt section. */
rela.r_offset = (gotplt->output_section->vma
&& h->def_regular
&& h->type == STT_GNU_IFUNC))
{
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ h->root.root.string,
+ h->root.u.def.section->owner);
+
/* If an STT_GNU_IFUNC symbol is locally defined, generate
R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
plt_index = htab->next_jump_slot_index++;
}
- /* Don't fill PLT entry for static executables. */
- if (plt == htab->elf.splt)
+ /* Don't fill the second and third slots in PLT entry for
+ static executables nor without PLT0. */
+ if (plt == htab->elf.splt && htab->plt.has_plt0)
{
- bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
+ bfd_vma plt0_offset
+ = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
/* Put relocation index. */
bfd_put_32 (output_bfd, plt_index,
(plt->contents + h->plt.offset
- + abed->plt_reloc_offset));
+ + htab->lazy_plt->plt_reloc_offset));
/* Put offset for jmp .PLT0 and check for overflow. We don't
check relocation index for overflow since branch displacement
will overflow first. */
if (plt0_offset > 0x80000000)
+ /* xgettext:c-format */
info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, - plt0_offset,
- plt->contents + h->plt.offset + plt_plt_offset);
+ (plt->contents + h->plt.offset
+ + htab->lazy_plt->plt_plt_offset));
}
bed = get_elf_backend_data (output_bfd);
}
else if (eh->plt_got.offset != (bfd_vma) -1)
{
- bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
+ bfd_vma got_offset, plt_offset;
asection *plt, *got;
bfd_boolean got_after_plt;
int32_t got_pcrel_offset;
- const bfd_byte *got_plt_entry;
/* Set the entry in the GOT procedure linkage table. */
plt = htab->plt_got;
got_offset = h->got.offset;
if (got_offset == (bfd_vma) -1
- || h->type == STT_GNU_IFUNC
+ || (h->type == STT_GNU_IFUNC && h->def_regular)
|| plt == NULL
|| got == NULL)
abort ();
- /* Use the second PLT entry template for the GOT PLT since they
+ /* Use the non-lazy PLT entry template for the GOT PLT since they
are the identical. */
- plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
- plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
- if (eh->has_bnd_reloc)
- got_plt_entry = elf_x86_64_bnd_plt2_entry;
- else
- {
- got_plt_entry = elf_x86_64_legacy_plt2_entry;
-
- /* Subtract 1 since there is no BND prefix. */
- plt_got_insn_size -= 1;
- plt_got_offset -= 1;
- }
-
/* Fill in the entry in the GOT procedure linkage table. */
plt_offset = eh->plt_got.offset;
memcpy (plt->contents + plt_offset,
- got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
+ htab->non_lazy_plt->plt_entry,
+ htab->non_lazy_plt->plt_entry_size);
/* Put offset the PC-relative instruction referring to the GOT
entry, subtracting the size of that instruction. */
- plt->output_section->vma
- plt->output_offset
- plt_offset
- - plt_got_insn_size);
+ - htab->non_lazy_plt->plt_got_insn_size);
/* Check PC-relative offset overflow in GOT PLT entry. */
got_after_plt = got->output_section->vma > plt->output_section->vma;
if ((got_after_plt && got_pcrel_offset < 0)
|| (!got_after_plt && got_pcrel_offset > 0))
+ /* xgettext:c-format */
info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, got_pcrel_offset,
- plt->contents + plt_offset + plt_got_offset);
+ (plt->contents + plt_offset
+ + htab->non_lazy_plt->plt_got_offset));
}
if (!local_undefweak
&& !local_undefweak)
{
Elf_Internal_Rela rela;
+ asection *relgot = htab->elf.srelgot;
/* This symbol has an entry in the global offset table. Set it
up. */
if (h->def_regular
&& h->type == STT_GNU_IFUNC)
{
- if (bfd_link_pic (info))
+ if (h->plt.offset == (bfd_vma) -1)
+ {
+ /* STT_GNU_IFUNC is referenced without PLT. */
+ if (htab->elf.splt == NULL)
+ {
+ /* Use the .rel[a].iplt section to store .got relocations
+ in a static executable. */
+ relgot = htab->elf.irelplt;
+ }
+ if (SYMBOL_REFERENCES_LOCAL (info, h))
+ {
+ info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
+ h->root.root.string,
+ h->root.u.def.section->owner);
+
+ rela.r_info = htab->r_info (0,
+ R_X86_64_IRELATIVE);
+ rela.r_addend = (h->root.u.def.value
+ + h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset);
+ }
+ else
+ goto do_glob_dat;
+ }
+ else if (bfd_link_pic (info))
{
/* Generate R_X86_64_GLOB_DAT. */
goto do_glob_dat;
else
{
asection *plt;
+ bfd_vma plt_offset;
if (!h->pointer_equality_needed)
abort ();
/* For non-shared object, we can't use .got.plt, which
contains the real function addres if we need pointer
equality. We load the GOT entry with the PLT entry. */
- plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
+ if (htab->plt_second != NULL)
+ {
+ plt = htab->plt_second;
+ plt_offset = eh->plt_second.offset;
+ }
+ else
+ {
+ plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
+ plt_offset = h->plt.offset;
+ }
bfd_put_64 (output_bfd, (plt->output_section->vma
+ plt->output_offset
- + h->plt.offset),
+ + plt_offset),
htab->elf.sgot->contents + h->got.offset);
return TRUE;
}
rela.r_addend = 0;
}
- elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
+ elf_append_rela (output_bfd, relgot, &rela);
}
if (h->needs_copy)
{
Elf_Internal_Rela rela;
+ asection *s;
/* This symbol needs a copy reloc. Set it up. */
if (h->dynindx == -1
|| (h->root.type != bfd_link_hash_defined
&& h->root.type != bfd_link_hash_defweak)
- || htab->srelbss == NULL)
+ || htab->elf.srelbss == NULL
+ || htab->elf.sreldynrelro == NULL)
abort ();
rela.r_offset = (h->root.u.def.value
+ h->root.u.def.section->output_offset);
rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
rela.r_addend = 0;
- elf_append_rela (output_bfd, htab->srelbss, &rela);
+ if (h->root.u.def.section == htab->elf.sdynrelro)
+ s = htab->elf.sreldynrelro;
+ else
+ s = htab->elf.srelbss;
+ elf_append_rela (output_bfd, s, &rela);
}
return TRUE;
const struct elf_backend_data *bed = get_elf_backend_data (abfd);
struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
- if ((int) ELF32_R_TYPE (rela->r_info) == R_X86_64_IRELATIVE)
- return reloc_class_ifunc;
-
if (htab->elf.dynsym != NULL
&& htab->elf.dynsym->contents != NULL)
{
switch ((int) ELF32_R_TYPE (rela->r_info))
{
+ case R_X86_64_IRELATIVE:
+ return reloc_class_ifunc;
case R_X86_64_RELATIVE:
case R_X86_64_RELATIVE64:
return reloc_class_relative;
struct elf_x86_64_link_hash_table *htab;
bfd *dynobj;
asection *sdyn;
- const struct elf_x86_64_backend_data *abed;
htab = elf_x86_64_hash_table (info);
if (htab == NULL)
return FALSE;
- /* Use MPX backend data in case of BND relocation. Use .plt_bnd
- section only if there is .plt section. */
- abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
- ? &elf_x86_64_bnd_arch_bed
- : get_elf_x86_64_backend_data (output_bfd));
-
dynobj = htab->elf.dynobj;
sdyn = bfd_get_linker_section (dynobj, ".dynamic");
dyn.d_un.d_val = s->size;
break;
- case DT_RELASZ:
- /* The procedure linkage table relocs (DT_JMPREL) should
- not be included in the overall relocs (DT_RELA).
- Therefore, we override the DT_RELASZ entry here to
- make it not include the JMPREL relocs. Since the
- linker script arranges for .rela.plt to follow all
- other relocation sections, we don't have to worry
- about changing the DT_RELA entry. */
- if (htab->elf.srelplt != NULL)
- {
- s = htab->elf.srelplt->output_section;
- dyn.d_un.d_val -= s->size;
- }
- break;
-
case DT_TLSDESC_PLT:
s = htab->elf.splt;
dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
(*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
}
- /* Fill in the special first entry in the procedure linkage table. */
if (htab->elf.splt && htab->elf.splt->size > 0)
{
- /* Fill in the first entry in the procedure linkage table. */
- memcpy (htab->elf.splt->contents,
- abed->plt0_entry, abed->plt_entry_size);
- /* Add offset for pushq GOT+8(%rip), since the instruction
- uses 6 bytes subtract this value. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgotplt->output_section->vma
- + htab->elf.sgotplt->output_offset
- + 8
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - 6),
- htab->elf.splt->contents + abed->plt0_got1_offset);
- /* Add offset for the PC-relative instruction accessing GOT+16,
- subtracting the offset to the end of that instruction. */
- bfd_put_32 (output_bfd,
- (htab->elf.sgotplt->output_section->vma
- + htab->elf.sgotplt->output_offset
- + 16
- - htab->elf.splt->output_section->vma
- - htab->elf.splt->output_offset
- - abed->plt0_got2_insn_end),
- htab->elf.splt->contents + abed->plt0_got2_offset);
-
elf_section_data (htab->elf.splt->output_section)
- ->this_hdr.sh_entsize = abed->plt_entry_size;
+ ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
- if (htab->tlsdesc_plt)
+ if (htab->plt.has_plt0)
{
- bfd_put_64 (output_bfd, (bfd_vma) 0,
- htab->elf.sgot->contents + htab->tlsdesc_got);
-
- memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
- abed->plt0_entry, abed->plt_entry_size);
-
- /* Add offset for pushq GOT+8(%rip), since the
- instruction uses 6 bytes subtract this value. */
+ /* Fill in the special first entry in the procedure linkage
+ table. */
+ memcpy (htab->elf.splt->contents,
+ htab->lazy_plt->plt0_entry,
+ htab->lazy_plt->plt_entry_size);
+ /* Add offset for pushq GOT+8(%rip); since the instruction
+ uses 6 bytes, subtract this value. */
bfd_put_32 (output_bfd,
(htab->elf.sgotplt->output_section->vma
+ htab->elf.sgotplt->output_offset
+ 8
- htab->elf.splt->output_section->vma
- htab->elf.splt->output_offset
- - htab->tlsdesc_plt
- 6),
- htab->elf.splt->contents
- + htab->tlsdesc_plt + abed->plt0_got1_offset);
- /* Add offset for the PC-relative instruction accessing GOT+TDG,
- where TGD stands for htab->tlsdesc_got, subtracting the offset
- to the end of that instruction. */
+ (htab->elf.splt->contents
+ + htab->lazy_plt->plt0_got1_offset));
+ /* Add offset for the PC-relative instruction accessing
+ GOT+16, subtracting the offset to the end of that
+ instruction. */
bfd_put_32 (output_bfd,
- (htab->elf.sgot->output_section->vma
- + htab->elf.sgot->output_offset
- + htab->tlsdesc_got
+ (htab->elf.sgotplt->output_section->vma
+ + htab->elf.sgotplt->output_offset
+ + 16
- htab->elf.splt->output_section->vma
- htab->elf.splt->output_offset
- - htab->tlsdesc_plt
- - abed->plt0_got2_insn_end),
- htab->elf.splt->contents
- + htab->tlsdesc_plt + abed->plt0_got2_offset);
+ - htab->lazy_plt->plt0_got2_insn_end),
+ (htab->elf.splt->contents
+ + htab->lazy_plt->plt0_got2_offset));
+
+ if (htab->tlsdesc_plt)
+ {
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgot->contents + htab->tlsdesc_got);
+
+ memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
+ htab->lazy_plt->plt0_entry,
+ htab->lazy_plt->plt_entry_size);
+
+ /* Add offset for pushq GOT+8(%rip); since the
+ instruction uses 6 bytes, subtract this value. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgotplt->output_section->vma
+ + htab->elf.sgotplt->output_offset
+ + 8
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - htab->tlsdesc_plt
+ - 6),
+ (htab->elf.splt->contents
+ + htab->tlsdesc_plt
+ + htab->lazy_plt->plt0_got1_offset));
+ /* Add offset for the PC-relative instruction accessing
+ GOT+TDG, where TDG stands for htab->tlsdesc_got,
+ subtracting the offset to the end of that
+ instruction. */
+ bfd_put_32 (output_bfd,
+ (htab->elf.sgot->output_section->vma
+ + htab->elf.sgot->output_offset
+ + htab->tlsdesc_got
+ - htab->elf.splt->output_section->vma
+ - htab->elf.splt->output_offset
+ - htab->tlsdesc_plt
+ - htab->lazy_plt->plt0_got2_insn_end),
+ (htab->elf.splt->contents
+ + htab->tlsdesc_plt
+ + htab->lazy_plt->plt0_got2_offset));
+ }
}
}
}
- if (htab->plt_bnd != NULL)
- elf_section_data (htab->plt_bnd->output_section)
- ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
+ if (htab->plt_got != NULL && htab->plt_got->size > 0)
+ elf_section_data (htab->plt_got->output_section)
+ ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
- if (htab->elf.sgotplt)
+ if (htab->plt_second != NULL && htab->plt_second->size > 0)
+ elf_section_data (htab->plt_second->output_section)
+ ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
+
+ /* GOT is always created in setup_gnu_properties. But it may not be
+ needed. */
+ if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0)
{
if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("discarded output section: `%A'"), htab->elf.sgotplt);
return FALSE;
}
- /* Fill in the first three entries in the global offset table. */
- if (htab->elf.sgotplt->size > 0)
- {
- /* Set the first entry in the global offset table to the address of
- the dynamic section. */
- if (sdyn == NULL)
- bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
- else
- bfd_put_64 (output_bfd,
- sdyn->output_section->vma + sdyn->output_offset,
- htab->elf.sgotplt->contents);
- /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
- bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
- bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
- }
-
- elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
- GOT_ENTRY_SIZE;
+ /* Set the first entry in the global offset table to the address of
+ the dynamic section. */
+ if (sdyn == NULL)
+ bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
+ else
+ bfd_put_64 (output_bfd,
+ sdyn->output_section->vma + sdyn->output_offset,
+ htab->elf.sgotplt->contents);
+ /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
+
+ elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize
+ = GOT_ENTRY_SIZE;
}
/* Adjust .eh_frame for .plt section. */
}
}
+ /* Adjust .eh_frame for .plt.got section. */
+ if (htab->plt_got_eh_frame != NULL
+ && htab->plt_got_eh_frame->contents != NULL)
+ {
+ if (htab->plt_got != NULL
+ && htab->plt_got->size != 0
+ && (htab->plt_got->flags & SEC_EXCLUDE) == 0
+ && htab->plt_got->output_section != NULL
+ && htab->plt_got_eh_frame->output_section != NULL)
+ {
+ bfd_vma plt_start = htab->plt_got->output_section->vma;
+ bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma
+ + htab->plt_got_eh_frame->output_offset
+ + PLT_FDE_START_OFFSET;
+ bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
+ htab->plt_got_eh_frame->contents
+ + PLT_FDE_START_OFFSET);
+ }
+ if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
+ {
+ if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
+ htab->plt_got_eh_frame,
+ htab->plt_got_eh_frame->contents))
+ return FALSE;
+ }
+ }
+
+ /* Adjust .eh_frame for the second PLT section. */
+ if (htab->plt_second_eh_frame != NULL
+ && htab->plt_second_eh_frame->contents != NULL)
+ {
+ if (htab->plt_second != NULL
+ && htab->plt_second->size != 0
+ && (htab->plt_second->flags & SEC_EXCLUDE) == 0
+ && htab->plt_second->output_section != NULL
+ && htab->plt_second_eh_frame->output_section != NULL)
+ {
+ bfd_vma plt_start = htab->plt_second->output_section->vma;
+ bfd_vma eh_frame_start
+ = (htab->plt_second_eh_frame->output_section->vma
+ + htab->plt_second_eh_frame->output_offset
+ + PLT_FDE_START_OFFSET);
+ bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
+ htab->plt_second_eh_frame->contents
+ + PLT_FDE_START_OFFSET);
+ }
+ if (htab->plt_second_eh_frame->sec_info_type
+ == SEC_INFO_TYPE_EH_FRAME)
+ {
+ if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
+ htab->plt_second_eh_frame,
+ htab->plt_second_eh_frame->contents))
+ return FALSE;
+ }
+ }
+
if (htab->elf.sgot && htab->elf.sgot->size > 0)
elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
= GOT_ENTRY_SIZE;
- /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
- htab_traverse (htab->loc_hash_table,
- elf_x86_64_finish_local_dynamic_symbol,
- info);
-
/* Fill PLT entries for undefined weak symbols in PIE. */
if (bfd_link_pie (info))
bfd_hash_traverse (&info->hash->table,
return TRUE;
}
-/* Return an array of PLT entry symbol values. */
+/* Fill PLT/GOT entries and allocate dynamic relocations for local
+ STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
+ It has to be done before elf_link_sort_relocs is called so that
+ dynamic relocations are properly sorted. */
-static bfd_vma *
-elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
- asection *relplt)
+static bfd_boolean
+elf_x86_64_output_arch_local_syms
+ (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info,
+ void *flaginfo ATTRIBUTE_UNUSED,
+ int (*func) (void *, const char *,
+ Elf_Internal_Sym *,
+ asection *,
+ struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
{
- bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
- arelent *p;
- long count, i;
- bfd_vma *plt_sym_val;
- bfd_vma plt_offset;
- bfd_byte *plt_contents;
- const struct elf_x86_64_backend_data *bed;
- Elf_Internal_Shdr *hdr;
- asection *plt_bnd;
-
- /* Get the .plt section contents. PLT passed down may point to the
- .plt.bnd section. Make sure that PLT always points to the .plt
- section. */
- plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
- if (plt_bnd)
- {
- if (plt != plt_bnd)
- abort ();
- plt = bfd_get_section_by_name (abfd, ".plt");
- if (plt == NULL)
- abort ();
- bed = &elf_x86_64_bnd_arch_bed;
- }
+ struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
+ /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
+ htab_traverse (htab->loc_hash_table,
+ elf_x86_64_finish_local_dynamic_symbol,
+ info);
+
+ return TRUE;
+}
+
+/* Sort relocs into address order. */
+
+static int
+compare_relocs (const void *ap, const void *bp)
+{
+ const arelent *a = * (const arelent **) ap;
+ const arelent *b = * (const arelent **) bp;
+
+ if (a->address > b->address)
+ return 1;
+ else if (a->address < b->address)
+ return -1;
else
- bed = get_elf_x86_64_backend_data (abfd);
+ return 0;
+}
- plt_contents = (bfd_byte *) bfd_malloc (plt->size);
- if (plt_contents == NULL)
- return NULL;
- if (!bfd_get_section_contents (abfd, (asection *) plt,
- plt_contents, 0, plt->size))
+enum elf_x86_64_plt_type
+{
+ plt_non_lazy = 0,
+ plt_lazy = 1 << 0,
+ plt_second = 1 << 1,
+ plt_unknown = -1
+};
+
+struct elf_x86_64_plt
+{
+ const char *name;
+ asection *sec;
+ bfd_byte *contents;
+ enum elf_x86_64_plt_type type;
+ unsigned int plt_got_offset;
+ unsigned int plt_got_insn_size;
+ unsigned int plt_entry_size;
+ long count;
+};
+
+/* Forward declaration. */
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_nacl_plt;
+
+/* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
+ dynamic relocations. */
+
+static long
+elf_x86_64_get_synthetic_symtab (bfd *abfd,
+ long symcount ATTRIBUTE_UNUSED,
+ asymbol **syms ATTRIBUTE_UNUSED,
+ long dynsymcount,
+ asymbol **dynsyms,
+ asymbol **ret)
+{
+ long size, count, i, n, len;
+ int j;
+ unsigned int plt_got_offset, plt_entry_size, plt_got_insn_size;
+ asymbol *s;
+ bfd_byte *plt_contents;
+ long dynrelcount, relsize;
+ arelent **dynrelbuf, *p;
+ const struct elf_x86_64_lazy_plt_layout *lazy_plt;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt;
+ const struct elf_x86_64_lazy_plt_layout *lazy_bnd_plt;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_bnd_plt;
+ const struct elf_x86_64_lazy_plt_layout *lazy_ibt_plt;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_ibt_plt;
+ asection *plt;
+ char *names;
+ enum elf_x86_64_plt_type plt_type;
+ struct elf_x86_64_plt plts[] =
{
-bad_return:
- free (plt_contents);
- return NULL;
- }
+ { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
+ { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
+ { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
+ { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
+ { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
+ };
- slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
- if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
- goto bad_return;
+ *ret = NULL;
- hdr = &elf_section_data (relplt)->this_hdr;
- count = relplt->size / hdr->sh_entsize;
+ if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
+ return 0;
- plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
- if (plt_sym_val == NULL)
- goto bad_return;
+ if (dynsymcount <= 0)
+ return 0;
- for (i = 0; i < count; i++)
- plt_sym_val[i] = -1;
+ relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
+ if (relsize <= 0)
+ return -1;
- plt_offset = bed->plt_entry_size;
- p = relplt->relocation;
- for (i = 0; i < count; i++, p++)
+ dynrelbuf = (arelent **) bfd_malloc (relsize);
+ if (dynrelbuf == NULL)
+ return -1;
+
+ dynrelcount = bfd_canonicalize_dynamic_reloc (abfd, dynrelbuf,
+ dynsyms);
+
+ /* Sort the relocs by address. */
+ qsort (dynrelbuf, dynrelcount, sizeof (arelent *), compare_relocs);
+
+ if (get_elf_x86_64_backend_data (abfd)->os == is_normal)
+ {
+ lazy_plt = &elf_x86_64_lazy_plt;
+ non_lazy_plt = &elf_x86_64_non_lazy_plt;
+ lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
+ non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
+ if (ABI_64_P (abfd))
+ {
+ lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
+ non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
+ }
+ else
+ {
+ lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
+ non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
+ }
+ }
+ else
{
- long reloc_index;
+ lazy_plt = &elf_x86_64_nacl_plt;
+ non_lazy_plt = NULL;
+ lazy_bnd_plt = NULL;
+ non_lazy_bnd_plt = NULL;
+ lazy_ibt_plt = NULL;
+ non_lazy_ibt_plt = NULL;
+ }
- /* Skip unknown relocation. */
- if (p->howto == NULL)
+ count = 0;
+ for (j = 0; plts[j].name != NULL; j++)
+ {
+ plt = bfd_get_section_by_name (abfd, plts[j].name);
+ if (plt == NULL)
continue;
- if (p->howto->type != R_X86_64_JUMP_SLOT
- && p->howto->type != R_X86_64_IRELATIVE)
- continue;
+ /* Get the PLT section contents. */
+ plt_contents = (bfd_byte *) bfd_malloc (plt->size);
+ if (plt_contents == NULL)
+ break;
+ if (!bfd_get_section_contents (abfd, (asection *) plt,
+ plt_contents, 0, plt->size))
+ {
+ free (plt_contents);
+ break;
+ }
- reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
- + bed->plt_reloc_offset));
- if (reloc_index < count)
+ /* Check what kind of PLT it is. */
+ plt_type = plt_unknown;
+ if (plts[j].type == plt_unknown)
{
- if (plt_bnd)
+ /* Match lazy PLT first. Need to check the first two
+ instructions. */
+ if ((memcmp (plt_contents, lazy_plt->plt0_entry,
+ lazy_plt->plt0_got1_offset) == 0)
+ && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
+ 2) == 0))
+ plt_type = plt_lazy;
+ else if (lazy_bnd_plt != NULL
+ && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
+ lazy_bnd_plt->plt0_got1_offset) == 0)
+ && (memcmp (plt_contents + 6,
+ lazy_bnd_plt->plt0_entry + 6, 3) == 0))
{
- /* This is the index in .plt section. */
- long plt_index = plt_offset / bed->plt_entry_size;
- /* Store VMA + the offset in .plt.bnd section. */
- plt_sym_val[reloc_index] =
- (plt_bnd->vma
- + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
+ plt_type = plt_lazy | plt_second;
+ /* The first entry in the lazy IBT PLT is the same as the
+ lazy BND PLT. */
+ if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
+ lazy_ibt_plt->plt_entry,
+ lazy_ibt_plt->plt_got_offset) == 0))
+ lazy_plt = lazy_ibt_plt;
+ else
+ lazy_plt = lazy_bnd_plt;
}
- else
- plt_sym_val[reloc_index] = plt->vma + plt_offset;
}
- plt_offset += bed->plt_entry_size;
- /* PR binutils/18437: Skip extra relocations in the .rela.plt
- section. */
- if (plt_offset >= plt->size)
- break;
+ if (non_lazy_plt != NULL
+ && (plt_type == plt_unknown || plt_type == plt_non_lazy))
+ {
+ /* Match non-lazy PLT. */
+ if (memcmp (plt_contents, non_lazy_plt->plt_entry,
+ non_lazy_plt->plt_got_offset) == 0)
+ plt_type = plt_non_lazy;
+ }
+
+ if (plt_type == plt_unknown || plt_type == plt_second)
+ {
+ if (non_lazy_bnd_plt != NULL
+ && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
+ non_lazy_bnd_plt->plt_got_offset) == 0))
+ {
+ /* Match BND PLT. */
+ plt_type = plt_second;
+ non_lazy_plt = non_lazy_bnd_plt;
+ }
+ else if (non_lazy_ibt_plt != NULL
+ && (memcmp (plt_contents,
+ non_lazy_ibt_plt->plt_entry,
+ non_lazy_ibt_plt->plt_got_offset) == 0))
+ {
+ /* Match IBT PLT. */
+ plt_type = plt_second;
+ non_lazy_plt = non_lazy_ibt_plt;
+ }
+ }
+
+ if (plt_type == plt_unknown)
+ continue;
+
+ plts[j].sec = plt;
+ plts[j].type = plt_type;
+
+ if ((plt_type & plt_lazy))
+ {
+ plts[j].plt_got_offset = lazy_plt->plt_got_offset;
+ plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
+ plts[j].plt_entry_size = lazy_plt->plt_entry_size;
+ /* Skip PLT0 in lazy PLT. */
+ i = 1;
+ }
+ else
+ {
+ plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
+ plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
+ plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
+ i = 0;
+ }
+
+ /* Skip lazy PLT when the second PLT is used. */
+ if (plt_type == (plt_lazy | plt_second))
+ plts[j].count = 0;
+ else
+ {
+ n = plt->size / plts[j].plt_entry_size;
+ plts[j].count = n;
+ count += n - i;
+ }
+
+ plts[j].contents = plt_contents;
}
- free (plt_contents);
+ size = count * sizeof (asymbol);
- return plt_sym_val;
-}
+ /* Allocate space for @plt suffixes. */
+ n = 0;
+ for (i = 0; i < dynrelcount; i++)
+ {
+ p = dynrelbuf[i];
+ size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
+ if (p->addend != 0)
+ size += sizeof ("+0x") - 1 + 8 + 8 * ABI_64_P (abfd);
+ }
-/* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
- support. */
+ s = *ret = (asymbol *) bfd_zmalloc (size);
+ if (s == NULL)
+ {
+bad_return:
+ for (j = 0; plts[j].name != NULL; j++)
+ if (plts[j].contents != NULL)
+ free (plts[j].contents);
+ free (dynrelbuf);
+ return -1;
+ }
-static long
-elf_x86_64_get_synthetic_symtab (bfd *abfd,
- long symcount,
- asymbol **syms,
- long dynsymcount,
- asymbol **dynsyms,
- asymbol **ret)
-{
- /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
- as PLT if it exists. */
- asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
- if (plt == NULL)
- plt = bfd_get_section_by_name (abfd, ".plt");
- return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
- dynsymcount, dynsyms, ret,
- plt,
- elf_x86_64_get_plt_sym_val);
+ /* Check for each PLT section. */
+ names = (char *) (s + count);
+ size = 0;
+ n = 0;
+ for (j = 0; plts[j].name != NULL; j++)
+ if ((plt_contents = plts[j].contents) != NULL)
+ {
+ long k;
+ bfd_vma offset;
+
+ plt_got_offset = plts[j].plt_got_offset;
+ plt_got_insn_size = plts[j].plt_got_insn_size;
+ plt_entry_size = plts[j].plt_entry_size;
+
+ plt = plts[j].sec;
+
+ if ((plts[j].type & plt_lazy))
+ {
+ /* Skip PLT0 in lazy PLT. */
+ k = 1;
+ offset = plt_entry_size;
+ }
+ else
+ {
+ k = 0;
+ offset = 0;
+ }
+
+ /* Check each PLT entry against dynamic relocations. */
+ for (; k < plts[j].count; k++)
+ {
+ int off;
+ bfd_vma got_vma;
+ long min, max, mid;
+
+ /* Get the PC-relative offset, a signed 32-bit integer. */
+ off = H_GET_32 (abfd, (plt_contents + offset
+ + plt_got_offset));
+ got_vma = plt->vma + offset + off + plt_got_insn_size;
+
+ /* Binary search. */
+ p = dynrelbuf[0];
+ min = 0;
+ max = dynrelcount;
+ while ((min + 1) < max)
+ {
+ arelent *r;
+
+ mid = (min + max) / 2;
+ r = dynrelbuf[mid];
+ if (got_vma > r->address)
+ min = mid;
+ else if (got_vma < r->address)
+ max = mid;
+ else
+ {
+ p = r;
+ break;
+ }
+ }
+
+ /* Skip unknown relocation. PR 17512: file: bc9d6cf5. */
+ if (got_vma == p->address
+ && p->howto != NULL
+ && (p->howto->type == R_X86_64_JUMP_SLOT
+ || p->howto->type == R_X86_64_GLOB_DAT
+ || p->howto->type == R_X86_64_IRELATIVE))
+ {
+ *s = **p->sym_ptr_ptr;
+ /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL
+ set. Since we are defining a symbol, ensure one
+ of them is set. */
+ if ((s->flags & BSF_LOCAL) == 0)
+ s->flags |= BSF_GLOBAL;
+ s->flags |= BSF_SYNTHETIC;
+ /* This is no longer a section symbol. */
+ s->flags &= ~BSF_SECTION_SYM;
+ s->section = plt;
+ s->the_bfd = plt->owner;
+ s->value = offset;
+ s->udata.p = NULL;
+ s->name = names;
+ len = strlen ((*p->sym_ptr_ptr)->name);
+ memcpy (names, (*p->sym_ptr_ptr)->name, len);
+ names += len;
+ if (p->addend != 0)
+ {
+ char buf[30], *a;
+
+ memcpy (names, "+0x", sizeof ("+0x") - 1);
+ names += sizeof ("+0x") - 1;
+ bfd_sprintf_vma (abfd, buf, p->addend);
+ for (a = buf; *a == '0'; ++a)
+ ;
+ size = strlen (a);
+ memcpy (names, a, size);
+ names += size;
+ }
+ memcpy (names, "@plt", sizeof ("@plt"));
+ names += sizeof ("@plt");
+ n++;
+ s++;
+ }
+ offset += plt_entry_size;
+ }
+ }
+
+ /* PLT entries with R_X86_64_TLSDESC relocations are skipped. */
+ if (n == 0)
+ goto bad_return;
+
+ count = n;
+
+ for (j = 0; plts[j].name != NULL; j++)
+ if (plts[j].contents != NULL)
+ free (plts[j].contents);
+
+ free (dynrelbuf);
+
+ return count;
}
/* Handle an x86-64 specific section when reading an object file. This
&& _bfd_elf_relocs_compatible (input, output));
}
+/* Parse x86-64 GNU properties. */
+
+static enum elf_property_kind
+elf_x86_64_parse_gnu_properties (bfd *abfd, unsigned int type,
+ bfd_byte *ptr, unsigned int datasz)
+{
+ elf_property *prop;
+
+ switch (type)
+ {
+ case GNU_PROPERTY_X86_ISA_1_USED:
+ case GNU_PROPERTY_X86_ISA_1_NEEDED:
+ case GNU_PROPERTY_X86_FEATURE_1_AND:
+ if (datasz != 4)
+ {
+ _bfd_error_handler
+ ((type == GNU_PROPERTY_X86_ISA_1_USED
+ ? _("error: %B: <corrupt x86 ISA used size: 0x%x>")
+ : (type == GNU_PROPERTY_X86_ISA_1_NEEDED
+ ? _("error: %B: <corrupt x86 ISA needed size: 0x%x>")
+ : _("error: %B: <corrupt x86 feature size: 0x%x>"))),
+ abfd, datasz);
+ return property_corrupt;
+ }
+ prop = _bfd_elf_get_property (abfd, type, datasz);
+ /* Combine properties of the same type. */
+ prop->u.number |= bfd_h_get_32 (abfd, ptr);
+ prop->pr_kind = property_number;
+ break;
+
+ default:
+ return property_ignored;
+ }
+
+ return property_number;
+}
+
+/* Merge x86-64 GNU property BPROP with APROP. If APROP isn't NULL,
+ return TRUE if APROP is updated. Otherwise, return TRUE if BPROP
+ should be merged with ABFD. */
+
+static bfd_boolean
+elf_x86_64_merge_gnu_properties (struct bfd_link_info *info,
+ bfd *abfd ATTRIBUTE_UNUSED,
+ elf_property *aprop,
+ elf_property *bprop)
+{
+ unsigned int number, features;
+ bfd_boolean updated = FALSE;
+ unsigned int pr_type = aprop != NULL ? aprop->pr_type : bprop->pr_type;
+
+ switch (pr_type)
+ {
+ case GNU_PROPERTY_X86_ISA_1_USED:
+ case GNU_PROPERTY_X86_ISA_1_NEEDED:
+ if (aprop != NULL && bprop != NULL)
+ {
+ number = aprop->u.number;
+ aprop->u.number = number | bprop->u.number;
+ updated = number != (unsigned int) aprop->u.number;
+ }
+ else
+ {
+ /* Return TRUE if APROP is NULL to indicate that BPROP should
+ be added to ABFD. */
+ updated = aprop == NULL;
+ }
+ break;
+
+ case GNU_PROPERTY_X86_FEATURE_1_AND:
+ /* Only one of APROP and BPROP can be NULL:
+ 1. APROP & BPROP when both APROP and BPROP aren't NULL.
+ 2. If APROP is NULL, remove x86 feature.
+ 3. Otherwise, do nothing.
+ */
+ if (aprop != NULL && bprop != NULL)
+ {
+ features = 0;
+ if (info->ibt)
+ features = GNU_PROPERTY_X86_FEATURE_1_IBT;
+ if (info->shstk)
+ features |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+ number = aprop->u.number;
+ /* Add GNU_PROPERTY_X86_FEATURE_1_IBT and
+ GNU_PROPERTY_X86_FEATURE_1_SHSTK. */
+ aprop->u.number = (number & bprop->u.number) | features;
+ updated = number != (unsigned int) aprop->u.number;
+ /* Remove the property if all feature bits are cleared. */
+ if (aprop->u.number == 0)
+ aprop->pr_kind = property_remove;
+ }
+ else
+ {
+ features = 0;
+ if (info->ibt)
+ features = GNU_PROPERTY_X86_FEATURE_1_IBT;
+ if (info->shstk)
+ features |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+ if (features)
+ {
+ /* Add GNU_PROPERTY_X86_FEATURE_1_IBT and
+ GNU_PROPERTY_X86_FEATURE_1_SHSTK. */
+ if (aprop != NULL)
+ {
+ number = aprop->u.number;
+ aprop->u.number = number | features;
+ updated = number != (unsigned int) aprop->u.number;
+ }
+ else
+ {
+ bprop->u.number |= features;
+ updated = TRUE;
+ }
+ }
+ else if (aprop != NULL)
+ {
+ aprop->pr_kind = property_remove;
+ updated = TRUE;
+ }
+ }
+ break;
+
+ default:
+ /* Never should happen. */
+ abort ();
+ }
+
+ return updated;
+}
+
+/* Set up x86-64 GNU properties. Return the first relocatable ELF input
+ with GNU properties if found. Otherwise, return NULL. */
+
+static bfd *
+elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
+{
+ bfd_boolean normal_target;
+ bfd_boolean lazy_plt;
+ asection *sec, *pltsec;
+ bfd *dynobj;
+ bfd_boolean use_ibt_plt;
+ unsigned int plt_alignment, features;
+ struct elf_x86_64_link_hash_table *htab;
+ bfd *pbfd;
+ bfd *ebfd = NULL;
+ elf_property *prop;
+
+ features = 0;
+ if (info->ibt)
+ features = GNU_PROPERTY_X86_FEATURE_1_IBT;
+ if (info->shstk)
+ features |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+
+ /* Find a normal input file with GNU property note. */
+ for (pbfd = info->input_bfds;
+ pbfd != NULL;
+ pbfd = pbfd->link.next)
+ if (bfd_get_flavour (pbfd) == bfd_target_elf_flavour
+ && bfd_count_sections (pbfd) != 0)
+ {
+ ebfd = pbfd;
+
+ if (elf_properties (pbfd) != NULL)
+ break;
+ }
+
+ if (ebfd != NULL)
+ {
+ if (features)
+ {
+ /* If features is set, add GNU_PROPERTY_X86_FEATURE_1_IBT and
+ GNU_PROPERTY_X86_FEATURE_1_SHSTK. */
+ prop = _bfd_elf_get_property (ebfd,
+ GNU_PROPERTY_X86_FEATURE_1_AND,
+ 4);
+ prop->u.number |= features;
+ prop->pr_kind = property_number;
+
+ /* Create the GNU property note section if needed. */
+ if (pbfd == NULL)
+ {
+ sec = bfd_make_section_with_flags (ebfd,
+ NOTE_GNU_PROPERTY_SECTION_NAME,
+ (SEC_ALLOC
+ | SEC_LOAD
+ | SEC_IN_MEMORY
+ | SEC_READONLY
+ | SEC_HAS_CONTENTS
+ | SEC_DATA));
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create GNU property section\n"));
+
+ if (!bfd_set_section_alignment (ebfd, sec,
+ ABI_64_P (ebfd) ? 3 : 2))
+ {
+error_alignment:
+ info->callbacks->einfo (_("%F%A: failed to align section\n"),
+ sec);
+ }
+
+ elf_section_type (sec) = SHT_NOTE;
+ }
+ }
+
+ /* Check GNU_PROPERTY_NO_COPY_ON_PROTECTED. */
+ for (; pbfd != NULL; pbfd = pbfd->link.next)
+ if (bfd_get_flavour (pbfd) == bfd_target_elf_flavour
+ && (pbfd->flags
+ & (DYNAMIC | BFD_LINKER_CREATED | BFD_PLUGIN)) == 0)
+ {
+ elf_property_list *p;
+
+ /* The property list is sorted in order of type. */
+ for (p = elf_properties (pbfd); p != NULL; p = p->next)
+ {
+ if (GNU_PROPERTY_NO_COPY_ON_PROTECTED
+ == p->property.pr_type)
+ {
+ /* Clear extern_protected_data if
+ GNU_PROPERTY_NO_COPY_ON_PROTECTED is
+ set on any input relocatable file. */
+ info->extern_protected_data = FALSE;
+ break;
+ }
+ else if (GNU_PROPERTY_NO_COPY_ON_PROTECTED
+ < p->property.pr_type)
+ break;
+ }
+ }
+ }
+
+ pbfd = _bfd_elf_link_setup_gnu_properties (info);
+
+ if (bfd_link_relocatable (info))
+ return pbfd;
+
+ htab = elf_x86_64_hash_table (info);
+ if (htab == NULL)
+ return pbfd;
+
+ use_ibt_plt = info->ibtplt || info->ibt;
+ if (!use_ibt_plt && pbfd != NULL)
+ {
+ /* Check if GNU_PROPERTY_X86_FEATURE_1_IBT is on. */
+ elf_property_list *p;
+
+ /* The property list is sorted in order of type. */
+ for (p = elf_properties (pbfd); p; p = p->next)
+ {
+ if (GNU_PROPERTY_X86_FEATURE_1_AND == p->property.pr_type)
+ {
+ use_ibt_plt = !!(p->property.u.number
+ & GNU_PROPERTY_X86_FEATURE_1_IBT);
+ break;
+ }
+ else if (GNU_PROPERTY_X86_FEATURE_1_AND < p->property.pr_type)
+ break;
+ }
+ }
+
+ dynobj = htab->elf.dynobj;
+
+ /* Set htab->elf.dynobj here so that there is no need to check and
+ set it in check_relocs. */
+ if (dynobj == NULL)
+ {
+ if (pbfd != NULL)
+ {
+ htab->elf.dynobj = pbfd;
+ dynobj = pbfd;
+ }
+ else
+ {
+ bfd *abfd;
+
+ /* Find a normal input file to hold linker created
+ sections. */
+ for (abfd = info->input_bfds;
+ abfd != NULL;
+ abfd = abfd->link.next)
+ if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
+ && (abfd->flags
+ & (DYNAMIC | BFD_LINKER_CREATED | BFD_PLUGIN)) == 0)
+ {
+ htab->elf.dynobj = abfd;
+ dynobj = abfd;
+ break;
+ }
+ }
+ }
+
+ /* Even when lazy binding is disabled by "-z now", the PLT0 entry may
+ still be used with LD_AUDIT or LD_PROFILE if PLT entry is used for
+ canonical function address. */
+ htab->plt.has_plt0 = 1;
+
+ if (get_elf_x86_64_backend_data (info->output_bfd)->os
+ == is_normal)
+ {
+ if (use_ibt_plt)
+ {
+ if (ABI_64_P (dynobj))
+ {
+ htab->lazy_plt = &elf_x86_64_lazy_ibt_plt;
+ htab->non_lazy_plt = &elf_x86_64_non_lazy_ibt_plt;
+ }
+ else
+ {
+ htab->lazy_plt = &elf_x32_lazy_ibt_plt;
+ htab->non_lazy_plt = &elf_x32_non_lazy_ibt_plt;
+ }
+ }
+ else if (info->bndplt)
+ {
+ htab->lazy_plt = &elf_x86_64_lazy_bnd_plt;
+ htab->non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
+ }
+ else
+ {
+ htab->lazy_plt = &elf_x86_64_lazy_plt;
+ htab->non_lazy_plt = &elf_x86_64_non_lazy_plt;
+ }
+ normal_target = TRUE;
+ }
+ else
+ {
+ htab->lazy_plt = &elf_x86_64_nacl_plt;
+ htab->non_lazy_plt = NULL;
+ normal_target = FALSE;
+ }
+
+ pltsec = htab->elf.splt;
+
+ /* If the non-lazy PLT is available, use it for all PLT entries if
+ there are no PLT0 or no .plt section. */
+ if (htab->non_lazy_plt != NULL
+ && (!htab->plt.has_plt0 || pltsec == NULL))
+ {
+ lazy_plt = FALSE;
+ htab->plt.plt_entry
+ = htab->non_lazy_plt->plt_entry;
+ htab->plt.plt_entry_size
+ = htab->non_lazy_plt->plt_entry_size;
+ htab->plt.plt_got_offset
+ = htab->non_lazy_plt->plt_got_offset;
+ htab->plt.plt_got_insn_size
+ = htab->non_lazy_plt->plt_got_insn_size;
+ htab->plt.eh_frame_plt_size
+ = htab->non_lazy_plt->eh_frame_plt_size;
+ htab->plt.eh_frame_plt
+ = htab->non_lazy_plt->eh_frame_plt;
+ }
+ else
+ {
+ lazy_plt = TRUE;
+ htab->plt.plt_entry
+ = htab->lazy_plt->plt_entry;
+ htab->plt.plt_entry_size
+ = htab->lazy_plt->plt_entry_size;
+ htab->plt.plt_got_offset
+ = htab->lazy_plt->plt_got_offset;
+ htab->plt.plt_got_insn_size
+ = htab->lazy_plt->plt_got_insn_size;
+ htab->plt.eh_frame_plt_size
+ = htab->lazy_plt->eh_frame_plt_size;
+ htab->plt.eh_frame_plt
+ = htab->lazy_plt->eh_frame_plt;
+ }
+
+ /* Return if there are no normal input files. */
+ if (dynobj == NULL)
+ return pbfd;
+
+ /* Since create_dynamic_sections isn't always called, but GOT
+     relocations need GOT sections, create them here so that we
+ don't need to do it in check_relocs. */
+ if (htab->elf.sgot == NULL
+ && !_bfd_elf_create_got_section (dynobj, info))
+ info->callbacks->einfo (_("%F: failed to create GOT sections\n"));
+
+ /* Align .got and .got.plt sections to their entry size. Do it here
+ instead of in create_dynamic_sections so that they are always
+ properly aligned even if create_dynamic_sections isn't called. */
+ sec = htab->elf.sgot;
+ if (!bfd_set_section_alignment (dynobj, sec, 3))
+ goto error_alignment;
+
+ sec = htab->elf.sgotplt;
+ if (!bfd_set_section_alignment (dynobj, sec, 3))
+ goto error_alignment;
+
+ /* Create the ifunc sections here so that check_relocs can be
+ simplified. */
+ if (!_bfd_elf_create_ifunc_sections (dynobj, info))
+ info->callbacks->einfo (_("%F: failed to create ifunc sections\n"));
+
+ plt_alignment = bfd_log2 (htab->plt.plt_entry_size);
+
+ if (pltsec != NULL)
+ {
+      /* When creating an executable, set the contents of the .interp
+ section to the interpreter. */
+ if (bfd_link_executable (info) && !info->nointerp)
+ {
+ asection *s = bfd_get_linker_section (dynobj, ".interp");
+ if (s == NULL)
+ abort ();
+ s->size = htab->dynamic_interpreter_size;
+ s->contents = (unsigned char *) htab->dynamic_interpreter;
+ htab->interp = s;
+ }
+
+ /* Don't change PLT section alignment for NaCl since it uses
+ 64-byte PLT entry and sets PLT section alignment to 32
+ bytes. Don't create additional PLT sections for NaCl. */
+ if (normal_target)
+ {
+ const struct elf_backend_data *bed
+ = get_elf_backend_data (dynobj);
+ flagword pltflags = (bed->dynamic_sec_flags
+ | SEC_ALLOC
+ | SEC_CODE
+ | SEC_LOAD
+ | SEC_READONLY);
+ unsigned int non_lazy_plt_alignment
+ = bfd_log2 (htab->non_lazy_plt->plt_entry_size);
+
+ sec = pltsec;
+ if (!bfd_set_section_alignment (sec->owner, sec,
+ plt_alignment))
+ goto error_alignment;
+
+ /* Create the GOT procedure linkage table. */
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".plt.got",
+ pltflags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create GOT PLT section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ non_lazy_plt_alignment))
+ goto error_alignment;
+
+ htab->plt_got = sec;
+
+ if (lazy_plt)
+ {
+ sec = NULL;
+
+ if (use_ibt_plt)
+ {
+ /* Create the second PLT for Intel IBT support. IBT
+		     PLT is supported only for non-NaCl target and is
+		     needed only for lazy binding.  */
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".plt.sec",
+ pltflags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create IBT-enabled PLT section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ plt_alignment))
+ goto error_alignment;
+ }
+ else if (info->bndplt && ABI_64_P (dynobj))
+ {
+ /* Create the second PLT for Intel MPX support. MPX
+ PLT is supported only for non-NaCl target in 64-bit
+ mode and is needed only for lazy binding. */
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".plt.sec",
+ pltflags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create BND PLT section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ non_lazy_plt_alignment))
+ goto error_alignment;
+ }
+
+ htab->plt_second = sec;
+ }
+ }
+
+ if (!info->no_ld_generated_unwind_info)
+ {
+ flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
+ | SEC_HAS_CONTENTS | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED);
+
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".eh_frame",
+ flags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create PLT .eh_frame section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ ABI_64_P (dynobj) ? 3 : 2))
+ goto error_alignment;
+
+ htab->plt_eh_frame = sec;
+
+ if (htab->plt_got != NULL)
+ {
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".eh_frame",
+ flags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create GOT PLT .eh_frame section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ ABI_64_P (dynobj) ? 3 : 2))
+ goto error_alignment;
+
+ htab->plt_got_eh_frame = sec;
+ }
+
+ if (htab->plt_second != NULL)
+ {
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".eh_frame",
+ flags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create BND PLT .eh_frame section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec, 3))
+ goto error_alignment;
+
+ htab->plt_second_eh_frame = sec;
+ }
+ }
+ }
+
+ if (normal_target)
+ {
+ /* The .iplt section is used for IFUNC symbols in static
+ executables. */
+ sec = htab->elf.iplt;
+ if (sec != NULL
+ && !bfd_set_section_alignment (sec->owner, sec,
+ plt_alignment))
+ goto error_alignment;
+ }
+
+ return pbfd;
+}
+
+static bfd_boolean
+elf_x86_64_link_check_relocs (bfd *abfd, struct bfd_link_info *info)
+{
+ if (!bfd_link_relocatable (info))
+ {
+ /* Check for __tls_get_addr reference. */
+ struct elf_link_hash_entry *h;
+ h = elf_link_hash_lookup (elf_hash_table (info), "__tls_get_addr",
+ FALSE, FALSE, FALSE);
+ if (h != NULL)
+ ((struct elf_x86_64_link_hash_entry *) h)->tls_get_addr = 1;
+ }
+
+ /* Invoke the regular ELF backend linker to do all the work. */
+ return _bfd_elf_link_check_relocs (abfd, info);
+}
+
static const struct bfd_elf_special_section
- elf_x86_64_special_sections[]=
+elf_x86_64_special_sections[]=
{
{ STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
{ STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
#define elf_backend_plt_alignment 4
#define elf_backend_extern_protected_data 1
#define elf_backend_caches_rawsize 1
+#define elf_backend_dtrel_excludes_plt 1
+#define elf_backend_want_dynrelro 1
#define elf_info_to_howto elf_x86_64_info_to_howto
#define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
#define elf_backend_check_relocs elf_x86_64_check_relocs
#define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
-#define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
+#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
+#define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
#define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
#define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
#define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
#define elf_backend_object_p elf64_x86_64_elf_object_p
#define bfd_elf64_mkobject elf_x86_64_mkobject
#define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
+#define bfd_elf64_bfd_link_check_relocs elf_x86_64_link_check_relocs
#define elf_backend_section_from_shdr \
elf_x86_64_section_from_shdr
((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
#define elf_backend_fixup_symbol \
elf_x86_64_fixup_symbol
+#define elf_backend_parse_gnu_properties \
+ elf_x86_64_parse_gnu_properties
+#define elf_backend_merge_gnu_properties \
+ elf_x86_64_merge_gnu_properties
+#define elf_backend_setup_gnu_properties \
+ elf_x86_64_link_setup_gnu_properties
#include "elf64-target.h"
DW_CFA_nop, DW_CFA_nop
};
-static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_nacl_plt =
{
elf_x86_64_nacl_plt0_entry, /* plt0_entry */
elf_x86_64_nacl_plt_entry, /* plt_entry */
42, /* plt_plt_insn_end */
32, /* plt_lazy_offset */
elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
- sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
+ sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
+ {
+ is_nacl /* os */
};
#undef elf_backend_arch_data
elf_x86_64_mkobject
#define bfd_elf32_get_synthetic_symtab \
elf_x86_64_get_synthetic_symtab
+#define bfd_elf32_bfd_link_check_relocs \
+ elf_x86_64_link_check_relocs
#undef elf_backend_object_p
#define elf_backend_object_p \