/* PowerPC64-specific support for 64-bit ELF.
- Copyright (C) 1999-2018 Free Software Foundation, Inc.
+ Copyright (C) 1999-2019 Free Software Foundation, Inc.
Written by Linus Nordberg, Swox AB <info@swox.com>,
based on elf32-ppc.c by Ian Lance Taylor.
Largely rewritten by Alan Modra.
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_reloc_status_type ppc64_elf_toc64_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
+static bfd_reloc_status_type ppc64_elf_prefix_reloc
+ (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_reloc_status_type ppc64_elf_unhandled_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_vma opd_entry_value
#define LD_R2_0R12 0xe84c0000 /* ld %r2,0(%r12) */
#define ADD_R2_R2_R12 0x7c426214 /* add %r2,%r2,%r12 */
+#define LI_R11_0 0x39600000 /* li %r11,0 */
#define LIS_R2 0x3c400000 /* lis %r2,xxx@ha */
+#define LIS_R11 0x3d600000 /* lis %r11,xxx@ha */
#define LIS_R12 0x3d800000 /* lis %r12,xxx@ha */
#define ADDIS_R2_R12 0x3c4c0000 /* addis %r2,%r12,xxx@ha */
#define ADDIS_R12_R2 0x3d820000 /* addis %r12,%r2,xxx@ha */
#define ADDIS_R12_R11 0x3d8b0000 /* addis %r12,%r11,xxx@ha */
#define ADDIS_R12_R12 0x3d8c0000 /* addis %r12,%r12,xxx@ha */
#define ORIS_R12_R12_0 0x658c0000 /* oris %r12,%r12,xxx@hi */
+#define ORI_R11_R11_0 0x616b0000 /* ori %r11,%r11,xxx@l */
#define ORI_R12_R12_0 0x618c0000 /* ori %r12,%r12,xxx@l */
#define LD_R12_0R12 0xe98c0000 /* ld %r12,xxx@l(%r12) */
+#define SLDI_R11_R11_34 0x796b1746 /* sldi %r11,%r11,34 */
#define SLDI_R12_R12_32 0x799c07c6 /* sldi %r12,%r12,32 */
#define LDX_R12_R11_R12 0x7d8b602a /* ldx %r12,%r11,%r12 */
#define ADD_R12_R11_R12 0x7d8b6214 /* add %r12,%r11,%r12 */
+#define PADDI_R12_PC 0x0610000039800000ULL
+#define PLD_R12_PC 0x04100000e5800000ULL
+#define PNOP 0x0700000000000000ULL
/* __glink_PLTresolve stub instructions. We enter with the index in R0. */
#define GLINK_PLTRESOLVE_SIZE(htab) \
HOW (R_PPC64_REL16_HA, 1, 16, 0xffff, 16, TRUE, signed,
ppc64_elf_ha_reloc),
+ HOW (R_PPC64_REL16_HIGH, 1, 16, 0xffff, 16, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHA, 1, 16, 0xffff, 16, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHER, 1, 16, 0xffff, 32, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHERA, 1, 16, 0xffff, 32, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHEST, 1, 16, 0xffff, 48, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHESTA, 1, 16, 0xffff, 48, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
/* Like R_PPC64_REL16_HA but for split field in addpcis. */
HOW (R_PPC64_REL16DX_HA, 2, 16, 0x1fffc1, 16, TRUE, signed,
ppc64_elf_ha_reloc),
HOW (R_PPC64_ADDR64_LOCAL, 4, 64, 0xffffffffffffffffULL, 0, FALSE, dont,
bfd_elf_generic_reloc),
+ HOW (R_PPC64_PLTSEQ_NOTOC, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_PLTCALL_NOTOC, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_PCREL_OPT, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_D34, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_LO, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_HI30, 4, 34, 0x3ffff0000ffffULL, 34, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_HA30, 4, 34, 0x3ffff0000ffffULL, 34, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_GOT_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_PLT_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_PLT_PCREL34_NOTOC, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHER34, 1, 16, 0xffff, 34, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHERA34, 1, 16, 0xffff, 34, FALSE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHEST34, 1, 16, 0xffff, 50, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHESTA34, 1, 16, 0xffff, 50, FALSE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHER34, 1, 16, 0xffff, 34, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHERA34, 1, 16, 0xffff, 34, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHEST34, 1, 16, 0xffff, 50, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHESTA34, 1, 16, 0xffff, 50, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_D28, 4, 28, 0xfff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_PCREL28, 4, 28, 0xfff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_prefix_reloc),
+
/* GNU extension to record C++ vtable hierarchy. */
HOW (R_PPC64_GNU_VTINHERIT, 0, 0, 0, 0, FALSE, dont,
NULL),
break;
case BFD_RELOC_HI16_S_PCREL: r = R_PPC64_REL16_HA;
break;
+ case BFD_RELOC_PPC64_REL16_HIGH: r = R_PPC64_REL16_HIGH;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHA: r = R_PPC64_REL16_HIGHA;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHER: r = R_PPC64_REL16_HIGHER;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHERA: r = R_PPC64_REL16_HIGHERA;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHEST: r = R_PPC64_REL16_HIGHEST;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHESTA: r = R_PPC64_REL16_HIGHESTA;
+ break;
case BFD_RELOC_PPC_16DX_HA: r = R_PPC64_16DX_HA;
break;
case BFD_RELOC_PPC_REL16DX_HA: r = R_PPC64_REL16DX_HA;
break;
case BFD_RELOC_PPC64_ADDR64_LOCAL: r = R_PPC64_ADDR64_LOCAL;
break;
+ case BFD_RELOC_PPC64_D34: r = R_PPC64_D34;
+ break;
+ case BFD_RELOC_PPC64_D34_LO: r = R_PPC64_D34_LO;
+ break;
+ case BFD_RELOC_PPC64_D34_HI30: r = R_PPC64_D34_HI30;
+ break;
+ case BFD_RELOC_PPC64_D34_HA30: r = R_PPC64_D34_HA30;
+ break;
+ case BFD_RELOC_PPC64_PCREL34: r = R_PPC64_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_GOT_PCREL34: r = R_PPC64_GOT_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_PLT_PCREL34: r = R_PPC64_PLT_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHER34: r = R_PPC64_ADDR16_HIGHER34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHERA34: r = R_PPC64_ADDR16_HIGHERA34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHEST34: r = R_PPC64_ADDR16_HIGHEST34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHESTA34: r = R_PPC64_ADDR16_HIGHESTA34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHER34: r = R_PPC64_REL16_HIGHER34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHERA34: r = R_PPC64_REL16_HIGHERA34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHEST34: r = R_PPC64_REL16_HIGHEST34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHESTA34: r = R_PPC64_REL16_HIGHESTA34;
+ break;
+ case BFD_RELOC_PPC64_D28: r = R_PPC64_D28;
+ break;
+ case BFD_RELOC_PPC64_PCREL28: r = R_PPC64_PCREL28;
+ break;
case BFD_RELOC_VTABLE_INHERIT: r = R_PPC64_GNU_VTINHERIT;
break;
case BFD_RELOC_VTABLE_ENTRY: r = R_PPC64_GNU_VTENTRY;
return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
- /* Adjust the addend for sign extension of the low 16 bits.
- We won't actually be using the low 16 bits, so trashing them
+ /* Adjust the addend for sign extension of the low 16 (or 34) bits.
+ We won't actually be using the low bits, so trashing them
doesn't matter. */
- reloc_entry->addend += 0x8000;
r_type = reloc_entry->howto->type;
+ if (r_type == R_PPC64_ADDR16_HIGHERA34
+ || r_type == R_PPC64_ADDR16_HIGHESTA34
+ || r_type == R_PPC64_REL16_HIGHERA34
+ || r_type == R_PPC64_REL16_HIGHESTA34)
+ reloc_entry->addend += 1ULL << 33;
+ else
+ reloc_entry->addend += 1U << 15;
if (r_type != R_PPC64_REL16DX_HA)
return bfd_reloc_continue;
return bfd_reloc_ok;
}
+/* Handle relocs against the split 34-bit (or 28-bit) D field of
+   eight-byte prefixed instructions, for bfd_perform_relocation.  The
+   field's upper bits live in the prefix word and the low 16 bits in
+   the suffix word, as described by the howto dst_mask.  */
+static bfd_reloc_status_type
+ppc64_elf_prefix_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
+ void *data, asection *input_section,
+ bfd *output_bfd, char **error_message)
+{
+ uint64_t insn;
+ bfd_vma targ;
+
+ /* For a relocatable link, leave the reloc to the final link.  */
+ if (output_bfd != NULL)
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ input_section, output_bfd, error_message);
+
+ /* Fetch the 8-byte insn, prefix word in the high 32 bits.  */
+ insn = bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address);
+ insn <<= 32;
+ insn |= bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address + 4);
+
+ targ = (symbol->section->output_section->vma
+ + symbol->section->output_offset
+ + reloc_entry->addend);
+ if (!bfd_is_com_section (symbol->section))
+ targ += symbol->value;
+ /* @ha variant: round up to compensate for sign extension of the
+    low 34 bits.  */
+ if (reloc_entry->howto->type == R_PPC64_D34_HA30)
+ targ += 1ULL << 33;
+ if (reloc_entry->howto->pc_relative)
+ {
+ bfd_vma from = (reloc_entry->address
+ + input_section->output_offset
+ + input_section->output_section->vma);
+ targ -=from;
+ }
+ targ >>= reloc_entry->howto->rightshift;
+ /* Insert the value into the split field: (targ << 16) supplies the
+    prefix-word bits, (targ & 0xffff) the suffix-word bits; dst_mask
+    keeps only the valid field bits of each word.  */
+ insn &= ~reloc_entry->howto->dst_mask;
+ insn |= ((targ << 16) | (targ & 0xffff)) & reloc_entry->howto->dst_mask;
+ bfd_put_32 (abfd, insn >> 32, (bfd_byte *) data + reloc_entry->address);
+ bfd_put_32 (abfd, insn, (bfd_byte *) data + reloc_entry->address + 4);
+ /* Signed range check against the howto bitsize (34 or 28), done
+    after the insn is patched so the caller sees the attempt.  */
+ if (reloc_entry->howto->complain_on_overflow == complain_overflow_signed
+ && (targ + (1ULL << (reloc_entry->howto->bitsize - 1))
+ >= 1ULL << reloc_entry->howto->bitsize))
+ return bfd_reloc_overflow;
+ return bfd_reloc_ok;
+}
+
static bfd_reloc_status_type
ppc64_elf_unhandled_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
void *data, asection *input_section,
/* Set if toc/got ha relocs detected not using r2, or lo reloc
instruction not one we handle. */
unsigned int unexpected_toc_insn : 1;
+
+ /* Set if got relocs that can be optimised are present in this file. */
+ unsigned int has_gotrel : 1;
};
#define ppc64_elf_tdata(bfd) \
/* Flag set when PLTCALL relocs are detected. */
unsigned int has_pltcall:1;
+
+ /* Flag set when section has GOT relocations that can be optimised. */
+ unsigned int has_gotrel:1;
};
#define ppc64_elf_section_data(sec) \
. mflr %r11
. mtlr %r12
. lis %r12,xxx-1b@highest
- . ori %r12,xxx-1b@higher
+ . ori %r12,%r12,xxx-1b@higher
. sldi %r12,%r12,32
- . oris %r12,%r12,xxx-1b@hi
+ . oris %r12,%r12,xxx-1b@high
. ori %r12,%r12,xxx-1b@l
. add %r12,%r11,%r12
. mtctr %r12
. mflr %r11
. mtlr %r12
. lis %r12,xxx-1b@highest
- . ori %r12,xxx-1b@higher
+ . ori %r12,%r12,xxx-1b@higher
. sldi %r12,%r12,32
- . oris %r12,%r12,xxx-1b@hi
+ . oris %r12,%r12,xxx-1b@high
. ori %r12,%r12,xxx-1b@l
. ldx %r12,%r11,%r12
. mtctr %r12
. bctr
+ There are also ELFv1 powerxx variants of these stubs.
+ ppc_stub_long_branch_notoc:
+ . pla %r12,dest@pcrel
+ . b dest
+ ppc_stub_plt_branch_notoc:
+ . lis %r11,(dest-1f)@highesta34
+ . ori %r11,%r11,(dest-1f)@highera34
+ . sldi %r11,%r11,34
+ . 1: pla %r12,dest@pcrel
+ . add %r12,%r11,%r12
+ . mtctr %r12
+ . bctr
+ ppc_stub_plt_call_notoc:
+ . lis %r11,(xxx-1f)@highesta34
+ . ori %r11,%r11,(xxx-1f)@highera34
+ . sldi %r11,%r11,34
+ . 1: pla %r12,xxx@pcrel
+ . ldx %r12,%r11,%r12
+ . mtctr %r12
+ . bctr
+
In cases where the high instructions would add zero, they are
omitted and following instructions modified in some cases.
+ For example, a powerxx ppc_stub_plt_call_notoc might simplify down
+ to
+ . pld %r12,xxx@pcrel
+ . mtctr %r12
+ . bctr
For a given stub group (a set of sections all using the same toc
pointer value) there will be just one stub type used for any
of the other TLS bits are set. tls_optimize clears bits when
optimizing to indicate the corresponding GOT entry type is not
needed. If set, TLS_TLS is never cleared. tls_optimize may also
- set TLS_TPRELGD when a GD reloc turns into a TPREL one. We use a
- separate flag rather than setting TPREL just for convenience in
- distinguishing the two cases.
+ set TLS_GDIE when a GD reloc turns into an IE one.
These flags are also kept for local symbols. */
#define TLS_TLS 1 /* Any TLS reloc. */
#define TLS_GD 2 /* GD reloc. */
#define TLS_TPREL 8 /* TPREL reloc, => IE. */
#define TLS_DTPREL 16 /* DTPREL reloc, => LD. */
#define TLS_MARK 32 /* __tls_get_addr call marked. */
-#define TLS_TPRELGD 64 /* TPREL reloc resulting from GD->IE. */
-#define TLS_EXPLICIT 128 /* Marks TOC section TLS relocs. */
+#define TLS_GDIE 64 /* GOT TPREL reloc resulting from GD->IE. */
+#define TLS_EXPLICIT 256 /* TOC section TLS reloc, not stored. */
unsigned char tls_mask;
/* The above field is also used to mark function symbols. In which
/* Whether plt calls for ELFv2 localentry:0 funcs have been optimized. */
unsigned int has_plt_localentry0:1;
+ /* Whether calls are made via the PLT from NOTOC functions. */
+ unsigned int notoc_plt:1;
+
+ /* Whether to use powerxx instructions in linkage stubs. */
+ unsigned int powerxx_stubs:1;
+
/* Incremented every time we size stubs. */
unsigned int stub_iteration;
|| r_type == R_PPC64_ADDR14
|| r_type == R_PPC64_ADDR14_BRTAKEN
|| r_type == R_PPC64_ADDR14_BRNTAKEN
- || r_type == R_PPC64_PLTCALL);
+ || r_type == R_PPC64_PLTCALL
+ || r_type == R_PPC64_PLTCALL_NOTOC);
}
/* Relocs on inline plt call sequence insns prior to the call. */
|| r_type == R_PPC64_PLT16_HI
|| r_type == R_PPC64_PLT16_LO
|| r_type == R_PPC64_PLT16_LO_DS
- || r_type == R_PPC64_PLTSEQ);
+ || r_type == R_PPC64_PLT_PCREL34
+ || r_type == R_PPC64_PLT_PCREL34_NOTOC
+ || r_type == R_PPC64_PLTSEQ
+ || r_type == R_PPC64_PLTSEQ_NOTOC);
}
/* Look through the relocs for a section during the first phase, and
int tls_type;
struct _ppc64_elf_section_data *ppc64_sec;
struct plt_entry **ifunc, **plt_list;
+ bfd_vma sym_addend;
r_symndx = ELF64_R_SYM (rel->r_info);
if (r_symndx < symtab_hdr->sh_info)
tls_type = 0;
ifunc = NULL;
+ r_type = ELF64_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_D28:
+ htab->powerxx_stubs = 1;
+ /* Fall through. */
+ default:
+ /* Somewhat foolishly, because the ABIs don't specifically
+ allow it, ppc64 gas and ld support GOT and PLT relocs
+ with non-zero addends where the addend results in
+ sym+addend being stored in the GOT or PLT entry. This
+ can't be supported for pcrel relocs because the addend is
+ used to specify the pcrel offset. */
+ sym_addend = rel->r_addend;
+ break;
+
+ case R_PPC64_PCREL34:
+ case R_PPC64_GOT_PCREL34:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ case R_PPC64_PCREL28:
+ htab->powerxx_stubs = 1;
+ sym_addend = 0;
+ break;
+ }
if (h != NULL)
{
if (h->type == STT_GNU_IFUNC)
if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
{
ifunc = update_local_sym_info (abfd, symtab_hdr, r_symndx,
- rel->r_addend,
+ sym_addend,
NON_GOT | PLT_IFUNC);
if (ifunc == NULL)
return FALSE;
}
}
- r_type = ELF64_R_TYPE (rel->r_info);
switch (r_type)
{
case R_PPC64_TLSGD:
((struct ppc_link_hash_entry *) h)->tls_mask |= TLS_TLS | TLS_MARK;
else
if (!update_local_sym_info (abfd, symtab_hdr, r_symndx,
- rel->r_addend,
+ sym_addend,
NON_GOT | TLS_TLS | TLS_MARK))
return FALSE;
sec->has_tls_reloc = 1;
tls_type = TLS_TLS | TLS_DTPREL;
dogottls:
sec->has_tls_reloc = 1;
- /* Fall through */
+ goto dogot;
- case R_PPC64_GOT16:
- case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_HA:
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
+ ppc64_elf_tdata (abfd)->has_gotrel = 1;
+ ppc64_elf_section_data (sec)->has_gotrel = 1;
+ /* Fall through. */
+
+ case R_PPC64_GOT16_DS:
+ case R_PPC64_GOT16:
case R_PPC64_GOT16_HI:
case R_PPC64_GOT16_LO:
- case R_PPC64_GOT16_LO_DS:
+ dogot:
/* This symbol requires a global offset table entry. */
sec->has_toc_reloc = 1;
if (r_type == R_PPC64_GOT_TLSLD16
eh = (struct ppc_link_hash_entry *) h;
for (ent = eh->elf.got.glist; ent != NULL; ent = ent->next)
- if (ent->addend == rel->r_addend
+ if (ent->addend == sym_addend
&& ent->owner == abfd
&& ent->tls_type == tls_type)
break;
if (ent == NULL)
return FALSE;
ent->next = eh->elf.got.glist;
- ent->addend = rel->r_addend;
+ ent->addend = sym_addend;
ent->owner = abfd;
ent->tls_type = tls_type;
ent->is_indirect = FALSE;
else
/* This is a global offset table entry for a local symbol. */
if (!update_local_sym_info (abfd, symtab_hdr, r_symndx,
- rel->r_addend, tls_type))
+ sym_addend, tls_type))
return FALSE;
/* We may also need a plt entry if the symbol turns out to be
an ifunc. */
if (h != NULL && !bfd_link_pic (info) && abiversion (abfd) != 1)
{
- if (!update_plt_info (abfd, &h->plt.plist, rel->r_addend))
+ if (!update_plt_info (abfd, &h->plt.plist, sym_addend))
return FALSE;
}
break;
case R_PPC64_PLT16_HI:
case R_PPC64_PLT16_LO:
case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
case R_PPC64_PLT32:
case R_PPC64_PLT64:
/* This symbol requires a procedure linkage table entry. */
}
if (plt_list == NULL)
plt_list = update_local_sym_info (abfd, symtab_hdr, r_symndx,
- rel->r_addend,
+ sym_addend,
NON_GOT | PLT_KEEP);
- if (!update_plt_info (abfd, plt_list, rel->r_addend))
+ if (!update_plt_info (abfd, plt_list, sym_addend))
return FALSE;
break;
case R_PPC64_REL16_LO:
case R_PPC64_REL16_HI:
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGH:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHER:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHEST:
+ case R_PPC64_REL16_HIGHESTA:
+ case R_PPC64_REL16_HIGHER34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHEST34:
+ case R_PPC64_REL16_HIGHESTA34:
case R_PPC64_REL16DX_HA:
break;
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_PPC64_GNU_VTENTRY:
- BFD_ASSERT (h != NULL);
- if (h != NULL
- && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
+ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
return FALSE;
break;
goto rel24;
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
ppc64_elf_section_data (sec)->has_pltcall = 1;
/* Fall through. */
/* We may need a .plt entry if the function this reloc
refers to is in a shared lib. */
if (plt_list
- && !update_plt_info (abfd, plt_list, rel->r_addend))
+ && !update_plt_info (abfd, plt_list, sym_addend))
return FALSE;
break;
{
struct ppc_link_hash_entry *eh;
eh = (struct ppc_link_hash_entry *) h;
- eh->tls_mask |= tls_type;
+ eh->tls_mask |= tls_type & 0xff;
}
else
if (!update_local_sym_info (abfd, symtab_hdr, r_symndx,
- rel->r_addend, tls_type))
+ sym_addend, tls_type))
return FALSE;
ppc64_sec = ppc64_elf_section_data (sec);
}
BFD_ASSERT (rel->r_offset % 8 == 0);
ppc64_sec->u.toc.symndx[rel->r_offset / 8] = r_symndx;
- ppc64_sec->u.toc.add[rel->r_offset / 8] = rel->r_addend;
+ ppc64_sec->u.toc.add[rel->r_offset / 8] = sym_addend;
/* Mark the second slot of a GD or LD entry.
-1 to indicate GD and -2 to indicate LD. */
case R_PPC64_ADDR16_HIGHESTA:
case R_PPC64_ADDR16_LO:
case R_PPC64_ADDR16_LO_DS:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_D28:
if (h != NULL && !bfd_link_pic (info) && abiversion (abfd) != 1
&& rel->r_addend == 0)
{
/* We may need a .plt entry if this reloc refers to a
function in a shared lib. */
- if (!update_plt_info (abfd, &h->plt.plist, rel->r_addend))
+ if (!update_plt_info (abfd, &h->plt.plist, 0))
return FALSE;
h->pointer_equality_needed = 1;
}
struct ppc_link_hash_entry *eh;
_bfd_elf_link_hash_hide_symbol (info, h, force_local);
+ if (ppc_hash_table (info) == NULL)
+ return;
+
eh = (struct ppc_link_hash_entry *) h;
if (eh->is_func_descriptor)
{
case R_PPC64_UADDR32:
case R_PPC64_UADDR64:
case R_PPC64_TOC:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_D28:
break;
}
unsigned char *tls_maskp;
r_type = ELF64_R_TYPE (rel->r_info);
- if (r_type != R_PPC64_PLTCALL)
+ if (r_type != R_PPC64_PLTCALL
+ && r_type != R_PPC64_PLTCALL_NOTOC)
continue;
r_symndx = ELF64_R_SYM (rel->r_info);
from = (rel->r_offset
+ sec->output_offset
+ sec->output_section->vma);
- if (to - from + limit < 2 * limit)
+ if (to - from + limit < 2 * limit
+ && !(r_type == R_PPC64_PLTCALL_NOTOC
+ && (((h ? h->other : sym->st_other)
+ & STO_PPC64_LOCAL_MASK)
+ > 1 << STO_PPC64_LOCAL_BIT)))
*tls_maskp &= ~PLT_KEEP;
}
}
Elf_Internal_Sym *sym;
asection *sym_sec;
unsigned char *tls_mask;
- unsigned char tls_set, tls_clear, tls_type = 0;
+ unsigned int tls_set, tls_clear, tls_type = 0;
bfd_vma value;
bfd_boolean ok_tprel, is_local;
long toc_ref_index = 0;
{
value += sym_sec->output_offset;
value += sym_sec->output_section->vma;
- value -= htab->elf.tls_sec->vma;
- ok_tprel = (value + TP_OFFSET + ((bfd_vma) 1 << 31)
- < (bfd_vma) 1 << 32);
+ value -= htab->elf.tls_sec->vma + TP_OFFSET;
+ ok_tprel = value + 0x80008000ULL < 1ULL << 32;
}
}
tls_set = 0;
else
/* GD -> IE */
- tls_set = TLS_TLS | TLS_TPRELGD;
+ tls_set = TLS_TLS | TLS_GDIE;
tls_clear = TLS_GD;
tls_type = TLS_TLS | TLS_GD;
break;
{
if (pass != 0
&& (ELF64_R_TYPE (rel[1].r_info)
- != R_PPC64_PLTSEQ))
+ != R_PPC64_PLTSEQ)
+ && (ELF64_R_TYPE (rel[1].r_info)
+ != R_PPC64_PLTSEQ_NOTOC))
{
r_symndx = ELF64_R_SYM (rel[1].r_info);
if (!get_sym_h (&h, NULL, NULL, NULL, &locsyms,
tls_set = TLS_EXPLICIT | TLS_GD;
else
/* GD -> IE */
- tls_set = TLS_EXPLICIT | TLS_GD | TLS_TPRELGD;
+ tls_set = TLS_EXPLICIT | TLS_GD | TLS_GDIE;
tls_clear = TLS_GD;
}
else
}
}
- *tls_mask |= tls_set;
+ *tls_mask |= tls_set & 0xff;
*tls_mask &= ~tls_clear;
}
&& (insn & 1) == 0));
}
+/* PCREL_OPT in one instance flags to the linker that a pair of insns:
+ pld ra,symbol@got@pcrel
+ load/store rt,0(ra)
+ or
+ pla ra,symbol@pcrel
+ load/store rt,0(ra)
+ may be translated to
+ pload/pstore rt,symbol@pcrel
+ nop.
+ This function returns true if the optimization is possible, placing
+ the prefix insn in *PINSN1 and a NOP in *PINSN2.
+
+ On entry to this function, the linker has already determined that
+ the pld can be replaced with pla: *PINSN1 is that pla insn,
+ while *PINSN2 is the second instruction. */
+
+static bfd_boolean
+xlate_pcrel_opt (uint64_t *pinsn1, uint64_t *pinsn2)
+{
+ /* The second insn is held in the high 32 bits of *PINSN2.  */
+ uint32_t insn2 = *pinsn2 >> 32;
+ uint64_t i1new;
+
+ /* Check that regs match: RA of the load/store (bits 16-20) must be
+    the RT set by the pla (bits 21-25 of the suffix word).  */
+ if (((insn2 >> 16) & 31) != ((*pinsn1 >> 21) & 31))
+ return FALSE;
+
+ /* Dispatch on the primary opcode of the second insn.  */
+ switch ((insn2 >> 26) & 63)
+ {
+ default:
+ return FALSE;
+
+ case 32: /* lwz */
+ case 34: /* lbz */
+ case 36: /* stw */
+ case 38: /* stb */
+ case 40: /* lhz */
+ case 42: /* lha */
+ case 44: /* sth */
+ case 48: /* lfs */
+ case 50: /* lfd */
+ case 52: /* stfs */
+ case 54: /* stfd */
+ /* These are the PMLS cases, where we just need to tack a prefix
+ on the insn. Check that the D field is zero. */
+ if ((insn2 & 0xffff) != 0)
+ return FALSE;
+ /* Prefix word (high 32 bits): 1ULL<<58 is primary opcode 1,
+    2ULL<<56 the MLS prefix type, 1ULL<<52 the pc-relative R bit.
+    The suffix keeps the original opcode and RT.  */
+ i1new = ((1ULL << 58) | (2ULL << 56) | (1ULL << 52)
+ | (insn2 & ((63ULL << 26) | (31ULL << 21))));
+ break;
+
+ case 58: /* lwa, ld */
+ /* DS form: low two bits are the XO; require zero displacement.  */
+ if ((insn2 & 0xfffd) != 0)
+ return FALSE;
+ /* Select the prefixed suffix opcode: 41 for plwa, 57 for pld.  */
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | (insn2 & 2 ? 41ULL << 26 : 57ULL << 26)
+ | (insn2 & (31ULL << 21)));
+ break;
+
+ case 57: /* lxsd, lxssp */
+ /* Only XO values 2 and 3 are valid here; displacement must be 0.  */
+ if ((insn2 & 0xfffc) != 0 || (insn2 & 3) < 2)
+ return FALSE;
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | ((40ULL | (insn2 & 3)) << 26)
+ | (insn2 & (31ULL << 21)));
+ break;
+
+ case 61: /* stxsd, stxssp, lxv, stxv */
+ if ((insn2 & 3) == 0)
+ return FALSE;
+ else if ((insn2 & 3) >= 2)
+ {
+ /* stxsd/stxssp, DS form with zero displacement.  */
+ if ((insn2 & 0xfffc) != 0)
+ return FALSE;
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | ((44ULL | (insn2 & 3)) << 26)
+ | (insn2 & (31ULL << 21)));
+ }
+ else
+ {
+ /* lxv/stxv, DQ form: XO bit 3 distinguishes load from store
+    and bit 8 carries the TX/SX register-extension bit.  */
+ if ((insn2 & 0xfff0) != 0)
+ return FALSE;
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | ((50ULL | (insn2 & 4) | ((insn2 & 8) >> 3)) << 26)
+ | (insn2 & (31ULL << 21)));
+ }
+ break;
+
+ case 56: /* lq */
+ if ((insn2 & 0xffff) != 0)
+ return FALSE;
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | (insn2 & ((63ULL << 26) | (31ULL << 21))));
+ break;
+
+ case 62: /* std, stq */
+ if ((insn2 & 0xfffd) != 0)
+ return FALSE;
+ /* 61 is the pstd suffix opcode, 60 pstq.  */
+ i1new = ((1ULL << 58) | (1ULL << 52)
+ | ((insn2 & 2) == 0 ? 61ULL << 26 : 60ULL << 26)
+ | (insn2 & (31ULL << 21)));
+ break;
+ }
+
+ /* Success: first insn becomes the prefixed load/store, the second
+    becomes a plain nop (in the high word, matching storage layout).  */
+ *pinsn1 = i1new;
+ *pinsn2 = (uint64_t) NOP << 32;
+ return TRUE;
+}
+
/* Examine all relocs referencing .toc sections in order to remove
unused .toc entries. */
free (skip);
}
- return TRUE;
-}
+ /* Look for cases where we can change an indirect GOT access to
+ a GOT relative or PC relative access, possibly reducing the
+ number of GOT entries. */
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
+ {
+ asection *sec;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Sym *local_syms;
+ Elf_Internal_Rela *relstart, *rel;
+ bfd_vma got;
-/* Return true iff input section I references the TOC using
- instructions limited to +/-32k offsets. */
+ if (!is_ppc64_elf (ibfd))
+ continue;
-bfd_boolean
-ppc64_elf_has_small_toc_reloc (asection *i)
-{
- return (is_ppc64_elf (i->owner)
- && ppc64_elf_tdata (i->owner)->has_small_toc_reloc);
-}
+ if (!ppc64_elf_tdata (ibfd)->has_gotrel)
+ continue;
-/* Allocate space for one GOT entry. */
+ sec = ppc64_elf_tdata (ibfd)->got;
+ got = sec->output_section->vma + sec->output_offset + 0x8000;
-static void
-allocate_got (struct elf_link_hash_entry *h,
- struct bfd_link_info *info,
- struct got_entry *gent)
-{
- struct ppc_link_hash_table *htab = ppc_hash_table (info);
- struct ppc_link_hash_entry *eh = (struct ppc_link_hash_entry *) h;
- int entsize = (gent->tls_type & eh->tls_mask & (TLS_GD | TLS_LD)
- ? 16 : 8);
- int rentsize = (gent->tls_type & eh->tls_mask & TLS_GD
- ? 2 : 1) * sizeof (Elf64_External_Rela);
- asection *got = ppc64_elf_tdata (gent->owner)->got;
+ local_syms = NULL;
+ symtab_hdr = &elf_symtab_hdr (ibfd);
- gent->got.offset = got->size;
- got->size += entsize;
+ for (sec = ibfd->sections; sec != NULL; sec = sec->next)
+ {
+ if (sec->reloc_count == 0
+ || !ppc64_elf_section_data (sec)->has_gotrel
+ || discarded_section (sec))
+ continue;
- if (h->type == STT_GNU_IFUNC)
- {
- htab->elf.irelplt->size += rentsize;
- htab->got_reli_size += rentsize;
- }
- else if (((bfd_link_pic (info)
- && !((gent->tls_type & TLS_TPREL) != 0
- && bfd_link_executable (info)
- && SYMBOL_REFERENCES_LOCAL (info, h)))
- || (htab->elf.dynamic_sections_created
- && h->dynindx != -1
- && !SYMBOL_REFERENCES_LOCAL (info, h)))
- && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
- {
- asection *relgot = ppc64_elf_tdata (gent->owner)->relgot;
- relgot->size += rentsize;
- }
-}
+ relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
+ info->keep_memory);
+ if (relstart == NULL)
+ {
+ got_error_ret:
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ free (local_syms);
+ if (sec != NULL
+ && relstart != NULL
+ && elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ return FALSE;
+ }
-/* This function merges got entries in the same toc group. */
+ for (rel = relstart; rel < relstart + sec->reloc_count; ++rel)
+ {
+ enum elf_ppc64_reloc_type r_type;
+ unsigned long r_symndx;
+ Elf_Internal_Sym *sym;
+ asection *sym_sec;
+ struct elf_link_hash_entry *h;
+ struct got_entry *ent;
+ bfd_vma sym_addend, val, pc;
+ unsigned char buf[8];
+ unsigned int insn;
-static void
-merge_got_entries (struct got_entry **pent)
-{
- struct got_entry *ent, *ent2;
+ r_type = ELF64_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ /* Note that we don't delete GOT entries for
+ R_PPC64_GOT16_DS since we'd need a lot more
+ analysis. For starters, the preliminary layout is
+ before the GOT, PLT, dynamic sections and stubs are
+ laid out. Then we'd need to allow for changes in
+ distance between sections caused by alignment. */
+ default:
+ continue;
- for (ent = *pent; ent != NULL; ent = ent->next)
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_GOT16_LO_DS:
+ sym_addend = rel->r_addend;
+ break;
+
+ case R_PPC64_GOT_PCREL34:
+ sym_addend = 0;
+ break;
+ }
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ if (!get_sym_h (&h, &sym, &sym_sec, NULL, &local_syms,
+ r_symndx, ibfd))
+ goto got_error_ret;
+
+ if (!SYMBOL_REFERENCES_LOCAL (info, h))
+ continue;
+
+ if (h != NULL)
+ val = h->root.u.def.value;
+ else
+ val = sym->st_value;
+ val += sym_addend;
+ val += sym_sec->output_section->vma + sym_sec->output_offset;
+
+/* Fudge factor to allow for the fact that the preliminary layout
+ isn't exact. Reduce limits by this factor. */
+#define LIMIT_ADJUST(LIMIT) ((LIMIT) - (LIMIT) / 16)
+
+ switch (r_type)
+ {
+ default:
+ continue;
+
+ case R_PPC64_GOT16_HA:
+ if (val - got + LIMIT_ADJUST (0x80008000ULL)
+ >= LIMIT_ADJUST (0x100000000ULL))
+ continue;
+
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 4))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if (((insn & ((0x3f << 26) | 0x1f << 16))
+ != ((15u << 26) | (2 << 16)) /* addis rt,2,imm */))
+ continue;
+ break;
+
+ case R_PPC64_GOT16_LO_DS:
+ if (val - got + LIMIT_ADJUST (0x80008000ULL)
+ >= LIMIT_ADJUST (0x100000000ULL))
+ continue;
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 4))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if ((insn & (0x3f << 26 | 0x3)) != 58u << 26 /* ld */)
+ continue;
+ break;
+
+ case R_PPC64_GOT_PCREL34:
+ pc = rel->r_offset;
+ pc += sec->output_section->vma + sec->output_offset;
+ if (val - pc + LIMIT_ADJUST (1ULL << 33)
+ >= LIMIT_ADJUST (1ULL << 34))
+ continue;
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 8))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if ((insn & (-1u << 18)) != ((1u << 26) | (1u << 20)))
+ continue;
+ insn = bfd_get_32 (ibfd, buf + 4);
+ if ((insn & (0x3f << 26)) != 57u << 26)
+ continue;
+ break;
+ }
+#undef LIMIT_ADJUST
+
+ if (h != NULL)
+ ent = h->got.glist;
+ else
+ {
+ struct got_entry **local_got_ents = elf_local_got_ents (ibfd);
+ ent = local_got_ents[r_symndx];
+ }
+ for (; ent != NULL; ent = ent->next)
+ if (ent->addend == sym_addend
+ && ent->owner == ibfd
+ && ent->tls_type == 0)
+ break;
+ BFD_ASSERT (ent && ent->got.refcount > 0);
+ ent->got.refcount -= 1;
+ }
+
+ if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ }
+
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+ }
+
+ return TRUE;
+}
+
+/* Return true iff input section I references the TOC using
+ instructions limited to +/-32k offsets. */
+
+bfd_boolean
+ppc64_elf_has_small_toc_reloc (asection *i)
+{
+ /* Check is_ppc64_elf first: ppc64_elf_tdata is only valid for
+    ppc64 ELF bfds.  */
+ return (is_ppc64_elf (i->owner)
+ && ppc64_elf_tdata (i->owner)->has_small_toc_reloc);
+}
+
+/* Allocate space for one GOT entry. */
+
+static void
+allocate_got (struct elf_link_hash_entry *h,
+ struct bfd_link_info *info,
+ struct got_entry *gent)
+{
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
+ struct ppc_link_hash_entry *eh = (struct ppc_link_hash_entry *) h;
+ /* TLS GD and LD entries occupy a double GOT slot (16 bytes).  */
+ int entsize = (gent->tls_type & eh->tls_mask & (TLS_GD | TLS_LD)
+ ? 16 : 8);
+ /* A GD entry needs a dynamic reloc on each of its two words.  */
+ int rentsize = (gent->tls_type & eh->tls_mask & TLS_GD
+ ? 2 : 1) * sizeof (Elf64_External_Rela);
+ asection *got = ppc64_elf_tdata (gent->owner)->got;
+
+ gent->got.offset = got->size;
+ got->size += entsize;
+
+ if (h->type == STT_GNU_IFUNC)
+ {
+ /* ifunc GOT entries are relocated via .rela.iplt; also track the
+    total so it can be accounted separately.  */
+ htab->elf.irelplt->size += rentsize;
+ htab->got_reli_size += rentsize;
+ }
+ else if (((bfd_link_pic (info)
+ && !((gent->tls_type & TLS_TPREL) != 0
+ && bfd_link_executable (info)
+ && SYMBOL_REFERENCES_LOCAL (info, h)))
+ || (htab->elf.dynamic_sections_created
+ && h->dynindx != -1
+ && !SYMBOL_REFERENCES_LOCAL (info, h)))
+ && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
+ {
+ /* Reserve .rela.got space: needed for pic output (except a
+    TPREL entry for a local symbol in an executable, which can
+    be resolved at link time) or for a dynamic symbol, unless
+    an undefined weak needs no dynamic reloc at all.  */
+ asection *relgot = ppc64_elf_tdata (gent->owner)->relgot;
+ relgot->size += rentsize;
+ }
+}
+
+/* This function merges got entries in the same toc group. */
+
+static void
+merge_got_entries (struct got_entry **pent)
+{
+ struct got_entry *ent, *ent2;
+
+ for (ent = *pent; ent != NULL; ent = ent->next)
if (!ent->is_indirect)
for (ent2 = ent->next; ent2 != NULL; ent2 = ent2->next)
if (!ent2->is_indirect
eh = (struct ppc_link_hash_entry *) h;
/* Run through the TLS GD got entries first if we're changing them
to TPREL. */
- if ((eh->tls_mask & (TLS_TLS | TLS_TPRELGD)) == (TLS_TLS | TLS_TPRELGD))
+ if ((eh->tls_mask & (TLS_TLS | TLS_GDIE)) == (TLS_TLS | TLS_GDIE))
for (gent = h->got.glist; gent != NULL; gent = gent->next)
if (gent->got.refcount > 0
&& (gent->tls_type & TLS_GD) != 0)
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
+/* Place bits 16-33 of V in the low 18 bits of the prefix word and
+ bits 0-15 in the suffix word of a prefixed insn pair, ready to OR
+ into PADDI_R12_PC/PLD_R12_PC. Parameter fully parenthesized so
+ expression arguments are safe. */
+#define D34(v) \
+ ((((v) & 0x3ffff0000ULL) << 16) | ((v) & 0xffff))
+/* High part of V above bit 34, adjusted for the sign of the low
+ 34 bits so that (HA34 (v) << 34) + signed-low-34 (v) == v. */
+#define HA34(v) (((v) + (1ULL << 33)) >> 34)
/* Called via elf_link_hash_traverse from ppc64_elf_size_dynamic_sections
to set up space for global entry stubs. These are put in glink,
return ppc_stub_none;
}
-/* Builds a 64-bit offset in r12 then adds it to r11 (LOAD false) or
- loads r12 from r11+r12 (LOAD true).
+/* Gets the address of a label (1:) in r11 and builds an offset in r12,
+ then adds it to r11 (LOAD false) or loads r12 from r11+r12 (LOAD true).
+ . mflr %r12
+ . bcl 20,31,1f
+ .1: mflr %r11
+ . mtlr %r12
. lis %r12,xxx-1b@highest
- . ori %r12,xxx-1b@higher
+ . ori %r12,%r12,xxx-1b@higher
. sldi %r12,%r12,32
- . oris %r12,%r12,xxx-1b@hi
+ . oris %r12,%r12,xxx-1b@high
. ori %r12,%r12,xxx-1b@l
- . add %r12,%r11,%r12 */
+ . add/ldx %r12,%r11,%r12 */
static bfd_byte *
build_offset (bfd *abfd, bfd_byte *p, bfd_vma off, bfd_boolean load)
{
+ bfd_put_32 (abfd, MFLR_R12, p);
+ p += 4;
+ bfd_put_32 (abfd, BCL_20_31, p);
+ p += 4;
+ bfd_put_32 (abfd, MFLR_R11, p);
+ p += 4;
+ bfd_put_32 (abfd, MTLR_R12, p);
+ p += 4;
if (off + 0x8000 < 0x10000)
{
if (load)
size += 4;
size += 4;
}
- return size;
+ return size + 16;
+}
+
+/* Return the number of relocations needed to describe the insn
+ sequence built by build_offset for offset OFF, when --emit-relocs
+ is in force. Must agree with the instruction selection in
+ build_offset and with emit_relocs_for_offset below. */
+
+static unsigned int
+num_relocs_for_offset (bfd_vma off)
+{
+ unsigned int num_rel;
+ if (off + 0x8000 < 0x10000)
+ /* 16-bit signed offset: one insn, one reloc. */
+ num_rel = 1;
+ else if (off + 0x80008000ULL < 0x100000000ULL)
+ /* 32-bit offset: @ha/@l pair. */
+ num_rel = 2;
+ else
+ {
+ /* Full 64-bit sequence: one reloc for the @higher or @highest
+ insn, plus optional @higher, @high and @l parts when those
+ halfwords of the offset are non-zero. */
+ num_rel = 1;
+ if (off + 0x800000000000ULL >= 0x1000000000000ULL
+ && ((off >> 32) & 0xffff) != 0)
+ num_rel += 1;
+ if (PPC_HI (off) != 0)
+ num_rel += 1;
+ if (PPC_LO (off) != 0)
+ num_rel += 1;
+ }
+ return num_rel;
+}
+
+/* Emit --emit-relocs relocations describing the build_offset insn
+ sequence starting at section offset ROFF, which materializes
+ offset OFF to target address TARG. OFF is relative to the "1:"
+ label after the bcl, 8 bytes before ROFF, hence the ROFF - 8
+ below. All relocs use symbol index zero with the target encoded
+ in the addend; use_global_in_relocs may later convert them to
+ symbol-relative form. Returns a pointer to the last reloc
+ written (the caller counts relocs via num_relocs_for_offset). */
+
+static Elf_Internal_Rela *
+emit_relocs_for_offset (struct bfd_link_info *info, Elf_Internal_Rela *r,
+ bfd_vma roff, bfd_vma targ, bfd_vma off)
+{
+ bfd_vma relative_targ = targ - (roff - 8);
+ /* The 16-bit immediate field sits in the high-addressed halfword
+ of the insn on big-endian output. */
+ if (bfd_big_endian (info->output_bfd))
+ roff += 2;
+ r->r_offset = roff;
+ r->r_addend = relative_targ + roff;
+ if (off + 0x8000 < 0x10000)
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16);
+ else if (off + 0x80008000ULL < 0x100000000ULL)
+ {
+ /* @ha/@l pair for a 32-bit offset. */
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HA);
+ ++r;
+ roff += 4;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_LO);
+ r->r_addend = relative_targ + roff;
+ }
+ else
+ {
+ /* 64-bit sequence: @highest and/or @higher, then optional
+ @high and @l, matching the insns build_offset emitted. */
+ if (off + 0x800000000000ULL < 0x1000000000000ULL)
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHER);
+ else
+ {
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHEST);
+ if (((off >> 32) & 0xffff) != 0)
+ {
+ ++r;
+ roff += 4;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHER);
+ r->r_addend = relative_targ + roff;
+ }
+ }
+ /* Skip over the sldi insn. */
+ if (((off >> 32) & 0xffffffffULL) != 0)
+ roff += 4;
+ if (PPC_HI (off) != 0)
+ {
+ ++r;
+ roff += 4;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGH);
+ r->r_addend = relative_targ + roff;
+ }
+ if (PPC_LO (off) != 0)
+ {
+ ++r;
+ roff += 4;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_LO);
+ r->r_addend = relative_targ + roff;
+ }
+ }
+ return r;
+}
+
+/* Emit a "powerxx" (pc-relative prefixed insn) sequence at P that
+ sets r12 to PC + OFF (LOAD false, using paddi) or loads r12 from
+ PC + OFF (LOAD true, using pld). ODD is 4 when the sequence
+ starts on a word but not doubleword boundary; a nop or the
+ placement of the r11-building insns keeps each 8-byte prefixed
+ insn doubleword aligned. NOTE(review): presumably this alignment
+ exists to stop prefixed insns crossing a 64-byte boundary --
+ confirm against the ISA. Offsets beyond the 34-bit paddi range
+ are handled by building the excess above bit 34 in r11 and
+ combining with add/ldx. Returns the updated output pointer. */
+
+static bfd_byte *
+build_powerxx_offset (bfd *abfd, bfd_byte *p, bfd_vma off, int odd,
+ bfd_boolean load)
+{
+ uint64_t insn;
+ /* OFF fits the signed 34-bit paddi/pld displacement. */
+ if (off - odd + (1ULL << 33) < 1ULL << 34)
+ {
+ /* paddi/pld is pc-relative to its own address; adjust OFF for
+ the aligning nop, if any. */
+ off -= odd;
+ if (odd)
+ {
+ bfd_put_32 (abfd, NOP, p);
+ p += 4;
+ }
+ if (load)
+ insn = PLD_R12_PC;
+ else
+ insn = PADDI_R12_PC;
+ insn |= D34 (off);
+ bfd_put_32 (abfd, insn >> 32, p);
+ p += 4;
+ bfd_put_32 (abfd, insn, p);
+ }
+ /* The minimum value for paddi is -0x200000000. The minimum value
+ for li is -0x8000, which when shifted by 34 and added gives a
+ minimum value of -0x2000200000000. The maximum value is
+ 0x1ffffffff+0x7fff<<34 which is 0x2000200000000-1. */
+ else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+ {
+ /* li builds HA34(off) in r11, shifted up by 34; paddi supplies
+ the low 34 bits; add/ldx combines them. OFF is adjusted to
+ be relative to the paddi insn itself. */
+ off -= 8 - odd;
+ bfd_put_32 (abfd, LI_R11_0 | (HA34 (off) & 0xffff), p);
+ p += 4;
+ /* The sldi goes before or after the prefixed insn so that the
+ latter lands on a doubleword boundary. */
+ if (!odd)
+ {
+ bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+ p += 4;
+ }
+ insn = PADDI_R12_PC | D34 (off);
+ bfd_put_32 (abfd, insn >> 32, p);
+ p += 4;
+ bfd_put_32 (abfd, insn, p);
+ p += 4;
+ if (odd)
+ {
+ bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+ p += 4;
+ }
+ if (load)
+ bfd_put_32 (abfd, LDX_R12_R11_R12, p);
+ else
+ bfd_put_32 (abfd, ADD_R12_R11_R12, p);
+ }
+ else
+ {
+ /* Full-range case: lis/ori build the 30-bit HA34 in r11. */
+ off -= odd + 8;
+ bfd_put_32 (abfd, LIS_R11 | ((HA34 (off) >> 16) & 0x3fff), p);
+ p += 4;
+ bfd_put_32 (abfd, ORI_R11_R11_0 | (HA34 (off) & 0xffff), p);
+ p += 4;
+ if (odd)
+ {
+ bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+ p += 4;
+ }
+ insn = PADDI_R12_PC | D34 (off);
+ bfd_put_32 (abfd, insn >> 32, p);
+ p += 4;
+ bfd_put_32 (abfd, insn, p);
+ p += 4;
+ if (!odd)
+ {
+ bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+ p += 4;
+ }
+ if (load)
+ bfd_put_32 (abfd, LDX_R12_R11_R12, p);
+ else
+ bfd_put_32 (abfd, ADD_R12_R11_R12, p);
+ }
+ p += 4;
+ return p;
+}
+
+/* Return the size in bytes of the sequence emitted by
+ build_powerxx_offset for offset OFF and alignment ODD. The
+ range tests must be kept in sync with that function. */
+
+static unsigned int
+size_powerxx_offset (bfd_vma off, int odd)
+{
+ /* paddi/pld alone, possibly preceded by an aligning nop. */
+ if (off - odd + (1ULL << 33) < 1ULL << 34)
+ return odd + 8;
+ /* li, sldi, paddi/pld, add/ldx. */
+ else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+ return 20;
+ /* lis, ori, sldi, paddi, add/ldx. */
+ else
+ return 24;
+}
+
+/* Return the number of relocs needed for the build_powerxx_offset
+ sequence: a PCREL34 alone, or preceded by one or two REL16
+ @highera34/@highesta34 relocs (see
+ emit_relocs_for_powerxx_offset). */
+
+static unsigned int
+num_relocs_for_powerxx_offset (bfd_vma off, int odd)
+{
+ if (off - odd + (1ULL << 33) < 1ULL << 34)
+ return 1;
+ else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+ return 2;
+ else
+ return 3;
+}
+
+/* Emit relocations describing the build_powerxx_offset sequence at
+ section offset ROFF for target TARG with offset OFF: an
+ R_PPC64_PCREL34 on the prefixed paddi/pld, preceded in the larger
+ cases by REL16_HIGHERA34 (and REL16_HIGHESTA34) relocs on the
+ li/lis/ori insns. D_OFFSET selects the halfword holding the
+ 16-bit field, at the higher address on big-endian output. The
+ addend adjustments mirror the OFF adjustments made by
+ build_powerxx_offset for each insn's own pc. Returns a pointer
+ to the last reloc written. */
+
+static Elf_Internal_Rela *
+emit_relocs_for_powerxx_offset (struct bfd_link_info *info,
+ Elf_Internal_Rela *r, bfd_vma roff,
+ bfd_vma targ, bfd_vma off, int odd)
+{
+ if (off - odd + (1ULL << 33) < 1ULL << 34)
+ /* Single paddi/pld; just step past any aligning nop. */
+ roff += odd;
+ else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+ {
+ int d_offset = bfd_big_endian (info->output_bfd) ? 2 : 0;
+ r->r_offset = roff + d_offset;
+ r->r_addend = targ + 8 - odd - d_offset;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHERA34);
+ ++r;
+ roff += 8 - odd;
+ }
+ else
+ {
+ int d_offset = bfd_big_endian (info->output_bfd) ? 2 : 0;
+ r->r_offset = roff + d_offset;
+ r->r_addend = targ + 8 + odd - d_offset;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHESTA34);
+ ++r;
+ roff += 4;
+ r->r_offset = roff + d_offset;
+ r->r_addend = targ + 4 + odd - d_offset;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHERA34);
+ ++r;
+ roff += 4 + odd;
+ }
+ /* The trailing pc-relative prefixed insn. */
+ r->r_offset = roff;
+ r->r_addend = targ;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_PCREL34);
+ return r;
 }
/* Emit .eh_frame opcode to advance pc by DELTA. */
if (stub_entry->stub_type >= ppc_stub_plt_call_notoc)
{
- size = 24 + size_offset (off);
+ if (htab->powerxx_stubs)
+ {
+ bfd_vma start = (stub_entry->stub_offset
+ + stub_entry->group->stub_sec->output_offset
+ + stub_entry->group->stub_sec->output_section->vma);
+ if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
+ start += 4;
+ size = 8 + size_powerxx_offset (off, start & 4);
+ }
+ else
+ size = 8 + size_offset (off - 8);
if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
size += 4;
return size;
return relocs;
}
+/* Convert the relocs R[0] thru R[-NUM_REL+1], which are all no-symbol
+ forms, to the equivalent relocs against the global symbol given by
+ STUB_ENTRY->H. Addends change from encoding the absolute target
+ to being relative to the symbol's value. Returns FALSE only on
+ out-of-memory. */
+
+static bfd_boolean
+use_global_in_relocs (struct ppc_link_hash_table *htab,
+ struct ppc_stub_hash_entry *stub_entry,
+ Elf_Internal_Rela *r, unsigned int num_rel)
+{
+ struct elf_link_hash_entry **hashes;
+ unsigned long symndx;
+ struct ppc_link_hash_entry *h;
+ bfd_vma symval;
+
+ /* Relocs are always against symbols in their own object file. Fake
+ up global sym hashes for the stub bfd (which has no symbols). */
+ hashes = elf_sym_hashes (htab->params->stub_bfd);
+ if (hashes == NULL)
+ {
+ bfd_size_type hsize;
+
+ /* When called the first time, stub_globals will contain the
+ total number of symbols seen during stub sizing. After
+ allocating, stub_globals is used as an index to fill the
+ hashes array. */
+ hsize = (htab->stub_globals + 1) * sizeof (*hashes);
+ hashes = bfd_zalloc (htab->params->stub_bfd, hsize);
+ if (hashes == NULL)
+ return FALSE;
+ elf_sym_hashes (htab->params->stub_bfd) = hashes;
+ htab->stub_globals = 1;
+ }
+ symndx = htab->stub_globals++;
+ h = stub_entry->h;
+ hashes[symndx] = &h->elf;
+ /* Follow a function descriptor symbol to its code symbol. */
+ if (h->oh != NULL && h->oh->is_func)
+ h = ppc_follow_link (h->oh);
+ BFD_ASSERT (h->elf.root.type == bfd_link_hash_defined
+ || h->elf.root.type == bfd_link_hash_defweak);
+ symval = (h->elf.root.u.def.value
+ + h->elf.root.u.def.section->output_offset
+ + h->elf.root.u.def.section->output_section->vma);
+ /* Walk backwards from the last reloc, rewriting each against the
+ new symbol index and a symbol-relative addend. */
+ while (num_rel-- != 0)
+ {
+ r->r_info = ELF64_R_INFO (symndx, ELF64_R_TYPE (r->r_info));
+ if (h->elf.root.u.def.section != stub_entry->target_section)
+ {
+ /* H is an opd symbol. The addend must be zero, and the
+ branch reloc is the only one we can convert. */
+ r->r_addend = 0;
+ break;
+ }
+ else
+ r->r_addend -= symval;
+ --r;
+ }
+ return TRUE;
+}
+
static bfd_vma
get_r2off (struct bfd_link_info *info,
struct ppc_stub_hash_entry *stub_entry)
struct bfd_link_info *info;
struct ppc_link_hash_table *htab;
bfd_byte *loc;
- bfd_byte *p;
+ bfd_byte *p, *relp;
bfd_vma targ, off;
Elf_Internal_Rela *r;
asection *plt;
+ int num_rel;
+ int odd;
/* Massage our args to the form they really have. */
stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
r->r_offset = p - 4 - stub_entry->group->stub_sec->contents;
r->r_info = ELF64_R_INFO (0, R_PPC64_REL24);
r->r_addend = targ;
- if (stub_entry->h != NULL)
- {
- struct elf_link_hash_entry **hashes;
- unsigned long symndx;
- struct ppc_link_hash_entry *h;
-
- hashes = elf_sym_hashes (htab->params->stub_bfd);
- if (hashes == NULL)
- {
- bfd_size_type hsize;
-
- hsize = (htab->stub_globals + 1) * sizeof (*hashes);
- hashes = bfd_zalloc (htab->params->stub_bfd, hsize);
- if (hashes == NULL)
- return FALSE;
- elf_sym_hashes (htab->params->stub_bfd) = hashes;
- htab->stub_globals = 1;
- }
- symndx = htab->stub_globals++;
- h = stub_entry->h;
- hashes[symndx] = &h->elf;
- r->r_info = ELF64_R_INFO (symndx, R_PPC64_REL24);
- if (h->oh != NULL && h->oh->is_func)
- h = ppc_follow_link (h->oh);
- if (h->elf.root.u.def.section != stub_entry->target_section)
- /* H is an opd symbol. The addend must be zero. */
- r->r_addend = 0;
- else
- {
- off = (h->elf.root.u.def.value
- + h->elf.root.u.def.section->output_offset
- + h->elf.root.u.def.section->output_section->vma);
- r->r_addend -= off;
- }
- }
+ if (stub_entry->h != NULL
+ && !use_global_in_relocs (htab, stub_entry, r, 1))
+ return FALSE;
}
break;
case ppc_stub_plt_call_notoc:
case ppc_stub_plt_call_both:
p = loc;
- off = (8 + stub_entry->stub_offset
+ off = (stub_entry->stub_offset
+ stub_entry->group->stub_sec->output_offset
+ stub_entry->group->stub_sec->output_section->vma);
if (stub_entry->stub_type == ppc_stub_long_branch_both
targ = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
+ odd = off & 4;
off = targ - off;
- bfd_put_32 (htab->params->stub_bfd, MFLR_R12, p);
- p += 4;
- bfd_put_32 (htab->params->stub_bfd, BCL_20_31, p);
- p += 4;
- bfd_put_32 (htab->params->stub_bfd, MFLR_R11, p);
- p += 4;
- bfd_put_32 (htab->params->stub_bfd, MTLR_R12, p);
- p += 4;
- p = build_offset (htab->params->stub_bfd, p, off,
- stub_entry->stub_type >= ppc_stub_plt_call_notoc);
- if (stub_entry->stub_type == ppc_stub_long_branch_notoc)
+
+ relp = p;
+ num_rel = 0;
+ if (htab->powerxx_stubs)
{
- off += 8;
- bfd_put_32 (htab->params->stub_bfd,
- B_DOT | ((off - (p - loc)) & 0x3fffffc), p);
+ bfd_boolean load = stub_entry->stub_type >= ppc_stub_plt_call_notoc;
+ p = build_powerxx_offset (htab->params->stub_bfd, p, off, odd, load);
}
- else if (stub_entry->stub_type == ppc_stub_long_branch_both)
+ else
{
- off += 12;
+ /* The notoc stubs calculate their target (either a PLT entry or
+ the global entry point of a function) relative to the PC
+ returned by the "bcl" two instructions past the start of the
+ sequence emitted by build_offset. The offset is therefore 8
+ less than calculated from the start of the sequence. */
+ off -= 8;
+ p = build_offset (htab->params->stub_bfd, p, off,
+ stub_entry->stub_type >= ppc_stub_plt_call_notoc);
+ }
+
+ if (stub_entry->stub_type <= ppc_stub_long_branch_both)
+ {
+ bfd_vma from;
+ num_rel = 1;
+ from = (stub_entry->stub_offset
+ + stub_entry->group->stub_sec->output_offset
+ + stub_entry->group->stub_sec->output_section->vma
+ + (p - loc));
bfd_put_32 (htab->params->stub_bfd,
- B_DOT | ((off - (p - loc)) & 0x3fffffc), p);
+ B_DOT | ((targ - from) & 0x3fffffc), p);
}
else
{
}
p += 4;
- if (htab->glink_eh_frame != NULL
- && htab->glink_eh_frame->size != 0)
+ if (info->emitrelocations)
+ {
+ bfd_vma roff = relp - stub_entry->group->stub_sec->contents;
+ if (htab->powerxx_stubs)
+ num_rel += num_relocs_for_powerxx_offset (off, odd);
+ else
+ {
+ num_rel += num_relocs_for_offset (off);
+ roff += 16;
+ }
+ r = get_relocs (stub_entry->group->stub_sec, num_rel);
+ if (r == NULL)
+ return FALSE;
+ if (htab->powerxx_stubs)
+ r = emit_relocs_for_powerxx_offset (info, r, roff, targ, off, odd);
+ else
+ r = emit_relocs_for_offset (info, r, roff, targ, off);
+ if (stub_entry->stub_type == ppc_stub_long_branch_notoc
+ || stub_entry->stub_type == ppc_stub_long_branch_both)
+ {
+ ++r;
+ roff = p - 4 - stub_entry->group->stub_sec->contents;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL24);
+ r->r_addend = targ;
+ if (stub_entry->h != NULL
+ && !use_global_in_relocs (htab, stub_entry, r, num_rel))
+ return FALSE;
+ }
+ }
+
+ if (!htab->powerxx_stubs
+ && htab->glink_eh_frame != NULL
+ && htab->glink_eh_frame->size != 0)
{
bfd_byte *base, *eh;
unsigned int lr_used, delta;
struct ppc_stub_hash_entry *stub_entry;
struct bfd_link_info *info;
struct ppc_link_hash_table *htab;
- bfd_vma targ, off;
- int size;
+ asection *plt;
+ bfd_vma targ, off, r2off;
+ unsigned int size, extra, lr_used, delta, odd;
/* Massage our args to the form they really have. */
stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
return TRUE;
}
- if (stub_entry->stub_type >= ppc_stub_plt_call
- && stub_entry->stub_type <= ppc_stub_plt_call_both)
+ switch (stub_entry->stub_type)
{
- asection *plt;
- targ = stub_entry->plt_ent->plt.offset & ~(bfd_vma) 1;
- if (targ >= (bfd_vma) -2)
- abort ();
- plt = htab->elf.splt;
- if (!htab->elf.dynamic_sections_created
- || stub_entry->h == NULL
- || stub_entry->h->elf.dynindx == -1)
+ case ppc_stub_plt_branch:
+ case ppc_stub_plt_branch_r2off:
+ /* Reset the stub type from the plt branch variant in case we now
+ can reach with a shorter stub. */
+ stub_entry->stub_type += ppc_stub_long_branch - ppc_stub_plt_branch;
+ /* Fall through. */
+ case ppc_stub_long_branch:
+ case ppc_stub_long_branch_r2off:
+ targ = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
+ targ += PPC64_LOCAL_ENTRY_OFFSET (stub_entry->other);
+ off = (stub_entry->stub_offset
+ + stub_entry->group->stub_sec->output_offset
+ + stub_entry->group->stub_sec->output_section->vma);
+
+ size = 4;
+ r2off = 0;
+ if (stub_entry->stub_type == ppc_stub_long_branch_r2off)
{
- if (stub_entry->symtype == STT_GNU_IFUNC)
- plt = htab->elf.iplt;
+ r2off = get_r2off (info, stub_entry);
+ if (r2off == (bfd_vma) -1)
+ {
+ htab->stub_error = TRUE;
+ return FALSE;
+ }
+ size = 8;
+ if (PPC_HA (r2off) != 0)
+ size += 4;
+ if (PPC_LO (r2off) != 0)
+ size += 4;
+ off += size - 4;
+ }
+ off = targ - off;
+
+ /* If the branch offset is too big, use a ppc_stub_plt_branch.
+ Do the same for -R objects without function descriptors. */
+ if ((stub_entry->stub_type == ppc_stub_long_branch_r2off
+ && r2off == 0
+ && htab->sec_info[stub_entry->target_section->id].toc_off == 0)
+ || off + (1 << 25) >= (bfd_vma) (1 << 26))
+ {
+ struct ppc_branch_hash_entry *br_entry;
+
+ br_entry = ppc_branch_hash_lookup (&htab->branch_hash_table,
+ stub_entry->root.string + 9,
+ TRUE, FALSE);
+ if (br_entry == NULL)
+ {
+ _bfd_error_handler (_("can't build branch stub `%s'"),
+ stub_entry->root.string);
+ htab->stub_error = TRUE;
+ return FALSE;
+ }
+
+ if (br_entry->iter != htab->stub_iteration)
+ {
+ br_entry->iter = htab->stub_iteration;
+ br_entry->offset = htab->brlt->size;
+ htab->brlt->size += 8;
+
+ if (htab->relbrlt != NULL)
+ htab->relbrlt->size += sizeof (Elf64_External_Rela);
+ else if (info->emitrelocations)
+ {
+ htab->brlt->reloc_count += 1;
+ htab->brlt->flags |= SEC_RELOC;
+ }
+ }
+
+ targ = (br_entry->offset
+ + htab->brlt->output_offset
+ + htab->brlt->output_section->vma);
+ off = (elf_gp (info->output_bfd)
+ + htab->sec_info[stub_entry->group->link_sec->id].toc_off);
+ off = targ - off;
+
+ if (info->emitrelocations)
+ {
+ stub_entry->group->stub_sec->reloc_count
+ += 1 + (PPC_HA (off) != 0);
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
+ }
+
+ stub_entry->stub_type += ppc_stub_plt_branch - ppc_stub_long_branch;
+ if (stub_entry->stub_type != ppc_stub_plt_branch_r2off)
+ {
+ size = 12;
+ if (PPC_HA (off) != 0)
+ size = 16;
+ }
else
- plt = htab->pltlocal;
+ {
+ size = 16;
+ if (PPC_HA (off) != 0)
+ size += 4;
+
+ if (PPC_HA (r2off) != 0)
+ size += 4;
+ if (PPC_LO (r2off) != 0)
+ size += 4;
+ }
}
- targ += plt->output_offset + plt->output_section->vma;
-
- if (stub_entry->stub_type >= ppc_stub_plt_call_notoc)
+ else if (info->emitrelocations)
{
- off = (8 + stub_entry->stub_offset
- + stub_entry->group->stub_sec->output_offset
- + stub_entry->group->stub_sec->output_section->vma);
- if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
- off += 4;
+ stub_entry->group->stub_sec->reloc_count += 1;
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
}
- else
- off = (elf_gp (info->output_bfd)
- + htab->sec_info[stub_entry->group->link_sec->id].toc_off);
+ break;
- if (htab->params->plt_stub_align != 0)
- {
- unsigned pad = plt_stub_pad (htab, stub_entry, targ - off);
+ case ppc_stub_plt_branch_notoc:
+ case ppc_stub_plt_branch_both:
+ stub_entry->stub_type += ppc_stub_long_branch - ppc_stub_plt_branch;
+ /* Fall through. */
+ case ppc_stub_long_branch_notoc:
+ case ppc_stub_long_branch_both:
+ off = (stub_entry->stub_offset
+ + stub_entry->group->stub_sec->output_offset
+ + stub_entry->group->stub_sec->output_section->vma);
+ size = 0;
+ if (stub_entry->stub_type == ppc_stub_long_branch_both)
+ size = 4;
+ off += size;
+ targ = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
+ odd = off & 4;
+ off = targ - off;
- stub_entry->group->stub_sec->size += pad;
- stub_entry->stub_offset = stub_entry->group->stub_sec->size;
- if (stub_entry->stub_type >= ppc_stub_plt_call_notoc)
- off += pad;
+ if (info->emitrelocations)
+ {
+ unsigned int num_rel;
+ if (htab->powerxx_stubs)
+ num_rel = num_relocs_for_powerxx_offset (off, odd);
+ else
+ num_rel = num_relocs_for_offset (off - 8);
+ stub_entry->group->stub_sec->reloc_count += num_rel;
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
}
- off = targ - off;
- size = plt_stub_size (htab, stub_entry, off);
-
- if (stub_entry->stub_type >= ppc_stub_plt_call_notoc)
+ if (htab->powerxx_stubs)
+ extra = size_powerxx_offset (off, odd);
+ else
+ extra = size_offset (off - 8);
+ /* Include branch insn plus those in the offset sequence. */
+ size += 4 + extra;
+ /* The branch insn is at the end, or "extra" bytes along. So
+ its offset will be "extra" bytes less that that already
+ calculated. */
+ off -= extra;
+
+ if (!htab->powerxx_stubs)
{
/* After the bcl, lr has been modified so we need to emit
.eh_frame info saying the return address is in r12. */
- unsigned int lr_used = stub_entry->stub_offset + 8;
- unsigned int delta;
- if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
+ lr_used = stub_entry->stub_offset + 8;
+ if (stub_entry->stub_type == ppc_stub_long_branch_both)
lr_used += 4;
/* The eh_frame info will consist of a DW_CFA_advance_loc or
variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
stub_entry->group->eh_size += eh_advance_size (delta) + 6;
stub_entry->group->lr_restore = lr_used + 8;
}
- else
- {
- if (stub_entry->h != NULL
- && (stub_entry->h == htab->tls_get_addr_fd
- || stub_entry->h == htab->tls_get_addr)
- && htab->params->tls_get_addr_opt
- && stub_entry->stub_type == ppc_stub_plt_call_r2save)
- {
- /* After the bctrl, lr has been modified so we need to
- emit .eh_frame info saying the return address is
- on the stack. In fact we put the EH info specifying
- that the return address is on the stack *at* the
- call rather than after it, because the EH info for a
- call needs to be specified by that point.
- See libgcc/unwind-dw2.c execute_cfa_program. */
- unsigned int lr_used = stub_entry->stub_offset + size - 20;
- unsigned int delta;
- /* The eh_frame info will consist of a DW_CFA_advance_loc
- or variant, DW_CFA_offset_externed_sf, 65, -stackoff,
- DW_CFA_advance_loc+4, DW_CFA_restore_extended, 65. */
- delta = lr_used - stub_entry->group->lr_restore;
- stub_entry->group->eh_size += eh_advance_size (delta) + 6;
- stub_entry->group->lr_restore = size - 4;
- }
- if (info->emitrelocations)
- {
- stub_entry->group->stub_sec->reloc_count
- += ((PPC_HA (off) != 0)
- + (htab->opd_abi
- ? 2 + (htab->params->plt_static_chain
- && PPC_HA (off + 16) == PPC_HA (off))
- : 1));
- stub_entry->group->stub_sec->flags |= SEC_RELOC;
- }
+ /* If the branch can't reach, use a plt_branch. */
+ if (off + (1 << 25) >= (bfd_vma) (1 << 26))
+ {
+ stub_entry->stub_type += (ppc_stub_plt_branch_notoc
+ - ppc_stub_long_branch_notoc);
+ size += 4;
}
- }
- else
- {
- /* ppc_stub_long_branch or ppc_stub_plt_branch, or their r2off
- variants. */
- bfd_vma r2off = 0;
- bfd_vma local_off = 0;
+ else if (info->emitrelocations)
+ stub_entry->group->stub_sec->reloc_count +=1;
+ break;
- targ = (stub_entry->target_value
- + stub_entry->target_section->output_offset
- + stub_entry->target_section->output_section->vma);
+ case ppc_stub_plt_call_notoc:
+ case ppc_stub_plt_call_both:
off = (stub_entry->stub_offset
+ stub_entry->group->stub_sec->output_offset
+ stub_entry->group->stub_sec->output_section->vma);
+ if (stub_entry->stub_type == ppc_stub_plt_call_both)
+ off += 4;
+ targ = stub_entry->plt_ent->plt.offset & ~1;
+ if (targ >= (bfd_vma) -2)
+ abort ();
- /* Reset the stub type from the plt variant in case we now
- can reach with a shorter stub. */
- if (stub_entry->stub_type >= ppc_stub_plt_branch)
- stub_entry->stub_type += ppc_stub_long_branch - ppc_stub_plt_branch;
+ plt = htab->elf.splt;
+ if (!htab->elf.dynamic_sections_created
+ || stub_entry->h == NULL
+ || stub_entry->h->elf.dynindx == -1)
+ {
+ if (stub_entry->symtype == STT_GNU_IFUNC)
+ plt = htab->elf.iplt;
+ else
+ plt = htab->pltlocal;
+ }
+ targ += plt->output_offset + plt->output_section->vma;
+ odd = off & 4;
+ off = targ - off;
- size = 4;
- if (stub_entry->stub_type == ppc_stub_long_branch_r2off)
+ if (htab->params->plt_stub_align != 0)
{
- r2off = get_r2off (info, stub_entry);
- if (r2off == (bfd_vma) -1)
- {
- htab->stub_error = TRUE;
- return FALSE;
- }
- size = 8;
- if (PPC_HA (r2off) != 0)
- size += 4;
- if (PPC_LO (r2off) != 0)
- size += 4;
- off += size - 4;
+ unsigned pad = plt_stub_pad (htab, stub_entry, off);
+
+ stub_entry->group->stub_sec->size += pad;
+ stub_entry->stub_offset = stub_entry->group->stub_sec->size;
+ off -= pad;
}
- else if (stub_entry->stub_type >= ppc_stub_long_branch_notoc)
+
+ if (info->emitrelocations)
{
- size = 20 + size_offset (targ - (off + 8));
- if (stub_entry->stub_type > ppc_stub_long_branch_notoc)
- size += 4;
- off += size - 4;
+ unsigned int num_rel;
+ if (htab->powerxx_stubs)
+ num_rel = num_relocs_for_powerxx_offset (off, odd);
+ else
+ num_rel = num_relocs_for_offset (off - 8);
+ stub_entry->group->stub_sec->reloc_count += num_rel;
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
}
- off = targ - off;
- if (stub_entry->stub_type >= ppc_stub_long_branch_notoc)
+ size = plt_stub_size (htab, stub_entry, off);
+
+ if (!htab->powerxx_stubs)
{
/* After the bcl, lr has been modified so we need to emit
.eh_frame info saying the return address is in r12. */
- unsigned int lr_used = stub_entry->stub_offset + 8;
- unsigned int delta;
- if (stub_entry->stub_type > ppc_stub_long_branch_notoc)
+ lr_used = stub_entry->stub_offset + 8;
+ if (stub_entry->stub_type == ppc_stub_plt_call_both)
lr_used += 4;
/* The eh_frame info will consist of a DW_CFA_advance_loc or
variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
delta = lr_used - stub_entry->group->lr_restore;
stub_entry->group->eh_size += eh_advance_size (delta) + 6;
stub_entry->group->lr_restore = lr_used + 8;
-
- if (off + (1 << 25) >= (bfd_vma) (1 << 26))
- {
- stub_entry->stub_type += (ppc_stub_plt_branch_notoc
- - ppc_stub_long_branch_notoc);
- size += 4;
- }
}
- else
- {
- local_off = PPC64_LOCAL_ENTRY_OFFSET (stub_entry->other);
+ break;
- /* If the branch offset is too big, use a ppc_stub_plt_branch.
- Do the same for -R objects without function descriptors. */
- if ((stub_entry->stub_type == ppc_stub_long_branch_r2off
- && r2off == 0
- && htab->sec_info[stub_entry->target_section->id].toc_off == 0)
- || off + (1 << 25) >= (bfd_vma) (1 << 26) - local_off)
- {
- struct ppc_branch_hash_entry *br_entry;
+ case ppc_stub_plt_call:
+ case ppc_stub_plt_call_r2save:
+ targ = stub_entry->plt_ent->plt.offset & ~(bfd_vma) 1;
+ if (targ >= (bfd_vma) -2)
+ abort ();
+ plt = htab->elf.splt;
+ if (!htab->elf.dynamic_sections_created
+ || stub_entry->h == NULL
+ || stub_entry->h->elf.dynindx == -1)
+ {
+ if (stub_entry->symtype == STT_GNU_IFUNC)
+ plt = htab->elf.iplt;
+ else
+ plt = htab->pltlocal;
+ }
+ targ += plt->output_offset + plt->output_section->vma;
- br_entry = ppc_branch_hash_lookup (&htab->branch_hash_table,
- stub_entry->root.string + 9,
- TRUE, FALSE);
- if (br_entry == NULL)
- {
- _bfd_error_handler (_("can't build branch stub `%s'"),
- stub_entry->root.string);
- htab->stub_error = TRUE;
- return FALSE;
- }
+ off = (elf_gp (info->output_bfd)
+ + htab->sec_info[stub_entry->group->link_sec->id].toc_off);
+ off = targ - off;
- if (br_entry->iter != htab->stub_iteration)
- {
- br_entry->iter = htab->stub_iteration;
- br_entry->offset = htab->brlt->size;
- htab->brlt->size += 8;
+ if (htab->params->plt_stub_align != 0)
+ {
+ unsigned pad = plt_stub_pad (htab, stub_entry, off);
- if (htab->relbrlt != NULL)
- htab->relbrlt->size += sizeof (Elf64_External_Rela);
- else if (info->emitrelocations)
- {
- htab->brlt->reloc_count += 1;
- htab->brlt->flags |= SEC_RELOC;
- }
- }
+ stub_entry->group->stub_sec->size += pad;
+ stub_entry->stub_offset = stub_entry->group->stub_sec->size;
+ }
- targ = (br_entry->offset
- + htab->brlt->output_offset
- + htab->brlt->output_section->vma);
- off = (elf_gp (info->output_bfd)
- + htab->sec_info[stub_entry->group->link_sec->id].toc_off);
- off = targ - off;
+ if (info->emitrelocations)
+ {
+ stub_entry->group->stub_sec->reloc_count
+ += ((PPC_HA (off) != 0)
+ + (htab->opd_abi
+ ? 2 + (htab->params->plt_static_chain
+ && PPC_HA (off + 16) == PPC_HA (off))
+ : 1));
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
+ }
- if (info->emitrelocations)
- {
- stub_entry->group->stub_sec->reloc_count
- += 1 + (PPC_HA (off) != 0);
- stub_entry->group->stub_sec->flags |= SEC_RELOC;
- }
+ size = plt_stub_size (htab, stub_entry, off);
- stub_entry->stub_type
- += ppc_stub_plt_branch - ppc_stub_long_branch;
- if (stub_entry->stub_type != ppc_stub_plt_branch_r2off)
- {
- size = 12;
- if (PPC_HA (off) != 0)
- size = 16;
- }
- else
- {
- size = 16;
- if (PPC_HA (off) != 0)
- size += 4;
-
- if (PPC_HA (r2off) != 0)
- size += 4;
- if (PPC_LO (r2off) != 0)
- size += 4;
- }
- }
- else if (info->emitrelocations)
- {
- stub_entry->group->stub_sec->reloc_count += 1;
- stub_entry->group->stub_sec->flags |= SEC_RELOC;
- }
+ if (stub_entry->h != NULL
+ && (stub_entry->h == htab->tls_get_addr_fd
+ || stub_entry->h == htab->tls_get_addr)
+ && htab->params->tls_get_addr_opt
+ && stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ {
+ /* After the bctrl, lr has been modified so we need to
+ emit .eh_frame info saying the return address is
+ on the stack. In fact we put the EH info specifying
+ that the return address is on the stack *at* the
+ call rather than after it, because the EH info for a
+ call needs to be specified by that point.
+ See libgcc/unwind-dw2.c execute_cfa_program. */
+ lr_used = stub_entry->stub_offset + size - 20;
+ /* The eh_frame info will consist of a DW_CFA_advance_loc
+ or variant, DW_CFA_offset_externed_sf, 65, -stackoff,
+ DW_CFA_advance_loc+4, DW_CFA_restore_extended, 65. */
+ delta = lr_used - stub_entry->group->lr_restore;
+ stub_entry->group->eh_size += eh_advance_size (delta) + 6;
+ stub_entry->group->lr_restore = size - 4;
}
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
}
stub_entry->group->stub_sec->size += size;
&& r_type != R_PPC64_REL14
&& r_type != R_PPC64_REL14_BRTAKEN
&& r_type != R_PPC64_REL14_BRNTAKEN
- && r_type != R_PPC64_PLTCALL)
+ && r_type != R_PPC64_PLTCALL
+ && r_type != R_PPC64_PLTCALL_NOTOC)
continue;
r_symndx = ELF64_R_SYM (rel->r_info);
&& code_sec->output_section != NULL
&& (((hash ? hash->elf.other : sym->st_other)
& STO_PPC64_LOCAL_MASK)
- != 1 << STO_PPC64_LOCAL_BIT)))
+ > 1 << STO_PPC64_LOCAL_BIT)))
stub_type = ppc_stub_long_branch_notoc;
}
else if (stub_type != ppc_stub_plt_call)
= hash ? hash->elf.type : ELF_ST_TYPE (sym->st_info);
stub_entry->other = hash ? hash->elf.other : sym->st_other;
- if (stub_entry->h != NULL)
+ if (hash != NULL
+ && (hash->elf.root.type == bfd_link_hash_defined
+ || hash->elf.root.type == bfd_link_hash_defweak))
htab->stub_globals += 1;
}
}
val = sym->st_value + ent->addend;
- val += PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
+ if (ELF_ST_TYPE (sym->st_info) != STT_GNU_IFUNC)
+ val += PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
if (sym_sec != NULL && sym_sec->output_section != NULL)
val += sym_sec->output_offset + sym_sec->output_section->vma;
if (input_section->owner == htab->params->stub_bfd)
return TRUE;
- BFD_ASSERT (is_ppc64_elf (input_bfd));
+ if (!is_ppc64_elf (input_bfd))
+ {
+ bfd_set_error (bfd_error_wrong_format);
+ return FALSE;
+ }
local_got_ents = elf_local_got_ents (input_bfd);
TOCstart = elf_gp (output_bfd);
Elf_Internal_Rela orig_rel;
reloc_howto_type *howto;
struct reloc_howto_struct alt_howto;
+ uint64_t pinsn;
+ bfd_vma offset;
again:
orig_rel = *rel;
{
_bfd_clear_contents (ppc64_elf_howto_table[r_type],
input_bfd, input_section,
- contents + rel->r_offset);
+ contents, rel->r_offset);
wrel->r_offset = rel->r_offset;
wrel->r_info = 0;
wrel->r_addend = 0;
doing a GD->IE transition. */
if (retval == 2)
{
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0
&& (tls_mask & TLS_GD) == 0)
goto tls_ldgd_opt;
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSGD16_HA:
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0)
goto tls_gdld_hi;
break;
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0)
goto tls_ldgd_opt;
break;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_LD) == 0)
{
unsigned int insn1, insn2;
- bfd_vma offset;
tls_ldgd_opt:
offset = (bfd_vma) -1;
insn2 = 0x7c636a14; /* add 3,3,13 */
if (offset != (bfd_vma) -1)
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
- if ((tls_mask & TLS_EXPLICIT) == 0)
- r_type = (((r_type - (R_PPC64_GOT_TLSGD16 & 3)) & 3)
- + R_PPC64_GOT_TPREL16_DS);
- else
+ if (r_type == R_PPC64_TOC16
+ || r_type == R_PPC64_TOC16_LO)
r_type += R_PPC64_TOC16_DS - R_PPC64_TOC16;
+ else
+ r_type = (((r_type - (R_PPC64_GOT_TLSGD16 & 1)) & 1)
+ + R_PPC64_GOT_TPREL16_DS);
rel->r_info = ELF64_R_INFO (r_symndx, r_type);
}
else
bfd_put_32 (input_bfd, insn1,
contents + rel->r_offset - d_offset);
if (offset != (bfd_vma) -1)
- bfd_put_32 (input_bfd, insn2, contents + offset);
+ {
+ bfd_put_32 (input_bfd, insn2, contents + offset);
+ if (offset + 8 <= input_section->size)
+ {
+ insn2 = bfd_get_32 (input_bfd, contents + offset + 4);
+ if (insn2 == LD_R2_0R1 + STK_TOC (htab))
+ bfd_put_32 (input_bfd, NOP, contents + offset + 4);
+ }
+ }
if ((tls_mask & tls_gd) == 0
&& (tls_gd == 0 || toc_symndx != 0))
{
&& rel + 1 < relend)
{
unsigned int insn2;
- bfd_vma offset = rel->r_offset;
+ enum elf_ppc64_reloc_type r_type1 = ELF64_R_TYPE (rel[1].r_info);
- if (is_plt_seq_reloc (ELF64_R_TYPE (rel[1].r_info)))
+ offset = rel->r_offset;
+ if (is_plt_seq_reloc (r_type1))
{
bfd_put_32 (output_bfd, NOP, contents + offset);
+ if (r_type1 == R_PPC64_PLT_PCREL34
+ || r_type1 == R_PPC64_PLT_PCREL34_NOTOC)
+ bfd_put_32 (output_bfd, NOP, contents + offset + 4);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
break;
}
if (ELF64_R_TYPE (rel[1].r_info) == R_PPC64_PLTCALL)
bfd_put_32 (output_bfd, NOP, contents + offset + 4);
- if ((tls_mask & TLS_TPRELGD) != 0)
+ if ((tls_mask & TLS_GDIE) != 0)
{
/* IE */
r_type = R_PPC64_NONE;
BFD_ASSERT (offset == rel[1].r_offset);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
bfd_put_32 (input_bfd, insn2, contents + offset);
- if ((tls_mask & TLS_TPRELGD) == 0 && toc_symndx != 0)
+ if ((tls_mask & TLS_GDIE) == 0 && toc_symndx != 0)
goto again;
}
break;
&& rel + 1 < relend)
{
unsigned int insn2;
- bfd_vma offset = rel->r_offset;
+ enum elf_ppc64_reloc_type r_type1 = ELF64_R_TYPE (rel[1].r_info);
- if (is_plt_seq_reloc (ELF64_R_TYPE (rel[1].r_info)))
+ offset = rel->r_offset;
+ if (is_plt_seq_reloc (r_type1))
{
bfd_put_32 (output_bfd, NOP, contents + offset);
+ if (r_type1 == R_PPC64_PLT_PCREL34
+ || r_type1 == R_PPC64_PLT_PCREL34_NOTOC)
+ bfd_put_32 (output_bfd, NOP, contents + offset + 4);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
break;
}
if ((tls_mask & TLS_GD) == 0)
{
rel[1].r_info = ELF64_R_INFO (r_symndx, R_PPC64_NONE);
- if ((tls_mask & TLS_TPRELGD) != 0)
+ if ((tls_mask & TLS_GDIE) != 0)
r_type = R_PPC64_TPREL64;
else
{
&& relocation + 0x80008000 <= 0xffffffff)
{
unsigned int insn1, insn2;
- bfd_vma offset = rel->r_offset - d_offset;
+ offset = rel->r_offset - d_offset;
insn1 = bfd_get_32 (input_bfd, contents + offset);
insn2 = bfd_get_32 (input_bfd, contents + offset + 4);
if ((insn1 & 0xffff0000) == ADDIS_R2_R12
case R_PPC64_REL24:
case R_PPC64_REL24_NOTOC:
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
/* Calls to functions with a different TOC, such as calls to
shared objects, need to alter the TOC pointer. This is
done using a linkage stub. A REL24 branching to these
fdh = ppc_follow_link (h->oh);
stub_entry = ppc_get_stub_entry (input_section, sec, fdh, &orig_rel,
htab);
- if (r_type == R_PPC64_PLTCALL
+ if ((r_type == R_PPC64_PLTCALL
+ || r_type == R_PPC64_PLTCALL_NOTOC)
&& stub_entry != NULL
&& stub_entry->stub_type >= ppc_stub_plt_call
&& stub_entry->stub_type <= ppc_stub_plt_call_both)
|| stub_entry->stub_type == ppc_stub_plt_branch_both)
&& (r_type != R_PPC64_REL24_NOTOC
|| ((fdh ? fdh->elf.other : sym->st_other)
- & STO_PPC64_LOCAL_MASK) == 1 << STO_PPC64_LOCAL_BIT)
+ & STO_PPC64_LOCAL_MASK) <= 1 << STO_PPC64_LOCAL_BIT)
&& (relocation + addend - from + max_br_offset
< 2 * max_br_offset))
stub_entry = NULL;
|| stub_entry->stub_type == ppc_stub_plt_call_both)
&& r_type == R_PPC64_REL24_NOTOC)
relocation += 4;
+
+ if (r_type == R_PPC64_REL24_NOTOC
+ && (stub_entry->stub_type == ppc_stub_plt_call_notoc
+ || stub_entry->stub_type == ppc_stub_plt_call_both))
+ htab->notoc_plt = 1;
}
if (insn != 0)
goto copy_reloc;
}
break;
+
+ case R_PPC64_GOT16_DS:
+ from = TOCstart + htab->sec_info[input_section->id].toc_off;
+ if (relocation + addend - from + 0x8000 < 0x10000
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
+ if ((insn & (0x3f << 26 | 0x3)) == 58u << 26 /* ld */)
+ {
+ insn += (14u << 26) - (58u << 26);
+ bfd_put_32 (input_bfd, insn, contents + (rel->r_offset & ~3));
+ r_type = R_PPC64_TOC16;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ }
+ break;
+
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT16_HA:
+ from = TOCstart + htab->sec_info[input_section->id].toc_off;
+ if (relocation + addend - from + 0x80008000ULL < 0x100000000ULL
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
+ if ((insn & (0x3f << 26 | 0x3)) == 58u << 26 /* ld */)
+ {
+ insn += (14u << 26) - (58u << 26);
+ bfd_put_32 (input_bfd, insn, contents + (rel->r_offset & ~3));
+ r_type = R_PPC64_TOC16_LO;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ else if ((insn & (0x3f << 26)) == 15u << 26 /* addis */)
+ {
+ r_type = R_PPC64_TOC16_HA;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ }
+ break;
+
+ case R_PPC64_GOT_PCREL34:
+ from = (rel->r_offset
+ + input_section->output_section->vma
+ + input_section->output_offset);
+ if (relocation - from + (1ULL << 33) < 1ULL << 34
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ offset = rel->r_offset;
+ pinsn = bfd_get_32 (input_bfd, contents + offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + offset + 4);
+ if ((pinsn & ((-1ULL << 50) | (63ULL << 26)))
+ == ((1ULL << 58) | (1ULL << 52) | (57ULL << 26) /* pld */))
+ {
+ /* Replace with paddi. */
+ pinsn += (2ULL << 56) + (14ULL << 26) - (57ULL << 26);
+ r_type = R_PPC64_PCREL34;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ bfd_put_32 (input_bfd, pinsn >> 32, contents + offset);
+ bfd_put_32 (input_bfd, pinsn, contents + offset + 4);
+ goto pcrelopt;
+ }
+ }
+ break;
+
+ case R_PPC64_PCREL34:
+ if (SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ offset = rel->r_offset;
+ pinsn = bfd_get_32 (input_bfd, contents + offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + offset + 4);
+ if ((pinsn & ((-1ULL << 50) | (63ULL << 26)))
+ == ((1ULL << 58) | (2ULL << 56) | (1ULL << 52)
+ | (14ULL << 26) /* paddi */))
+ {
+ pcrelopt:
+ if (rel + 1 < relend
+ && rel[1].r_offset == offset
+ && rel[1].r_info == ELF64_R_INFO (0, R_PPC64_PCREL_OPT))
+ {
+ bfd_vma off2 = rel[1].r_addend;
+ if (off2 == 0)
+ /* zero means next insn. */
+ off2 = 8;
+ off2 += offset;
+ if (off2 + 4 <= input_section->size)
+ {
+ uint64_t pinsn2;
+ pinsn2 = bfd_get_32 (input_bfd, contents + off2);
+ pinsn2 <<= 32;
+ if ((pinsn2 & (63ULL << 58)) == 1ULL << 58)
+ break;
+ if (xlate_pcrel_opt (&pinsn, &pinsn2))
+ {
+ bfd_put_32 (input_bfd, pinsn >> 32,
+ contents + offset);
+ bfd_put_32 (input_bfd, pinsn,
+ contents + offset + 4);
+ bfd_put_32 (input_bfd, pinsn2 >> 32,
+ contents + off2);
+ }
+ }
+ }
+ }
+ }
+ break;
}
/* Set `addend'. */
case R_PPC64_GNU_VTINHERIT:
case R_PPC64_GNU_VTENTRY:
case R_PPC64_ENTRY:
+ case R_PPC64_PCREL_OPT:
goto copy_reloc;
/* GOT16 relocations. Like an ADDR16 using the symbol's
case R_PPC64_GOT16_HA:
case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
dogot:
{
/* Relocation is to the entry for this symbol in the global
bfd_vma off;
unsigned long indx = 0;
struct got_entry *ent;
+ bfd_vma sym_addend = orig_rel.r_addend;
+
+ if (r_type == R_PPC64_GOT_PCREL34)
+ sym_addend = 0;
if (tls_type == (TLS_TLS | TLS_LD)
&& (h == NULL
}
for (; ent != NULL; ent = ent->next)
- if (ent->addend == orig_rel.r_addend
+ if (ent->addend == sym_addend
&& ent->owner == input_bfd
&& ent->tls_type == tls_type)
break;
outrel.r_offset = (got->output_section->vma
+ got->output_offset
+ off);
- outrel.r_addend = addend;
+ outrel.r_addend = sym_addend;
if (tls_type & (TLS_LD | TLS_GD))
{
outrel.r_addend = 0;
bfd_elf64_swap_reloca_out (output_bfd,
&outrel, loc);
outrel.r_offset += 8;
- outrel.r_addend = addend;
+ outrel.r_addend = sym_addend;
outrel.r_info
= ELF64_R_INFO (indx, R_PPC64_DTPREL64);
}
emitting a reloc. */
else
{
- relocation += addend;
+ relocation += sym_addend;
if (tls_type != 0)
{
if (htab->elf.tls_sec == NULL)
abort ();
relocation = got->output_section->vma + got->output_offset + off;
- addend = -(TOCstart + htab->sec_info[input_section->id].toc_off);
+ if (r_type != R_PPC64_GOT_PCREL34)
+ addend = -(TOCstart + htab->sec_info[input_section->id].toc_off);
}
break;
case R_PPC64_PLT16_HI:
case R_PPC64_PLT16_LO:
case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
case R_PPC64_PLT32:
case R_PPC64_PLT64:
case R_PPC64_PLTSEQ:
+ case R_PPC64_PLTSEQ_NOTOC:
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
/* Relocation is to the entry for this symbol in the
procedure linkage table. */
unresolved_reloc = TRUE;
if (plt_list)
{
struct plt_entry *ent;
+ bfd_vma sym_addend = orig_rel.r_addend;
+
+ if (r_type == R_PPC64_PLT_PCREL34
+ || r_type == R_PPC64_PLT_PCREL34_NOTOC)
+ sym_addend = 0;
for (ent = *plt_list; ent != NULL; ent = ent->next)
if (ent->plt.offset != (bfd_vma) -1
- && ent->addend == orig_rel.r_addend)
+ && ent->addend == sym_addend)
{
asection *plt;
bfd_vma got;
+ htab->sec_info[input_section->id].toc_off);
relocation -= got;
}
- addend = 0;
+ if (r_type != R_PPC64_PLT_PCREL34
+ && r_type != R_PPC64_PLT_PCREL34_NOTOC)
+ addend = 0;
unresolved_reloc = FALSE;
break;
}
case R_PPC64_REL16_LO:
case R_PPC64_REL16_HI:
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGH:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHER:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHEST:
+ case R_PPC64_REL16_HIGHESTA:
+ case R_PPC64_REL16_HIGHER34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHEST34:
+ case R_PPC64_REL16_HIGHESTA34:
case R_PPC64_REL16DX_HA:
- break;
-
case R_PPC64_REL14:
case R_PPC64_REL14_BRNTAKEN:
case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL24:
case R_PPC64_REL24_NOTOC:
+ case R_PPC64_PCREL34:
+ case R_PPC64_PCREL28:
break;
case R_PPC64_TPREL16:
case R_PPC64_ADDR16_HIGHESTA:
case R_PPC64_ADDR16_LO:
case R_PPC64_ADDR16_LO_DS:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
case R_PPC64_ADDR24:
case R_PPC64_ADDR32:
case R_PPC64_ADDR64:
case R_PPC64_UADDR16:
case R_PPC64_UADDR32:
case R_PPC64_UADDR64:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_D28:
dodyn:
if ((input_section->flags & SEC_ALLOC) == 0)
break;
}
else
{
- asection *osec;
+ asection *osec = sec->output_section;
- osec = sec->output_section;
- indx = elf_section_data (osec)->dynindx;
-
- if (indx == 0)
+ if ((osec->flags & SEC_THREAD_LOCAL) != 0)
+ {
+ /* TLS symbol values are relative to the
+ TLS segment. Dynamic relocations for
+ local TLS symbols therefore can't be
+ reduced to a relocation against their
+ section symbol because it holds the
+ address of the section, not a value
+ relative to the TLS segment. We could
+ change the .tdata dynamic section symbol
+ to be zero value but STN_UNDEF works
+ and is used elsewhere, eg. for TPREL64
+ GOT relocs against local TLS symbols. */
+ osec = htab->elf.tls_sec;
+ indx = 0;
+ }
+ else
{
- if ((osec->flags & SEC_READONLY) == 0
- && htab->elf.data_index_section != NULL)
- osec = htab->elf.data_index_section;
- else
- osec = htab->elf.text_index_section;
indx = elf_section_data (osec)->dynindx;
+ if (indx == 0)
+ {
+ if ((osec->flags & SEC_READONLY) == 0
+ && htab->elf.data_index_section != NULL)
+ osec = htab->elf.data_index_section;
+ else
+ osec = htab->elf.text_index_section;
+ indx = elf_section_data (osec)->dynindx;
+ }
+ BFD_ASSERT (indx != 0);
}
- BFD_ASSERT (indx != 0);
/* We are turning this relocation into one
against a section symbol, so subtract out
insn. */
break;
+ case R_PPC64_PLTCALL_NOTOC:
+ if (!unresolved_reloc)
+ htab->notoc_plt = 1;
+ /* Fall through. */
case R_PPC64_PLTCALL:
if (unresolved_reloc)
{
insn = bfd_get_32 (input_bfd, p);
insn &= 1;
bfd_put_32 (input_bfd, B_DOT | insn, p);
- bfd_put_32 (input_bfd, NOP, p + 4);
+ if (r_type == R_PPC64_PLTCALL)
+ bfd_put_32 (input_bfd, NOP, p + 4);
unresolved_reloc = save_unresolved_reloc;
r_type = R_PPC64_REL24;
}
break;
+ case R_PPC64_PLTSEQ_NOTOC:
case R_PPC64_PLTSEQ:
if (unresolved_reloc)
{
}
break;
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ if (!unresolved_reloc)
+ htab->notoc_plt = 1;
+ /* Fall through. */
+ case R_PPC64_PLT_PCREL34:
+ if (unresolved_reloc)
+ {
+ bfd_byte *p = contents + rel->r_offset;
+ bfd_put_32 (input_bfd, PNOP >> 32, p);
+ bfd_put_32 (input_bfd, PNOP, p + 4);
+ unresolved_reloc = FALSE;
+ goto copy_reloc;
+ }
+ break;
+
case R_PPC64_PLT16_HA:
if (unresolved_reloc)
{
break;
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHESTA:
case R_PPC64_REL16DX_HA:
case R_PPC64_ADDR16_HA:
case R_PPC64_ADDR16_HIGHA:
addend += 0x8000;
break;
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHESTA34:
+ if (sec != NULL)
+ addend += 1ULL << 33;
+ break;
+
case R_PPC64_ADDR16_DS:
case R_PPC64_ADDR16_LO_DS:
case R_PPC64_GOT16_DS:
}
}
- if (r_type == R_PPC64_REL16DX_HA)
+ switch (r_type)
{
- /* Split field reloc isn't handled by _bfd_final_link_relocate. */
+ /* Split field relocs aren't handled by _bfd_final_link_relocate. */
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_PCREL34:
+ case R_PPC64_GOT_PCREL34:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ case R_PPC64_D28:
+ case R_PPC64_PCREL28:
+ if (rel->r_offset + 8 > input_section->size)
+ r = bfd_reloc_outofrange;
+ else
+ {
+ relocation += addend;
+ if (howto->pc_relative)
+ relocation -= (rel->r_offset
+ + input_section->output_offset
+ + input_section->output_section->vma);
+ relocation >>= howto->rightshift;
+
+ pinsn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + rel->r_offset + 4);
+
+ pinsn &= ~howto->dst_mask;
+ pinsn |= (((relocation << 16) | (relocation & 0xffff))
+ & howto->dst_mask);
+ bfd_put_32 (input_bfd, pinsn >> 32, contents + rel->r_offset);
+ bfd_put_32 (input_bfd, pinsn, contents + rel->r_offset + 4);
+ r = bfd_reloc_ok;
+ if (howto->complain_on_overflow == complain_overflow_signed
+ && (relocation + (1ULL << (howto->bitsize - 1))
+ >= 1ULL << howto->bitsize))
+ r = bfd_reloc_overflow;
+ }
+ break;
+
+ case R_PPC64_REL16DX_HA:
if (rel->r_offset + 4 > input_section->size)
r = bfd_reloc_outofrange;
else
if (relocation + 0x8000 > 0xffff)
r = bfd_reloc_overflow;
}
+ break;
+
+ default:
+ r = _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents, rel->r_offset,
+ relocation, addend);
}
- else
- r = _bfd_final_link_relocate (howto, input_bfd, input_section, contents,
- rel->r_offset, relocation, addend);
if (r != bfd_reloc_ok)
{
break;
case DT_PPC64_OPT:
- if (htab->do_multi_toc && htab->multi_toc_needed)
+ if ((htab->do_multi_toc && htab->multi_toc_needed)
+ || htab->notoc_plt)
dyn.d_un.d_val |= PPC64_OPT_MULTI_TOC;
if (htab->has_plt_localentry0)
dyn.d_un.d_val |= PPC64_OPT_LOCALENTRY;