/* PowerPC64-specific support for 64-bit ELF.
- Copyright (C) 1999-2018 Free Software Foundation, Inc.
+ Copyright (C) 1999-2019 Free Software Foundation, Inc.
Written by Linus Nordberg, Swox AB <info@swox.com>,
based on elf32-ppc.c by Ian Lance Taylor.
Largely rewritten by Alan Modra.
#include "elf64-ppc.h"
#include "dwarf2.h"
+/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1. */
+#define OCTETS_PER_BYTE(ABFD, SEC) 1
+
static bfd_reloc_status_type ppc64_elf_ha_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_reloc_status_type ppc64_elf_branch_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_reloc_status_type ppc64_elf_toc64_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
+static bfd_reloc_status_type ppc64_elf_prefix_reloc
+ (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_reloc_status_type ppc64_elf_unhandled_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_vma opd_entry_value
#define LD_R2_0R12 0xe84c0000 /* ld %r2,0(%r12) */
#define ADD_R2_R2_R12 0x7c426214 /* add %r2,%r2,%r12 */
+#define LI_R11_0 0x39600000 /* li %r11,0 */
#define LIS_R2 0x3c400000 /* lis %r2,xxx@ha */
+#define LIS_R11 0x3d600000 /* lis %r11,xxx@ha */
#define LIS_R12 0x3d800000 /* lis %r12,xxx@ha */
#define ADDIS_R2_R12 0x3c4c0000 /* addis %r2,%r12,xxx@ha */
#define ADDIS_R12_R2 0x3d820000 /* addis %r12,%r2,xxx@ha */
#define ADDIS_R12_R11 0x3d8b0000 /* addis %r12,%r11,xxx@ha */
#define ADDIS_R12_R12 0x3d8c0000 /* addis %r12,%r12,xxx@ha */
#define ORIS_R12_R12_0 0x658c0000 /* oris %r12,%r12,xxx@hi */
+#define ORI_R11_R11_0 0x616b0000 /* ori %r11,%r11,xxx@l */
#define ORI_R12_R12_0 0x618c0000 /* ori %r12,%r12,xxx@l */
#define LD_R12_0R12 0xe98c0000 /* ld %r12,xxx@l(%r12) */
+#define SLDI_R11_R11_34 0x796b1746 /* sldi %r11,%r11,34 */
#define SLDI_R12_R12_32 0x799c07c6 /* sldi %r12,%r12,32 */
#define LDX_R12_R11_R12 0x7d8b602a /* ldx %r12,%r11,%r12 */
#define ADD_R12_R11_R12 0x7d8b6214 /* add %r12,%r11,%r12 */
+#define PADDI_R12_PC 0x0610000039800000ULL
+#define PLD_R12_PC 0x04100000e5800000ULL
+#define PNOP 0x0700000000000000ULL
/* __glink_PLTresolve stub instructions. We enter with the index in R0. */
#define GLINK_PLTRESOLVE_SIZE(htab) \
HOW (R_PPC64_REL16_HA, 1, 16, 0xffff, 16, TRUE, signed,
ppc64_elf_ha_reloc),
+ HOW (R_PPC64_REL16_HIGH, 1, 16, 0xffff, 16, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHA, 1, 16, 0xffff, 16, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHER, 1, 16, 0xffff, 32, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHERA, 1, 16, 0xffff, 32, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHEST, 1, 16, 0xffff, 48, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHESTA, 1, 16, 0xffff, 48, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
/* Like R_PPC64_REL16_HA but for split field in addpcis. */
HOW (R_PPC64_REL16DX_HA, 2, 16, 0x1fffc1, 16, TRUE, signed,
ppc64_elf_ha_reloc),
HOW (R_PPC64_ADDR64_LOCAL, 4, 64, 0xffffffffffffffffULL, 0, FALSE, dont,
bfd_elf_generic_reloc),
+ HOW (R_PPC64_PLTSEQ_NOTOC, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_PLTCALL_NOTOC, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_PCREL_OPT, 2, 32, 0, 0, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_D34, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_LO, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_HI30, 4, 34, 0x3ffff0000ffffULL, 34, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_D34_HA30, 4, 34, 0x3ffff0000ffffULL, 34, FALSE, dont,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_GOT_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_PLT_PCREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_PLT_PCREL34_NOTOC, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_TPREL34, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_DTPREL34, 4, 34, 0x3ffff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_GOT_TLSGD34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_GOT_TLSLD34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_GOT_TPREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_GOT_DTPREL34, 4, 34, 0x3ffff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_unhandled_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHER34, 1, 16, 0xffff, 34, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHERA34, 1, 16, 0xffff, 34, FALSE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHEST34, 1, 16, 0xffff, 50, FALSE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_ADDR16_HIGHESTA34, 1, 16, 0xffff, 50, FALSE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHER34, 1, 16, 0xffff, 34, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHERA34, 1, 16, 0xffff, 34, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_REL16_HIGHEST34, 1, 16, 0xffff, 50, TRUE, dont,
+ bfd_elf_generic_reloc),
+
+ HOW (R_PPC64_REL16_HIGHESTA34, 1, 16, 0xffff, 50, TRUE, dont,
+ ppc64_elf_ha_reloc),
+
+ HOW (R_PPC64_D28, 4, 28, 0xfff0000ffffULL, 0, FALSE, signed,
+ ppc64_elf_prefix_reloc),
+
+ HOW (R_PPC64_PCREL28, 4, 28, 0xfff0000ffffULL, 0, TRUE, signed,
+ ppc64_elf_prefix_reloc),
+
/* GNU extension to record C++ vtable hierarchy. */
HOW (R_PPC64_GNU_VTINHERIT, 0, 0, 0, 0, FALSE, dont,
NULL),
break;
case BFD_RELOC_PPC64_PLTGOT16_LO_DS: r = R_PPC64_PLTGOT16_LO_DS;
break;
+ case BFD_RELOC_PPC64_TLS_PCREL:
case BFD_RELOC_PPC_TLS: r = R_PPC64_TLS;
break;
case BFD_RELOC_PPC_TLSGD: r = R_PPC64_TLSGD;
break;
case BFD_RELOC_HI16_S_PCREL: r = R_PPC64_REL16_HA;
break;
+ case BFD_RELOC_PPC64_REL16_HIGH: r = R_PPC64_REL16_HIGH;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHA: r = R_PPC64_REL16_HIGHA;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHER: r = R_PPC64_REL16_HIGHER;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHERA: r = R_PPC64_REL16_HIGHERA;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHEST: r = R_PPC64_REL16_HIGHEST;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHESTA: r = R_PPC64_REL16_HIGHESTA;
+ break;
case BFD_RELOC_PPC_16DX_HA: r = R_PPC64_16DX_HA;
break;
case BFD_RELOC_PPC_REL16DX_HA: r = R_PPC64_REL16DX_HA;
break;
case BFD_RELOC_PPC64_ADDR64_LOCAL: r = R_PPC64_ADDR64_LOCAL;
break;
+ case BFD_RELOC_PPC64_D34: r = R_PPC64_D34;
+ break;
+ case BFD_RELOC_PPC64_D34_LO: r = R_PPC64_D34_LO;
+ break;
+ case BFD_RELOC_PPC64_D34_HI30: r = R_PPC64_D34_HI30;
+ break;
+ case BFD_RELOC_PPC64_D34_HA30: r = R_PPC64_D34_HA30;
+ break;
+ case BFD_RELOC_PPC64_PCREL34: r = R_PPC64_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_GOT_PCREL34: r = R_PPC64_GOT_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_PLT_PCREL34: r = R_PPC64_PLT_PCREL34;
+ break;
+ case BFD_RELOC_PPC64_TPREL34: r = R_PPC64_TPREL34;
+ break;
+ case BFD_RELOC_PPC64_DTPREL34: r = R_PPC64_DTPREL34;
+ break;
+ case BFD_RELOC_PPC64_GOT_TLSGD34: r = R_PPC64_GOT_TLSGD34;
+ break;
+ case BFD_RELOC_PPC64_GOT_TLSLD34: r = R_PPC64_GOT_TLSLD34;
+ break;
+ case BFD_RELOC_PPC64_GOT_TPREL34: r = R_PPC64_GOT_TPREL34;
+ break;
+ case BFD_RELOC_PPC64_GOT_DTPREL34: r = R_PPC64_GOT_DTPREL34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHER34: r = R_PPC64_ADDR16_HIGHER34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHERA34: r = R_PPC64_ADDR16_HIGHERA34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHEST34: r = R_PPC64_ADDR16_HIGHEST34;
+ break;
+ case BFD_RELOC_PPC64_ADDR16_HIGHESTA34: r = R_PPC64_ADDR16_HIGHESTA34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHER34: r = R_PPC64_REL16_HIGHER34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHERA34: r = R_PPC64_REL16_HIGHERA34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHEST34: r = R_PPC64_REL16_HIGHEST34;
+ break;
+ case BFD_RELOC_PPC64_REL16_HIGHESTA34: r = R_PPC64_REL16_HIGHESTA34;
+ break;
+ case BFD_RELOC_PPC64_D28: r = R_PPC64_D28;
+ break;
+ case BFD_RELOC_PPC64_PCREL28: r = R_PPC64_PCREL28;
+ break;
case BFD_RELOC_VTABLE_INHERIT: r = R_PPC64_GNU_VTINHERIT;
break;
case BFD_RELOC_VTABLE_ENTRY: r = R_PPC64_GNU_VTENTRY;
return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
- /* Adjust the addend for sign extension of the low 16 bits.
- We won't actually be using the low 16 bits, so trashing them
+ /* Adjust the addend for sign extension of the low 16 (or 34) bits.
+ We won't actually be using the low bits, so trashing them
doesn't matter. */
- reloc_entry->addend += 0x8000;
r_type = reloc_entry->howto->type;
+ if (r_type == R_PPC64_ADDR16_HIGHERA34
+ || r_type == R_PPC64_ADDR16_HIGHESTA34
+ || r_type == R_PPC64_REL16_HIGHERA34
+ || r_type == R_PPC64_REL16_HIGHESTA34)
+ reloc_entry->addend += 1ULL << 33;
+ else
+ reloc_entry->addend += 1U << 15;
if (r_type != R_PPC64_REL16DX_HA)
return bfd_reloc_continue;
+ input_section->output_section->vma);
value = (bfd_signed_vma) value >> 16;
- octets = reloc_entry->address * bfd_octets_per_byte (abfd);
+ octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);
insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
insn &= ~0x1fffc1;
insn |= (value & 0xffc1) | ((value & 0x3e) << 15);
return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
- octets = reloc_entry->address * bfd_octets_per_byte (abfd);
+ octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);
insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
insn &= ~(0x01 << 21);
r_type = reloc_entry->howto->type;
if (TOCstart == 0)
TOCstart = ppc64_elf_set_toc (NULL, input_section->output_section->owner);
- octets = reloc_entry->address * bfd_octets_per_byte (abfd);
+ octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);
bfd_put_64 (abfd, TOCstart + TOC_BASE_OFF, (bfd_byte *) data + octets);
return bfd_reloc_ok;
}
+/* Handle the split-field relocations used by prefixed instructions
+   (R_PPC64_D34*, R_PPC64_PCREL34, R_PPC64_D28, R_PPC64_PCREL28) for
+   bfd_perform_relocation.  The value is split across the 8-byte
+   prefixed insn as described by howto->dst_mask: the high part goes
+   into the low bits of the prefix word, the low 16 bits into the
+   d-field of the second word.  */
+
+static bfd_reloc_status_type
+ppc64_elf_prefix_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
+			void *data, asection *input_section,
+			bfd *output_bfd, char **error_message)
+{
+  uint64_t insn;
+  bfd_vma targ;
+
+  if (output_bfd != NULL)
+    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+				  input_section, output_bfd, error_message);
+
+  /* Fetch the prefixed instruction as two big-endian-ordered words
+     packed into a 64-bit value, prefix word in the high half.  */
+  insn = bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address);
+  insn <<= 32;
+  insn |= bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address + 4);
+
+  /* Symbol address plus addend.  */
+  targ = (symbol->section->output_section->vma
+	  + symbol->section->output_offset
+	  + reloc_entry->addend);
+  if (!bfd_is_com_section (symbol->section))
+    targ += symbol->value;
+  if (reloc_entry->howto->type == R_PPC64_D34_HA30)
+    /* Round up for the @ha style adjustment before the shift.  */
+    targ += 1ULL << 33;
+  if (reloc_entry->howto->pc_relative)
+    {
+      bfd_vma from = (reloc_entry->address
+		      + input_section->output_offset
+		      + input_section->output_section->vma);
+      targ -= from;
+    }
+  targ >>= reloc_entry->howto->rightshift;
+
+  /* Splice the value into the two insn words and write them back.  */
+  insn &= ~reloc_entry->howto->dst_mask;
+  insn |= ((targ << 16) | (targ & 0xffff)) & reloc_entry->howto->dst_mask;
+  bfd_put_32 (abfd, insn >> 32, (bfd_byte *) data + reloc_entry->address);
+  bfd_put_32 (abfd, insn, (bfd_byte *) data + reloc_entry->address + 4);
+
+  /* For signed fields, check that the value fits in bitsize bits.  */
+  if (reloc_entry->howto->complain_on_overflow == complain_overflow_signed
+      && (targ + (1ULL << (reloc_entry->howto->bitsize - 1))
+	  >= 1ULL << reloc_entry->howto->bitsize))
+    return bfd_reloc_overflow;
+  return bfd_reloc_ok;
+}
+
static bfd_reloc_status_type
ppc64_elf_unhandled_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
void *data, asection *input_section,
/* Set if toc/got ha relocs detected not using r2, or lo reloc
instruction not one we handle. */
unsigned int unexpected_toc_insn : 1;
+
+ /* Set if PLT/GOT/TOC relocs that can be optimised are present in
+ this file. */
+ unsigned int has_optrel : 1;
};
#define ppc64_elf_tdata(bfd) \
/* Flag set when PLTCALL relocs are detected. */
unsigned int has_pltcall:1;
+
+ /* Flag set when section has PLT/GOT/TOC relocations that can be
+ optimised. */
+ unsigned int has_optrel:1;
};
#define ppc64_elf_section_data(sec) \
if ((a->flags & BSF_DYNAMIC) == 0 && (b->flags & BSF_DYNAMIC) != 0)
return 1;
- return a > b;
+ /* Finally, sort on where the symbol is in memory. The symbols will
+ be in at most two malloc'd blocks, one for static syms, one for
+ dynamic syms, and we distinguish the two blocks above by testing
+ BSF_DYNAMIC. Since we are sorting the symbol pointers which were
+ originally in the same order as the symbols (and we're not
+ sorting the symbols themselves), this ensures a stable sort. */
+ if (a < b)
+ return -1;
+ if (a > b)
+ return 1;
+ return 0;
}
/* Search SYMS for a symbol of the given VALUE. */
case R_PPC64_REL32:
case R_PPC64_REL64:
case R_PPC64_REL30:
+ case R_PPC64_TOC16:
+ case R_PPC64_TOC16_DS:
+ case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_HI:
+ case R_PPC64_TOC16_HA:
+ case R_PPC64_TOC16_LO_DS:
return 0;
case R_PPC64_TPREL16:
case R_PPC64_TPREL16_HIGHEST:
case R_PPC64_TPREL16_HIGHESTA:
case R_PPC64_TPREL64:
+ case R_PPC64_TPREL34:
/* These relocations are relative but in a shared library the
linker doesn't know the thread pointer base. */
return bfd_link_dll (info);
. mtctr %r12
. bctr
+ There are also ELFv1 powerxx variants of these stubs.
+ ppc_stub_long_branch_notoc:
+ . pla %r12,dest@pcrel
+ . b dest
+ ppc_stub_plt_branch_notoc:
+ . lis %r11,(dest-1f)@highesta34
+ . ori %r11,%r11,(dest-1f)@highera34
+ . sldi %r11,%r11,34
+ . 1: pla %r12,dest@pcrel
+ . add %r12,%r11,%r12
+ . mtctr %r12
+ . bctr
+ ppc_stub_plt_call_notoc:
+ . lis %r11,(xxx-1f)@highesta34
+ . ori %r11,%r11,(xxx-1f)@highera34
+ . sldi %r11,%r11,34
+ . 1: pla %r12,xxx@pcrel
+ . ldx %r12,%r11,%r12
+ . mtctr %r12
+ . bctr
+
In cases where the high instructions would add zero, they are
omitted and following instructions modified in some cases.
+ For example, a powerxx ppc_stub_plt_call_notoc might simplify down
+ to
+ . pld %r12,xxx@pcrel
+ . mtctr %r12
+ . bctr
For a given stub group (a set of sections all using the same toc
pointer value) there will be just one stub type used for any
of the other TLS bits are set. tls_optimize clears bits when
optimizing to indicate the corresponding GOT entry type is not
needed. If set, TLS_TLS is never cleared. tls_optimize may also
- set TLS_TPRELGD when a GD reloc turns into a TPREL one. We use a
- separate flag rather than setting TPREL just for convenience in
- distinguishing the two cases.
+ set TLS_GDIE when a GD reloc turns into an IE one.
These flags are also kept for local symbols. */
#define TLS_TLS 1 /* Any TLS reloc. */
#define TLS_GD 2 /* GD reloc. */
#define TLS_TPREL 8 /* TPREL reloc, => IE. */
#define TLS_DTPREL 16 /* DTPREL reloc, => LD. */
#define TLS_MARK 32 /* __tls_get_addr call marked. */
-#define TLS_TPRELGD 64 /* TPREL reloc resulting from GD->IE. */
-#define TLS_EXPLICIT 128 /* Marks TOC section TLS relocs. */
+#define TLS_GDIE 64 /* GOT TPREL reloc resulting from GD->IE. */
+#define TLS_EXPLICIT 256 /* TOC section TLS reloc, not stored. */
unsigned char tls_mask;
/* The above field is also used to mark function symbols. In which
/* Whether plt calls for ELFv2 localentry:0 funcs have been optimized. */
unsigned int has_plt_localentry0:1;
+ /* Whether calls are made via the PLT from NOTOC functions. */
+ unsigned int notoc_plt:1;
+
+ /* Whether to use powerxx instructions in linkage stubs. */
+ unsigned int powerxx_stubs:1;
+
/* Incremented every time we size stubs. */
unsigned int stub_iteration;
/* Nonzero if this section has TLS related relocations. */
#define has_tls_reloc sec_flg0
-/* Nonzero if this section has an old-style call to __tls_get_addr. */
-#define has_tls_get_addr_call sec_flg1
+/* Nonzero if this section has a call to __tls_get_addr lacking marker
+ relocations. */
+#define nomark_tls_get_addr sec_flg1
/* Nonzero if this section has any toc or got relocs. */
#define has_toc_reloc sec_flg2
htab->sfpr = bfd_make_section_anyway_with_flags (dynobj, ".sfpr",
flags);
if (htab->sfpr == NULL
- || !bfd_set_section_alignment (dynobj, htab->sfpr, 2))
+ || !bfd_set_section_alignment (htab->sfpr, 2))
return FALSE;
}
htab->glink = bfd_make_section_anyway_with_flags (dynobj, ".glink",
flags);
if (htab->glink == NULL
- || !bfd_set_section_alignment (dynobj, htab->glink, 3))
+ || !bfd_set_section_alignment (htab->glink, 3))
return FALSE;
/* The part of .glink used by global entry stubs, separate so that
htab->global_entry = bfd_make_section_anyway_with_flags (dynobj, ".glink",
flags);
if (htab->global_entry == NULL
- || !bfd_set_section_alignment (dynobj, htab->global_entry, 2))
+ || !bfd_set_section_alignment (htab->global_entry, 2))
return FALSE;
if (!info->no_ld_generated_unwind_info)
".eh_frame",
flags);
if (htab->glink_eh_frame == NULL
- || !bfd_set_section_alignment (dynobj, htab->glink_eh_frame, 2))
+ || !bfd_set_section_alignment (htab->glink_eh_frame, 2))
return FALSE;
}
flags = SEC_ALLOC | SEC_LINKER_CREATED;
htab->elf.iplt = bfd_make_section_anyway_with_flags (dynobj, ".iplt", flags);
if (htab->elf.iplt == NULL
- || !bfd_set_section_alignment (dynobj, htab->elf.iplt, 3))
+ || !bfd_set_section_alignment (htab->elf.iplt, 3))
return FALSE;
flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
htab->elf.irelplt
= bfd_make_section_anyway_with_flags (dynobj, ".rela.iplt", flags);
if (htab->elf.irelplt == NULL
- || !bfd_set_section_alignment (dynobj, htab->elf.irelplt, 3))
+ || !bfd_set_section_alignment (htab->elf.irelplt, 3))
return FALSE;
/* Create branch lookup table for plt_branch stubs. */
htab->brlt = bfd_make_section_anyway_with_flags (dynobj, ".branch_lt",
flags);
if (htab->brlt == NULL
- || !bfd_set_section_alignment (dynobj, htab->brlt, 3))
+ || !bfd_set_section_alignment (htab->brlt, 3))
return FALSE;
/* Local plt entries, put in .branch_lt but a separate section for
htab->pltlocal = bfd_make_section_anyway_with_flags (dynobj, ".branch_lt",
flags);
if (htab->pltlocal == NULL
- || !bfd_set_section_alignment (dynobj, htab->pltlocal, 3))
+ || !bfd_set_section_alignment (htab->pltlocal, 3))
return FALSE;
if (!bfd_link_pic (info))
htab->relbrlt
= bfd_make_section_anyway_with_flags (dynobj, ".rela.branch_lt", flags);
if (htab->relbrlt == NULL
- || !bfd_set_section_alignment (dynobj, htab->relbrlt, 3))
+ || !bfd_set_section_alignment (htab->relbrlt, 3))
return FALSE;
htab->relpltlocal
= bfd_make_section_anyway_with_flags (dynobj, ".rela.branch_lt", flags);
if (htab->relpltlocal == NULL
- || !bfd_set_section_alignment (dynobj, htab->relpltlocal, 3))
+ || !bfd_set_section_alignment (htab->relpltlocal, 3))
return FALSE;
return TRUE;
got = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
if (!got
- || !bfd_set_section_alignment (abfd, got, 3))
+ || !bfd_set_section_alignment (got, 3))
return FALSE;
relgot = bfd_make_section_anyway_with_flags (abfd, ".rela.got",
flags | SEC_READONLY);
if (!relgot
- || !bfd_set_section_alignment (abfd, relgot, 3))
+ || !bfd_set_section_alignment (relgot, 3))
return FALSE;
ppc64_elf_tdata (abfd)->got = got;
|| r_type == R_PPC64_ADDR14
|| r_type == R_PPC64_ADDR14_BRTAKEN
|| r_type == R_PPC64_ADDR14_BRNTAKEN
- || r_type == R_PPC64_PLTCALL);
+ || r_type == R_PPC64_PLTCALL
+ || r_type == R_PPC64_PLTCALL_NOTOC);
}
/* Relocs on inline plt call sequence insns prior to the call. */
|| r_type == R_PPC64_PLT16_HI
|| r_type == R_PPC64_PLT16_LO
|| r_type == R_PPC64_PLT16_LO_DS
- || r_type == R_PPC64_PLTSEQ);
+ || r_type == R_PPC64_PLT_PCREL34
+ || r_type == R_PPC64_PLT_PCREL34_NOTOC
+ || r_type == R_PPC64_PLTSEQ
+ || r_type == R_PPC64_PLTSEQ_NOTOC);
}
/* Look through the relocs for a section during the first phase, and
sec->has_toc_reloc = 1;
}
- tls_type = 0;
+ r_type = ELF64_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_D28:
+ case R_PPC64_TPREL34:
+ case R_PPC64_DTPREL34:
+ case R_PPC64_PCREL34:
+ case R_PPC64_GOT_PCREL34:
+ case R_PPC64_GOT_TLSGD34:
+ case R_PPC64_GOT_TLSLD34:
+ case R_PPC64_GOT_TPREL34:
+ case R_PPC64_GOT_DTPREL34:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ case R_PPC64_PCREL28:
+ htab->powerxx_stubs = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (r_type)
+ {
+ case R_PPC64_PLT16_HA:
+ case R_PPC64_GOT_TLSLD16_HA:
+ case R_PPC64_GOT_TLSGD16_HA:
+ case R_PPC64_GOT_TPREL16_HA:
+ case R_PPC64_GOT_DTPREL16_HA:
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_TOC16_HA:
+ case R_PPC64_PLT16_LO:
+ case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_GOT_TLSLD16_LO:
+ case R_PPC64_GOT_TLSGD16_LO:
+ case R_PPC64_GOT_TPREL16_LO_DS:
+ case R_PPC64_GOT_DTPREL16_LO_DS:
+ case R_PPC64_GOT16_LO:
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
+ ppc64_elf_tdata (abfd)->has_optrel = 1;
+ ppc64_elf_section_data (sec)->has_optrel = 1;
+ break;
+ default:
+ break;
+ }
+
ifunc = NULL;
if (h != NULL)
{
}
}
- r_type = ELF64_R_TYPE (rel->r_info);
+ tls_type = 0;
switch (r_type)
{
case R_PPC64_TLSGD:
case R_PPC64_GOT_TLSLD16_LO:
case R_PPC64_GOT_TLSLD16_HI:
case R_PPC64_GOT_TLSLD16_HA:
+ case R_PPC64_GOT_TLSLD34:
tls_type = TLS_TLS | TLS_LD;
goto dogottls;
case R_PPC64_GOT_TLSGD16_LO:
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSGD16_HA:
+ case R_PPC64_GOT_TLSGD34:
tls_type = TLS_TLS | TLS_GD;
goto dogottls;
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_HI:
case R_PPC64_GOT_TPREL16_HA:
+ case R_PPC64_GOT_TPREL34:
if (bfd_link_dll (info))
info->flags |= DF_STATIC_TLS;
tls_type = TLS_TLS | TLS_TPREL;
case R_PPC64_GOT_DTPREL16_LO_DS:
case R_PPC64_GOT_DTPREL16_HI:
case R_PPC64_GOT_DTPREL16_HA:
+ case R_PPC64_GOT_DTPREL34:
tls_type = TLS_TLS | TLS_DTPREL;
dogottls:
sec->has_tls_reloc = 1;
- /* Fall through */
+ goto dogot;
case R_PPC64_GOT16:
- case R_PPC64_GOT16_DS:
- case R_PPC64_GOT16_HA:
- case R_PPC64_GOT16_HI:
case R_PPC64_GOT16_LO:
+ case R_PPC64_GOT16_HI:
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
+ dogot:
/* This symbol requires a global offset table entry. */
sec->has_toc_reloc = 1;
if (r_type == R_PPC64_GOT_TLSLD16
case R_PPC64_PLT16_HI:
case R_PPC64_PLT16_LO:
case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
case R_PPC64_PLT32:
case R_PPC64_PLT64:
/* This symbol requires a procedure linkage table entry. */
case R_PPC64_REL16_LO:
case R_PPC64_REL16_HI:
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGH:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHER:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHEST:
+ case R_PPC64_REL16_HIGHESTA:
+ case R_PPC64_REL16_HIGHER34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHEST34:
+ case R_PPC64_REL16_HIGHESTA34:
case R_PPC64_REL16DX_HA:
break;
case R_PPC64_TOC16_HA:
case R_PPC64_TOC16_LO_DS:
sec->has_toc_reloc = 1;
+ if (h != NULL && bfd_link_executable (info))
+ {
+ /* We may need a copy reloc. */
+ h->non_got_ref = 1;
+ /* Strongly prefer a copy reloc over a dynamic reloc.
+ glibc ld.so as of 2019-08 will error out if one of
+ these relocations is emitted. */
+ h->needs_copy = 1;
+ goto dodyn;
+ }
break;
/* Marker reloc. */
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_PPC64_GNU_VTENTRY:
- BFD_ASSERT (h != NULL);
- if (h != NULL
- && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
+ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
return FALSE;
break;
goto rel24;
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
ppc64_elf_section_data (sec)->has_pltcall = 1;
/* Fall through. */
;
else
/* Mark this section as having an old-style call. */
- sec->has_tls_get_addr_call = 1;
+ sec->nomark_tls_get_addr = 1;
}
plt_list = &h->plt.plist;
}
{
struct ppc_link_hash_entry *eh;
eh = (struct ppc_link_hash_entry *) h;
- eh->tls_mask |= tls_type;
+ eh->tls_mask |= tls_type & 0xff;
}
else
if (!update_local_sym_info (abfd, symtab_hdr, r_symndx,
case R_PPC64_TPREL16_HIGHERA:
case R_PPC64_TPREL16_HIGHEST:
case R_PPC64_TPREL16_HIGHESTA:
+ case R_PPC64_TPREL34:
if (bfd_link_dll (info))
info->flags |= DF_STATIC_TLS;
goto dodyn;
case R_PPC64_ADDR16_HIGHESTA:
case R_PPC64_ADDR16_LO:
case R_PPC64_ADDR16_LO_DS:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_D28:
if (h != NULL && !bfd_link_pic (info) && abiversion (abfd) != 1
&& rel->r_addend == 0)
{
/* We may need a .plt entry if this reloc refers to a
function in a shared lib. */
- if (!update_plt_info (abfd, &h->plt.plist, rel->r_addend))
+ if (!update_plt_info (abfd, &h->plt.plist, 0))
return FALSE;
h->pointer_equality_needed = 1;
}
case R_PPC64_UADDR32:
case R_PPC64_UADDR64:
case R_PPC64_TOC:
- if (h != NULL && !bfd_link_pic (info))
+ if (h != NULL && bfd_link_executable (info))
/* We may need a copy reloc. */
h->non_got_ref = 1;
dynamic library if we manage to avoid copy relocs for the
symbol. */
dodyn:
- if ((bfd_link_pic (info)
- && (must_be_dyn_reloc (info, r_type)
- || (h != NULL
- && (!SYMBOLIC_BIND (info, h)
- || h->root.type == bfd_link_hash_defweak
- || !h->def_regular))))
- || (ELIMINATE_COPY_RELOCS
- && !bfd_link_pic (info)
- && h != NULL
- && (h->root.type == bfd_link_hash_defweak
- || !h->def_regular))
+ if ((h != NULL
+ && (h->root.type == bfd_link_hash_defweak
+ || !h->def_regular))
+ || (h != NULL
+ && !bfd_link_executable (info)
+ && !SYMBOLIC_BIND (info, h))
+ || (bfd_link_pic (info)
+ && must_be_dyn_reloc (info, r_type))
|| (!bfd_link_pic (info)
&& ifunc != NULL))
{
s = elf_link_hash_lookup (&htab->elf, buf, TRUE, TRUE, FALSE);
if (s == NULL)
return FALSE;
- if (s->root.type == bfd_link_hash_new
- || (s->root.type = bfd_link_hash_defined
- && s->root.u.def.section == stub_sec))
+ if (s->root.type == bfd_link_hash_new)
{
s->root.type = bfd_link_hash_defined;
s->root.u.def.section = stub_sec;
only references to the symbol are via the global offset table.
For such cases we need not do anything here; the relocations will
be handled correctly by relocate_section. */
- if (bfd_link_pic (info))
+ if (!bfd_link_executable (info))
return TRUE;
/* If there are no references to this symbol that do not use the
/* If we don't find any dynamic relocs in read-only sections, then
we'll be keeping the dynamic relocs and avoiding the copy reloc. */
- || (ELIMINATE_COPY_RELOCS && !alias_readonly_dynrelocs (h))
+ || (ELIMINATE_COPY_RELOCS
+ && !h->needs_copy
+ && !alias_readonly_dynrelocs (h))
/* Protected variables do not work with .dynbss. The copy in
.dynbss won't be used by the shared library with the protected
struct ppc_link_hash_entry *eh;
_bfd_elf_link_hash_hide_symbol (info, h, force_local);
+ if (ppc_hash_table (info) == NULL)
+ return;
+
eh = (struct ppc_link_hash_entry *) h;
if (eh->is_func_descriptor)
{
default:
return TRUE;
+ case R_PPC64_TOC16:
+ case R_PPC64_TOC16_DS:
+ case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_HI:
+ case R_PPC64_TOC16_HA:
+ case R_PPC64_TOC16_LO_DS:
+ if (h == NULL)
+ return TRUE;
+ break;
+
case R_PPC64_TPREL16:
case R_PPC64_TPREL16_LO:
case R_PPC64_TPREL16_HI:
case R_PPC64_TPREL16_HIGHEST:
case R_PPC64_TPREL16_HIGHESTA:
case R_PPC64_TPREL64:
+ case R_PPC64_TPREL34:
case R_PPC64_DTPMOD64:
case R_PPC64_DTPREL64:
case R_PPC64_ADDR64:
case R_PPC64_UADDR32:
case R_PPC64_UADDR64:
case R_PPC64_TOC:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_D28:
break;
}
return FALSE;
}
- if ((bfd_link_pic (info)
- && (must_be_dyn_reloc (info, r_type)
- || (h != NULL
- && (!SYMBOLIC_BIND (info, h)
- || h->root.type == bfd_link_hash_defweak
- || !h->def_regular))))
- || (ELIMINATE_COPY_RELOCS
- && !bfd_link_pic (info)
- && h != NULL
- && (h->root.type == bfd_link_hash_defweak
- || !h->def_regular)))
+ if ((h != NULL
+ && (h->root.type == bfd_link_hash_defweak
+ || !h->def_regular))
+ || (h != NULL
+ && !bfd_link_executable (info)
+ && !SYMBOLIC_BIND (info, h))
+ || (bfd_link_pic (info)
+ && must_be_dyn_reloc (info, r_type))
+ || (!bfd_link_pic (info)
+ && (h != NULL
+ ? h->type == STT_GNU_IFUNC
+ : ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)))
;
else
return TRUE;
unsigned char *tls_maskp;
r_type = ELF64_R_TYPE (rel->r_info);
- if (r_type != R_PPC64_PLTCALL)
+ if (r_type != R_PPC64_PLTCALL
+ && r_type != R_PPC64_PLTCALL_NOTOC)
continue;
r_symndx = ELF64_R_SYM (rel->r_info);
from = (rel->r_offset
+ sec->output_offset
+ sec->output_section->vma);
- if (to - from + limit < 2 * limit)
+ if (to - from + limit < 2 * limit
+ && !(r_type == R_PPC64_PLTCALL_NOTOC
+ && (((h ? h->other : sym->st_other)
+ & STO_PPC64_LOCAL_MASK)
+ > 1 << STO_PPC64_LOCAL_BIT)))
*tls_maskp &= ~PLT_KEEP;
}
}
Elf_Internal_Sym *sym;
asection *sym_sec;
unsigned char *tls_mask;
- unsigned char tls_set, tls_clear, tls_type = 0;
+ unsigned int tls_set, tls_clear, tls_type = 0;
bfd_vma value;
bfd_boolean ok_tprel, is_local;
long toc_ref_index = 0;
value = sym->st_value;
ok_tprel = FALSE;
- is_local = FALSE;
- if (h == NULL
- || !h->def_dynamic)
+ is_local = SYMBOL_REFERENCES_LOCAL (info, h);
+ if (is_local)
{
- is_local = TRUE;
if (h != NULL
&& h->root.type == bfd_link_hash_undefweak)
ok_tprel = TRUE;
{
value += sym_sec->output_offset;
value += sym_sec->output_section->vma;
- value -= htab->elf.tls_sec->vma;
- ok_tprel = (value + TP_OFFSET + ((bfd_vma) 1 << 31)
- < (bfd_vma) 1 << 32);
+ value -= htab->elf.tls_sec->vma + TP_OFFSET;
+ /* Note that even though the prefix insns
+ allow a 1<<33 offset we use the same test
+ as for addis;addi. There may be a mix of
+ pcrel and non-pcrel code and the decision
+ to optimise is per symbol, not per TLS
+ sequence. */
+ ok_tprel = value + 0x80008000ULL < 1ULL << 32;
}
}
setup insn. If we don't find matching arg setup
relocs, don't do any tls optimization. */
if (pass == 0
- && sec->has_tls_get_addr_call
+ && sec->nomark_tls_get_addr
&& h != NULL
&& (h == &htab->tls_get_addr->elf
|| h == &htab->tls_get_addr_fd->elf)
{
case R_PPC64_GOT_TLSLD16:
case R_PPC64_GOT_TLSLD16_LO:
+ case R_PPC64_GOT_TLSLD34:
expecting_tls_get_addr = 1;
found_tls_get_addr_arg = 1;
/* Fall through. */
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
+ case R_PPC64_GOT_TLSGD34:
expecting_tls_get_addr = 1;
found_tls_get_addr_arg = 1;
/* Fall through. */
tls_set = 0;
else
/* GD -> IE */
- tls_set = TLS_TLS | TLS_TPRELGD;
+ tls_set = TLS_TLS | TLS_GDIE;
tls_clear = TLS_GD;
tls_type = TLS_TLS | TLS_GD;
break;
+ case R_PPC64_GOT_TPREL34:
case R_PPC64_GOT_TPREL16_DS:
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_HI:
}
continue;
- case R_PPC64_TLSGD:
case R_PPC64_TLSLD:
+ if (!is_local)
+ continue;
+ /* Fall through. */
+ case R_PPC64_TLSGD:
if (rel + 1 < relend
&& is_plt_seq_reloc (ELF64_R_TYPE (rel[1].r_info)))
{
if (pass != 0
&& (ELF64_R_TYPE (rel[1].r_info)
- != R_PPC64_PLTSEQ))
+ != R_PPC64_PLTSEQ)
+ && (ELF64_R_TYPE (rel[1].r_info)
+ != R_PPC64_PLTSEQ_NOTOC))
{
r_symndx = ELF64_R_SYM (rel[1].r_info);
if (!get_sym_h (&h, NULL, NULL, NULL, &locsyms,
tls_set = TLS_EXPLICIT | TLS_GD;
else
/* GD -> IE */
- tls_set = TLS_EXPLICIT | TLS_GD | TLS_TPRELGD;
+ tls_set = TLS_EXPLICIT | TLS_GD | TLS_GDIE;
tls_clear = TLS_GD;
}
else
if (pass == 0)
{
if (!expecting_tls_get_addr
- || !sec->has_tls_get_addr_call)
+ || !sec->nomark_tls_get_addr)
continue;
if (rel + 1 < relend
Disable optimization in this case. */
if ((tls_clear & (TLS_GD | TLS_LD)) != 0
&& (tls_set & TLS_EXPLICIT) == 0
- && !sec->has_tls_get_addr_call
+ && !sec->nomark_tls_get_addr
&& ((*tls_mask & (TLS_TLS | TLS_MARK))
!= (TLS_TLS | TLS_MARK)))
continue;
- if (expecting_tls_get_addr)
+ if (expecting_tls_get_addr == 1 + !sec->nomark_tls_get_addr)
{
struct plt_entry *ent = NULL;
}
}
- *tls_mask |= tls_set;
+ *tls_mask |= tls_set & 0xff;
*tls_mask &= ~tls_clear;
}
+/* Return true if INSN, an insn with a 16-bit displacement field, is
+   one of the load/store/arith forms permitted to carry a _LO style
+   toc or got reloc R_TYPE in that field.  Used to diagnose insns the
+   toc/got optimization code cannot handle.  */
+
static bfd_boolean
ok_lo_toc_insn (unsigned int insn, enum elf_ppc64_reloc_type r_type)
{
-  return ((insn & (0x3f << 26)) == 12u << 26 /* addic */
-          || (insn & (0x3f << 26)) == 14u << 26 /* addi */
-          || (insn & (0x3f << 26)) == 32u << 26 /* lwz */
-          || (insn & (0x3f << 26)) == 34u << 26 /* lbz */
-          || (insn & (0x3f << 26)) == 36u << 26 /* stw */
-          || (insn & (0x3f << 26)) == 38u << 26 /* stb */
-          || (insn & (0x3f << 26)) == 40u << 26 /* lhz */
-          || (insn & (0x3f << 26)) == 42u << 26 /* lha */
-          || (insn & (0x3f << 26)) == 44u << 26 /* sth */
-          || (insn & (0x3f << 26)) == 46u << 26 /* lmw */
-          || (insn & (0x3f << 26)) == 47u << 26 /* stmw */
-          || (insn & (0x3f << 26)) == 48u << 26 /* lfs */
-          || (insn & (0x3f << 26)) == 50u << 26 /* lfd */
-          || (insn & (0x3f << 26)) == 52u << 26 /* stfs */
-          || (insn & (0x3f << 26)) == 54u << 26 /* stfd */
-          || (insn & (0x3f << 26)) == 56u << 26 /* lq,lfq */
-          || ((insn & (0x3f << 26)) == 57u << 26 /* lxsd,lxssp,lfdp */
+  return ((insn & (0x3fu << 26)) == 12u << 26 /* addic */
+          || (insn & (0x3fu << 26)) == 14u << 26 /* addi */
+          || (insn & (0x3fu << 26)) == 32u << 26 /* lwz */
+          || (insn & (0x3fu << 26)) == 34u << 26 /* lbz */
+          || (insn & (0x3fu << 26)) == 36u << 26 /* stw */
+          || (insn & (0x3fu << 26)) == 38u << 26 /* stb */
+          || (insn & (0x3fu << 26)) == 40u << 26 /* lhz */
+          || (insn & (0x3fu << 26)) == 42u << 26 /* lha */
+          || (insn & (0x3fu << 26)) == 44u << 26 /* sth */
+          || (insn & (0x3fu << 26)) == 46u << 26 /* lmw */
+          || (insn & (0x3fu << 26)) == 47u << 26 /* stmw */
+          || (insn & (0x3fu << 26)) == 48u << 26 /* lfs */
+          || (insn & (0x3fu << 26)) == 50u << 26 /* lfd */
+          || (insn & (0x3fu << 26)) == 52u << 26 /* stfs */
+          || (insn & (0x3fu << 26)) == 54u << 26 /* stfd */
+          || (insn & (0x3fu << 26)) == 56u << 26 /* lq,lfq */
+          || ((insn & (0x3fu << 26)) == 57u << 26 /* lxsd,lxssp,lfdp */
              /* Exclude lfqu by testing reloc.  If relocs are ever
                 defined for the reduced D field in psq_lu then those
                 will need testing too.  */
              && r_type != R_PPC64_TOC16_LO && r_type != R_PPC64_GOT16_LO)
-          || ((insn & (0x3f << 26)) == 58u << 26 /* ld,lwa */
+          || ((insn & (0x3fu << 26)) == 58u << 26 /* ld,lwa */
              && (insn & 1) == 0)
-          || (insn & (0x3f << 26)) == 60u << 26 /* stfq */
-          || ((insn & (0x3f << 26)) == 61u << 26 /* lxv,stx{v,sd,ssp},stfdp */
+          || (insn & (0x3fu << 26)) == 60u << 26 /* stfq */
+          || ((insn & (0x3fu << 26)) == 61u << 26 /* lxv,stx{v,sd,ssp},stfdp */
              /* Exclude stfqu.  psq_stu as above for psq_lu.  */
              && r_type != R_PPC64_TOC16_LO && r_type != R_PPC64_GOT16_LO)
-          || ((insn & (0x3f << 26)) == 62u << 26 /* std,stq */
+          || ((insn & (0x3fu << 26)) == 62u << 26 /* std,stq */
              && (insn & 1) == 0));
}
+/* PCREL_OPT in one instance flags to the linker that a pair of insns:
+     pld ra,symbol@got@pcrel
+     load/store rt,off(ra)
+   or
+     pla ra,symbol@pcrel
+     load/store rt,off(ra)
+   may be translated to
+     pload/pstore rt,symbol+off@pcrel
+     nop.
+   This function returns true if the optimization is possible, placing
+   the prefix insn in *PINSN1, a NOP in *PINSN2 and the offset in *POFF.
+
+   On entry to this function, the linker has already determined that
+   the pld can be replaced with pla: *PINSN1 is that pla insn,
+   while *PINSN2 is the second instruction.  */
+
+static bfd_boolean
+xlate_pcrel_opt (uint64_t *pinsn1, uint64_t *pinsn2, bfd_signed_vma *poff)
+{
+  uint64_t insn1 = *pinsn1;
+  uint64_t insn2 = *pinsn2;
+  bfd_signed_vma off;
+
+  /* Is the second insn itself a prefixed insn (primary opcode 1 in
+     the prefix word)?  */
+  if ((insn2 & (63ULL << 58)) == 1ULL << 58)
+    {
+      /* Check that regs match.  */
+      if (((insn2 >> 16) & 31) != ((insn1 >> 21) & 31))
+        return FALSE;
+
+      /* P8LS or PMLS form, non-pcrel.  */
+      if ((insn2 & (-1ULL << 50) & ~(1ULL << 56)) != (1ULL << 58))
+        return FALSE;
+
+      /* Keep insn2's opcode and RT, clear RA and both displacement
+         fields, and set the pcrel bit (1ULL << 52 — presumably the R
+         bit of the prefix; confirm against ISA 3.1).
+         NOTE(review): ~(31 << 16) relies on sign extension when
+         converted to uint64_t; it works, but 31ULL << 16 would be
+         clearer.  */
+      *pinsn1 = (insn2 & ~(31 << 16) & ~0x3ffff0000ffffULL) | (1ULL << 52);
+      *pinsn2 = PNOP;
+      off = ((insn2 >> 16) & 0x3ffff0000ULL) | (insn2 & 0xffff);
+      /* Sign-extend the 34-bit displacement.  */
+      *poff = (off ^ 0x200000000ULL) - 0x200000000ULL;
+      return TRUE;
+    }
+
+  /* Otherwise the second insn is a word insn held in the top 32 bits
+     of insn2.  */
+  insn2 >>= 32;
+
+  /* Check that regs match.  */
+  if (((insn2 >> 16) & 31) != ((insn1 >> 21) & 31))
+    return FALSE;
+
+  /* Map the word insn's primary opcode to the corresponding prefixed
+     load/store, building the new prefix word in insn1.  */
+  switch ((insn2 >> 26) & 63)
+    {
+    default:
+      return FALSE;
+
+    case 32: /* lwz */
+    case 34: /* lbz */
+    case 36: /* stw */
+    case 38: /* stb */
+    case 40: /* lhz */
+    case 42: /* lha */
+    case 44: /* sth */
+    case 48: /* lfs */
+    case 50: /* lfd */
+    case 52: /* stfs */
+    case 54: /* stfd */
+      /* These are the PMLS cases, where we just need to tack a prefix
+         on the insn.  */
+      insn1 = ((1ULL << 58) | (2ULL << 56) | (1ULL << 52)
+               | (insn2 & ((63ULL << 26) | (31ULL << 21))));
+      off = insn2 & 0xffff;
+      break;
+
+    case 58: /* lwa, ld */
+      if ((insn2 & 1) != 0)
+        return FALSE;
+      insn1 = ((1ULL << 58) | (1ULL << 52)
+               | (insn2 & 2 ? 41ULL << 26 : 57ULL << 26)
+               | (insn2 & (31ULL << 21)));
+      off = insn2 & 0xfffc;
+      break;
+
+    case 57: /* lxsd, lxssp */
+      if ((insn2 & 3) < 2)
+        return FALSE;
+      insn1 = ((1ULL << 58) | (1ULL << 52)
+               | ((40ULL | (insn2 & 3)) << 26)
+               | (insn2 & (31ULL << 21)));
+      off = insn2 & 0xfffc;
+      break;
+
+    case 61: /* stxsd, stxssp, lxv, stxv */
+      if ((insn2 & 3) == 0)
+        return FALSE;
+      else if ((insn2 & 3) >= 2)
+        {
+          insn1 = ((1ULL << 58) | (1ULL << 52)
+                   | ((44ULL | (insn2 & 3)) << 26)
+                   | (insn2 & (31ULL << 21)));
+          off = insn2 & 0xfffc;
+        }
+      else
+        {
+          insn1 = ((1ULL << 58) | (1ULL << 52)
+                   | ((50ULL | (insn2 & 4) | ((insn2 & 8) >> 3)) << 26)
+                   | (insn2 & (31ULL << 21)));
+          off = insn2 & 0xfff0;
+        }
+      break;
+
+    case 56: /* lq */
+      insn1 = ((1ULL << 58) | (1ULL << 52)
+               | (insn2 & ((63ULL << 26) | (31ULL << 21))));
+      off = insn2 & 0xffff;
+      break;
+
+    case 62: /* std, stq */
+      if ((insn2 & 1) != 0)
+        return FALSE;
+      insn1 = ((1ULL << 58) | (1ULL << 52)
+               | ((insn2 & 2) == 0 ? 61ULL << 26 : 60ULL << 26)
+               | (insn2 & (31ULL << 21)));
+      off = insn2 & 0xfffc;
+      break;
+    }
+
+  *pinsn1 = insn1;
+  /* Replace the word insn with a nop, kept in the top half to mirror
+     the input layout.  */
+  *pinsn2 = (uint64_t) NOP << 32;
+  /* Sign-extend the 16-bit displacement.  */
+  *poff = (off ^ 0x8000) - 0x8000;
+  return TRUE;
+}
+
/* Examine all relocs referencing .toc sections in order to remove
unused .toc entries. */
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
bfd_vma val;
- enum {no_check, check_lo, check_ha} insn_check;
r_type = ELF64_R_TYPE (rel->r_info);
switch (r_type)
{
- default:
- insn_check = no_check;
- break;
-
- case R_PPC64_GOT_TLSLD16_HA:
- case R_PPC64_GOT_TLSGD16_HA:
- case R_PPC64_GOT_TPREL16_HA:
- case R_PPC64_GOT_DTPREL16_HA:
- case R_PPC64_GOT16_HA:
- case R_PPC64_TOC16_HA:
- insn_check = check_ha;
- break;
-
- case R_PPC64_GOT_TLSLD16_LO:
- case R_PPC64_GOT_TLSGD16_LO:
- case R_PPC64_GOT_TPREL16_LO_DS:
- case R_PPC64_GOT_DTPREL16_LO_DS:
- case R_PPC64_GOT16_LO:
- case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_TOC16:
case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_HI:
+ case R_PPC64_TOC16_HA:
+ case R_PPC64_TOC16_DS:
case R_PPC64_TOC16_LO_DS:
- insn_check = check_lo;
- break;
- }
-
- if (insn_check != no_check)
- {
- bfd_vma off = rel->r_offset & ~3;
- unsigned char buf[4];
- unsigned int insn;
-
- if (!bfd_get_section_contents (ibfd, sec, buf, off, 4))
- {
- free (used);
- goto error_ret;
- }
- insn = bfd_get_32 (ibfd, buf);
- if (insn_check == check_lo
- ? !ok_lo_toc_insn (insn, r_type)
- : ((insn & ((0x3f << 26) | 0x1f << 16))
- != ((15u << 26) | (2 << 16)) /* addis rt,2,imm */))
- {
- char str[12];
-
- ppc64_elf_tdata (ibfd)->unexpected_toc_insn = 1;
- sprintf (str, "%#08x", insn);
- info->callbacks->einfo
- /* xgettext:c-format */
- (_("%H: toc optimization is not supported for"
- " %s instruction\n"),
- ibfd, sec, rel->r_offset & ~3, str);
- }
- }
-
- switch (r_type)
- {
- case R_PPC64_TOC16:
- case R_PPC64_TOC16_LO:
- case R_PPC64_TOC16_HI:
- case R_PPC64_TOC16_HA:
- case R_PPC64_TOC16_DS:
- case R_PPC64_TOC16_LO_DS:
- /* In case we're taking addresses of toc entries. */
- case R_PPC64_ADDR64:
+ /* In case we're taking addresses of toc entries. */
+ case R_PPC64_ADDR64:
break;
default:
free (skip);
}
+ /* Look for cases where we can change an indirect GOT access to
+ a GOT relative or PC relative access, possibly reducing the
+ number of GOT entries. */
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
+ {
+ asection *sec;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Sym *local_syms;
+ Elf_Internal_Rela *relstart, *rel;
+ bfd_vma got;
+
+ if (!is_ppc64_elf (ibfd))
+ continue;
+
+ if (!ppc64_elf_tdata (ibfd)->has_optrel)
+ continue;
+
+ sec = ppc64_elf_tdata (ibfd)->got;
+ got = 0;
+ if (sec != NULL)
+ got = sec->output_section->vma + sec->output_offset + 0x8000;
+
+ local_syms = NULL;
+ symtab_hdr = &elf_symtab_hdr (ibfd);
+
+ for (sec = ibfd->sections; sec != NULL; sec = sec->next)
+ {
+ if (sec->reloc_count == 0
+ || !ppc64_elf_section_data (sec)->has_optrel
+ || discarded_section (sec))
+ continue;
+
+ relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
+ info->keep_memory);
+ if (relstart == NULL)
+ {
+ got_error_ret:
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ free (local_syms);
+ if (sec != NULL
+ && relstart != NULL
+ && elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ return FALSE;
+ }
+
+ for (rel = relstart; rel < relstart + sec->reloc_count; ++rel)
+ {
+ enum elf_ppc64_reloc_type r_type;
+ unsigned long r_symndx;
+ Elf_Internal_Sym *sym;
+ asection *sym_sec;
+ struct elf_link_hash_entry *h;
+ struct got_entry *ent;
+ bfd_vma val, pc;
+ unsigned char buf[8];
+ unsigned int insn;
+ enum {no_check, check_lo, check_ha} insn_check;
+
+ r_type = ELF64_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ default:
+ insn_check = no_check;
+ break;
+
+ case R_PPC64_PLT16_HA:
+ case R_PPC64_GOT_TLSLD16_HA:
+ case R_PPC64_GOT_TLSGD16_HA:
+ case R_PPC64_GOT_TPREL16_HA:
+ case R_PPC64_GOT_DTPREL16_HA:
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_TOC16_HA:
+ insn_check = check_ha;
+ break;
+
+ case R_PPC64_PLT16_LO:
+ case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_GOT_TLSLD16_LO:
+ case R_PPC64_GOT_TLSGD16_LO:
+ case R_PPC64_GOT_TPREL16_LO_DS:
+ case R_PPC64_GOT_DTPREL16_LO_DS:
+ case R_PPC64_GOT16_LO:
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_LO_DS:
+ insn_check = check_lo;
+ break;
+ }
+
+ if (insn_check != no_check)
+ {
+ bfd_vma off = rel->r_offset & ~3;
+
+ if (!bfd_get_section_contents (ibfd, sec, buf, off, 4))
+ goto got_error_ret;
+
+ insn = bfd_get_32 (ibfd, buf);
+ if (insn_check == check_lo
+ ? !ok_lo_toc_insn (insn, r_type)
+ : ((insn & ((0x3fu << 26) | 0x1f << 16))
+ != ((15u << 26) | (2 << 16)) /* addis rt,2,imm */))
+ {
+ char str[12];
+
+ ppc64_elf_tdata (ibfd)->unexpected_toc_insn = 1;
+ sprintf (str, "%#08x", insn);
+ info->callbacks->einfo
+ /* xgettext:c-format */
+ (_("%H: got/toc optimization is not supported for"
+ " %s instruction\n"),
+ ibfd, sec, rel->r_offset & ~3, str);
+ continue;
+ }
+ }
+
+ switch (r_type)
+ {
+ /* Note that we don't delete GOT entries for
+ R_PPC64_GOT16_DS since we'd need a lot more
+ analysis. For starters, the preliminary layout is
+ before the GOT, PLT, dynamic sections and stubs are
+ laid out. Then we'd need to allow for changes in
+ distance between sections caused by alignment. */
+ default:
+ continue;
+
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
+ break;
+ }
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ if (!get_sym_h (&h, &sym, &sym_sec, NULL, &local_syms,
+ r_symndx, ibfd))
+ goto got_error_ret;
+
+ if (sym_sec == NULL
+ || sym_sec->output_section == NULL
+ || discarded_section (sym_sec))
+ continue;
+
+ if (!SYMBOL_REFERENCES_LOCAL (info, h))
+ continue;
+
+ if (h != NULL)
+ val = h->root.u.def.value;
+ else
+ val = sym->st_value;
+ val += rel->r_addend;
+ val += sym_sec->output_section->vma + sym_sec->output_offset;
+
+/* Fudge factor to allow for the fact that the preliminary layout
+ isn't exact. Reduce limits by this factor. */
+#define LIMIT_ADJUST(LIMIT) ((LIMIT) - (LIMIT) / 16)
+
+ switch (r_type)
+ {
+ default:
+ continue;
+
+ case R_PPC64_GOT16_HA:
+ if (val - got + LIMIT_ADJUST (0x80008000ULL)
+ >= LIMIT_ADJUST (0x100000000ULL))
+ continue;
+
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 4))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if (((insn & ((0x3fu << 26) | 0x1f << 16))
+ != ((15u << 26) | (2 << 16)) /* addis rt,2,imm */))
+ continue;
+ break;
+
+ case R_PPC64_GOT16_LO_DS:
+ if (val - got + LIMIT_ADJUST (0x80008000ULL)
+ >= LIMIT_ADJUST (0x100000000ULL))
+ continue;
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 4))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if ((insn & (0x3fu << 26 | 0x3)) != 58u << 26 /* ld */)
+ continue;
+ break;
+
+ case R_PPC64_GOT_PCREL34:
+ pc = rel->r_offset;
+ pc += sec->output_section->vma + sec->output_offset;
+ if (val - pc + LIMIT_ADJUST (1ULL << 33)
+ >= LIMIT_ADJUST (1ULL << 34))
+ continue;
+ if (!bfd_get_section_contents (ibfd, sec, buf,
+ rel->r_offset & ~3, 8))
+ goto got_error_ret;
+ insn = bfd_get_32 (ibfd, buf);
+ if ((insn & (-1u << 18)) != ((1u << 26) | (1u << 20)))
+ continue;
+ insn = bfd_get_32 (ibfd, buf + 4);
+ if ((insn & (0x3fu << 26)) != 57u << 26)
+ continue;
+ break;
+ }
+#undef LIMIT_ADJUST
+
+ if (h != NULL)
+ ent = h->got.glist;
+ else
+ {
+ struct got_entry **local_got_ents = elf_local_got_ents (ibfd);
+ ent = local_got_ents[r_symndx];
+ }
+ for (; ent != NULL; ent = ent->next)
+ if (ent->addend == rel->r_addend
+ && ent->owner == ibfd
+ && ent->tls_type == 0)
+ break;
+ BFD_ASSERT (ent && ent->got.refcount > 0);
+ ent->got.refcount -= 1;
+ }
+
+ if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ }
+
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+ }
+
return TRUE;
}
htab->got_reli_size += rentsize;
}
else if (((bfd_link_pic (info)
- && !((gent->tls_type & TLS_TPREL) != 0
+ && !(gent->tls_type != 0
&& bfd_link_executable (info)
&& SYMBOL_REFERENCES_LOCAL (info, h)))
|| (htab->elf.dynamic_sections_created
eh = (struct ppc_link_hash_entry *) h;
/* Run through the TLS GD got entries first if we're changing them
to TPREL. */
- if ((eh->tls_mask & (TLS_TLS | TLS_TPRELGD)) == (TLS_TLS | TLS_TPRELGD))
+ if ((eh->tls_mask & (TLS_TLS | TLS_GDIE)) == (TLS_TLS | TLS_GDIE))
for (gent = h->got.glist; gent != NULL; gent = gent->next)
if (gent->got.refcount > 0
&& (gent->tls_type & TLS_GD) != 0)
if (gent->got.refcount > 0)
{
if ((gent->tls_type & TLS_LD) != 0
- && !h->def_dynamic)
+ && SYMBOL_REFERENCES_LOCAL (info, h))
{
ppc64_tlsld_got (gent->owner)->got.refcount += 1;
*pgent = gent->next;
for (gent = h->got.glist; gent != NULL; gent = gent->next)
if (!gent->is_indirect)
{
- /* Make sure this symbol is output as a dynamic symbol. */
+ /* Ensure we catch all the cases where this symbol should
+ be made dynamic. */
if (!ensure_undef_dynamic (info, h))
return FALSE;
be defined in regular objects. For the normal shared case,
discard space for relocs that have become local due to symbol
visibility changes. */
-
if (bfd_link_pic (info))
{
/* Relocs that use pc_count are those that appear on a call
if (eh->dyn_relocs != NULL)
{
- /* Make sure this symbol is output as a dynamic symbol. */
+ /* Ensure we catch all the cases where this symbol
+ should be made dynamic. */
if (!ensure_undef_dynamic (info, h))
return FALSE;
}
}
- else if (ELIMINATE_COPY_RELOCS && h->type != STT_GNU_IFUNC)
+
+ /* For a fixed position executable, discard space for
+ relocs against symbols which are not dynamic. */
+ else if (h->type != STT_GNU_IFUNC)
{
- /* For the non-pic case, discard space for relocs against
- symbols which turn out to need copy relocs or are not
- dynamic. */
if (h->dynamic_adjusted
&& !h->def_regular
&& !ELF_COMMON_DEF_P (h))
{
- /* Make sure this symbol is output as a dynamic symbol. */
+ /* Ensure we catch all the cases where this symbol
+ should be made dynamic. */
if (!ensure_undef_dynamic (info, h))
return FALSE;
+ /* But if that didn't work out, discard dynamic relocs. */
if (h->dynindx == -1)
eh->dyn_relocs = NULL;
}
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
+/* Split a 34-bit displacement V into the two D fields of a prefixed
+   insn pair: bits 16-33 go in the prefix word, bits 0-15 in the
+   suffix word.  */
+#define D34(v) \
+  ((((v) & 0x3ffff0000ULL) << 16) | ((v) & 0xffff))
+/* High-adjusted part of V above a signed 34-bit field,
+   ie. (V + 2**33) >> 34.  */
+#define HA34(v) (((v) + (1ULL << 33)) >> 34)
/* Called via elf_link_hash_traverse from ppc64_elf_size_dynamic_sections
to set up space for global entry stubs. These are put in glink,
htab->got_reli_size += rel_size;
}
else if (bfd_link_pic (info)
- && !((ent->tls_type & TLS_TPREL) != 0
+ && !(ent->tls_type != 0
&& bfd_link_executable (info)))
{
asection *srel = ppc64_elf_tdata (ibfd)->relgot;
ent->got.offset = s->size;
ent->owner = ibfd;
s->size += 16;
- if (bfd_link_pic (info))
+ if (bfd_link_dll (info))
{
asection *srel = ppc64_elf_tdata (ibfd)->relgot;
srel->size += sizeof (Elf64_External_Rela);
return size + 16;
}
+/* Return the number of relocs that emit_relocs_for_offset will emit
+   for offset OFF: one REL16 for a 16-bit offset, a ha/lo pair for a
+   32-bit offset, and for wider offsets one highest/higher reloc plus
+   one reloc for each further non-zero 16-bit piece.  Must be kept in
+   sync with emit_relocs_for_offset.  */
+
+static unsigned int
+num_relocs_for_offset (bfd_vma off)
+{
+  unsigned int num_rel;
+  if (off + 0x8000 < 0x10000)
+    num_rel = 1;
+  else if (off + 0x80008000ULL < 0x100000000ULL)
+    num_rel = 2;
+  else
+    {
+      num_rel = 1;
+      /* An extra higher reloc is needed when the highest 16 bits are
+         relocated and the next 16 bits are also non-zero.  */
+      if (off + 0x800000000000ULL >= 0x1000000000000ULL
+          && ((off >> 32) & 0xffff) != 0)
+        num_rel += 1;
+      if (PPC_HI (off) != 0)
+        num_rel += 1;
+      if (PPC_LO (off) != 0)
+        num_rel += 1;
+    }
+  return num_rel;
+}
+
+/* Emit REL16-style relocs at *R describing the insn sequence that
+   materialises offset OFF to reach target TARG.  ROFF is the section
+   offset of the first relocated insn; the sequence's PC base is 8
+   bytes earlier (presumably the bcl of the build_offset sequence —
+   confirm with caller).  Relocs are against symbol 0 with the target
+   encoded in the addend.  Returns a pointer to the last reloc used;
+   the count matches num_relocs_for_offset.  */
+
+static Elf_Internal_Rela *
+emit_relocs_for_offset (struct bfd_link_info *info, Elf_Internal_Rela *r,
+                        bfd_vma roff, bfd_vma targ, bfd_vma off)
+{
+  bfd_vma relative_targ = targ - (roff - 8);
+  /* Point at the 16-bit immediate field, which is in the high half
+     of the word on big-endian.  */
+  if (bfd_big_endian (info->output_bfd))
+    roff += 2;
+  r->r_offset = roff;
+  r->r_addend = relative_targ + roff;
+  if (off + 0x8000 < 0x10000)
+    r->r_info = ELF64_R_INFO (0, R_PPC64_REL16);
+  else if (off + 0x80008000ULL < 0x100000000ULL)
+    {
+      r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HA);
+      ++r;
+      roff += 4;
+      r->r_offset = roff;
+      r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_LO);
+      r->r_addend = relative_targ + roff;
+    }
+  else
+    {
+      if (off + 0x800000000000ULL < 0x1000000000000ULL)
+        r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHER);
+      else
+        {
+          r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHEST);
+          if (((off >> 32) & 0xffff) != 0)
+            {
+              ++r;
+              roff += 4;
+              r->r_offset = roff;
+              r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHER);
+              r->r_addend = relative_targ + roff;
+            }
+        }
+      /* Step over the shift insn present when the high word is
+         non-zero (presumably the sldi in the build_offset sequence —
+         confirm against build_offset).  */
+      if (((off >> 32) & 0xffffffffULL) != 0)
+        roff += 4;
+      if (PPC_HI (off) != 0)
+        {
+          ++r;
+          roff += 4;
+          r->r_offset = roff;
+          r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGH);
+          r->r_addend = relative_targ + roff;
+        }
+      if (PPC_LO (off) != 0)
+        {
+          ++r;
+          roff += 4;
+          r->r_offset = roff;
+          r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_LO);
+          r->r_addend = relative_targ + roff;
+        }
+    }
+  return r;
+}
+
+/* Emit insns at P that add offset OFF to the current PC into r12, or
+   load from PC+OFF into r12 when LOAD.  ODD is 4 when the sequence
+   starts on a non-8-byte-aligned address; a nop is then inserted (or
+   the sequence rearranged) so the prefixed paddi/pld lands 8-byte
+   aligned — presumably to stop the prefixed insn crossing a 64-byte
+   boundary; confirm against ISA 3.1.  Three variants are emitted by
+   range of OFF; size_powerxx_offset and num_relocs_for_powerxx_offset
+   must be kept in sync with the choices here.  Returns the address
+   just past the emitted insns.  */
+
+static bfd_byte *
+build_powerxx_offset (bfd *abfd, bfd_byte *p, bfd_vma off, int odd,
+                      bfd_boolean load)
+{
+  uint64_t insn;
+  /* OFF fits in 34 bits signed: a single pla/pld does the job.  */
+  if (off - odd + (1ULL << 33) < 1ULL << 34)
+    {
+      off -= odd;
+      if (odd)
+        {
+          bfd_put_32 (abfd, NOP, p);
+          p += 4;
+        }
+      if (load)
+        insn = PLD_R12_PC;
+      else
+        insn = PADDI_R12_PC;
+      insn |= D34 (off);
+      bfd_put_32 (abfd, insn >> 32, p);
+      p += 4;
+      bfd_put_32 (abfd, insn, p);
+    }
+  /* The minimum value for paddi is -0x200000000.  The minimum value
+     for li is -0x8000, which when shifted by 34 and added gives a
+     minimum value of -0x2000200000000.  The maximum value is
+     0x1ffffffff+0x7fff<<34 which is 0x2000200000000-1.  */
+  else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+    {
+      /* li (high part) + sldi + paddi + ldx/add; the sldi goes before
+         or after the paddi depending on alignment parity.  */
+      off -= 8 - odd;
+      bfd_put_32 (abfd, LI_R11_0 | (HA34 (off) & 0xffff), p);
+      p += 4;
+      if (!odd)
+        {
+          bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+          p += 4;
+        }
+      insn = PADDI_R12_PC | D34 (off);
+      bfd_put_32 (abfd, insn >> 32, p);
+      p += 4;
+      bfd_put_32 (abfd, insn, p);
+      p += 4;
+      if (odd)
+        {
+          bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+          p += 4;
+        }
+      if (load)
+        bfd_put_32 (abfd, LDX_R12_R11_R12, p);
+      else
+        bfd_put_32 (abfd, ADD_R12_R11_R12, p);
+    }
+  else
+    {
+      /* Full range: lis+ori build the 30 high-adjusted bits in r11,
+         shifted up 34 and combined with the paddi result.  */
+      off -= odd + 8;
+      bfd_put_32 (abfd, LIS_R11 | ((HA34 (off) >> 16) & 0x3fff), p);
+      p += 4;
+      bfd_put_32 (abfd, ORI_R11_R11_0 | (HA34 (off) & 0xffff), p);
+      p += 4;
+      if (odd)
+        {
+          bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+          p += 4;
+        }
+      insn = PADDI_R12_PC | D34 (off);
+      bfd_put_32 (abfd, insn >> 32, p);
+      p += 4;
+      bfd_put_32 (abfd, insn, p);
+      p += 4;
+      if (!odd)
+        {
+          bfd_put_32 (abfd, SLDI_R11_R11_34, p);
+          p += 4;
+        }
+      if (load)
+        bfd_put_32 (abfd, LDX_R12_R11_R12, p);
+      else
+        bfd_put_32 (abfd, ADD_R12_R11_R12, p);
+    }
+  p += 4;
+  return p;
+}
+
+/* Return the byte size of the sequence build_powerxx_offset emits
+   for offset OFF and alignment parity ODD.  The range tests here
+   must match build_powerxx_offset exactly.  */
+
+static unsigned int
+size_powerxx_offset (bfd_vma off, int odd)
+{
+  if (off - odd + (1ULL << 33) < 1ULL << 34)
+    return odd + 8;
+  else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+    return 20;
+  else
+    return 24;
+}
+
+/* Return the number of relocs emit_relocs_for_powerxx_offset emits
+   for offset OFF and parity ODD: one PCREL34 when a lone paddi/pld
+   reaches, plus one or two high-part REL16 *A34 relocs for the wider
+   ranges.  Range tests must match build_powerxx_offset.  */
+
+static unsigned int
+num_relocs_for_powerxx_offset (bfd_vma off, int odd)
+{
+  if (off - odd + (1ULL << 33) < 1ULL << 34)
+    return 1;
+  else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+    return 2;
+  else
+    return 3;
+}
+
+/* Emit relocs at *R describing the build_powerxx_offset sequence at
+   section offset ROFF that reaches TARG via offset OFF with parity
+   ODD.  High parts get REL16_HIGHESTA34/HIGHERA34 relocs on the
+   li/lis/ori insns, and the paddi (or pld) gets a PCREL34.  Returns
+   a pointer to the last reloc used.  */
+
+static Elf_Internal_Rela *
+emit_relocs_for_powerxx_offset (struct bfd_link_info *info,
+                                Elf_Internal_Rela *r, bfd_vma roff,
+                                bfd_vma targ, bfd_vma off, int odd)
+{
+  if (off - odd + (1ULL << 33) < 1ULL << 34)
+    roff += odd;
+  else if (off - (8 - odd) + (0x20002ULL << 32) < 0x40004ULL << 32)
+    {
+      /* +2 on big-endian points at the 16-bit immediate field.  */
+      int d_offset = bfd_big_endian (info->output_bfd) ? 2 : 0;
+      r->r_offset = roff + d_offset;
+      r->r_addend = targ + 8 - odd - d_offset;
+      r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHERA34);
+      ++r;
+      roff += 8 - odd;
+    }
+  else
+    {
+      int d_offset = bfd_big_endian (info->output_bfd) ? 2 : 0;
+      r->r_offset = roff + d_offset;
+      r->r_addend = targ + 8 + odd - d_offset;
+      r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHESTA34);
+      ++r;
+      roff += 4;
+      r->r_offset = roff + d_offset;
+      r->r_addend = targ + 4 + odd - d_offset;
+      r->r_info = ELF64_R_INFO (0, R_PPC64_REL16_HIGHERA34);
+      ++r;
+      roff += 4 + odd;
+    }
+  /* The paddi/pld itself.  */
+  r->r_offset = roff;
+  r->r_addend = targ;
+  r->r_info = ELF64_R_INFO (0, R_PPC64_PCREL34);
+  return r;
+}
+
/* Emit .eh_frame opcode to advance pc by DELTA. */
static bfd_byte *
if (stub_entry->stub_type >= ppc_stub_plt_call_notoc)
{
- size = 8 + size_offset (off - 8);
+ if (htab->powerxx_stubs)
+ {
+ bfd_vma start = (stub_entry->stub_offset
+ + stub_entry->group->stub_sec->output_offset
+ + stub_entry->group->stub_sec->output_section->vma);
+ if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
+ start += 4;
+ size = 8 + size_powerxx_offset (off, start & 4);
+ }
+ else
+ size = 8 + size_offset (off - 8);
if (stub_entry->stub_type > ppc_stub_plt_call_notoc)
size += 4;
return size;
return relocs;
}
+/* Convert the relocs R[0] thru R[-NUM_REL+1], which are all no-symbol
+   forms, to the equivalent relocs against the global symbol given by
+   STUB_ENTRY->H.  Returns FALSE only on allocation failure.  */
+
+static bfd_boolean
+use_global_in_relocs (struct ppc_link_hash_table *htab,
+                      struct ppc_stub_hash_entry *stub_entry,
+                      Elf_Internal_Rela *r, unsigned int num_rel)
+{
+  struct elf_link_hash_entry **hashes;
+  unsigned long symndx;
+  struct ppc_link_hash_entry *h;
+  bfd_vma symval;
+
+  /* Relocs are always against symbols in their own object file.  Fake
+     up global sym hashes for the stub bfd (which has no symbols).  */
+  hashes = elf_sym_hashes (htab->params->stub_bfd);
+  if (hashes == NULL)
+    {
+      bfd_size_type hsize;
+
+      /* When called the first time, stub_globals will contain the
+         total number of symbols seen during stub sizing.  After
+         allocating, stub_globals is used as an index to fill the
+         hashes array.  */
+      hsize = (htab->stub_globals + 1) * sizeof (*hashes);
+      hashes = bfd_zalloc (htab->params->stub_bfd, hsize);
+      if (hashes == NULL)
+        return FALSE;
+      elf_sym_hashes (htab->params->stub_bfd) = hashes;
+      htab->stub_globals = 1;
+    }
+  symndx = htab->stub_globals++;
+  h = stub_entry->h;
+  hashes[symndx] = &h->elf;
+  if (h->oh != NULL && h->oh->is_func)
+    h = ppc_follow_link (h->oh);
+  BFD_ASSERT (h->elf.root.type == bfd_link_hash_defined
+              || h->elf.root.type == bfd_link_hash_defweak);
+  /* The address the global resolves to; subtracted below so the
+     addends become symbol-relative.  */
+  symval = (h->elf.root.u.def.value
+            + h->elf.root.u.def.section->output_offset
+            + h->elf.root.u.def.section->output_section->vma);
+  /* Walk backwards over the NUM_REL relocs ending at R.  */
+  while (num_rel-- != 0)
+    {
+      r->r_info = ELF64_R_INFO (symndx, ELF64_R_TYPE (r->r_info));
+      if (h->elf.root.u.def.section != stub_entry->target_section)
+        {
+          /* H is an opd symbol.  The addend must be zero, and the
+             branch reloc is the only one we can convert.  */
+          r->r_addend = 0;
+          break;
+        }
+      else
+        r->r_addend -= symval;
+      --r;
+    }
+  return TRUE;
+}
+
static bfd_vma
get_r2off (struct bfd_link_info *info,
struct ppc_stub_hash_entry *stub_entry)
struct bfd_link_info *info;
struct ppc_link_hash_table *htab;
bfd_byte *loc;
- bfd_byte *p;
+ bfd_byte *p, *relp;
bfd_vma targ, off;
Elf_Internal_Rela *r;
asection *plt;
+ int num_rel;
+ int odd;
/* Massage our args to the form they really have. */
stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
r->r_offset = p - 4 - stub_entry->group->stub_sec->contents;
r->r_info = ELF64_R_INFO (0, R_PPC64_REL24);
r->r_addend = targ;
- if (stub_entry->h != NULL)
- {
- struct elf_link_hash_entry **hashes;
- unsigned long symndx;
- struct ppc_link_hash_entry *h;
-
- hashes = elf_sym_hashes (htab->params->stub_bfd);
- if (hashes == NULL)
- {
- bfd_size_type hsize;
-
- hsize = (htab->stub_globals + 1) * sizeof (*hashes);
- hashes = bfd_zalloc (htab->params->stub_bfd, hsize);
- if (hashes == NULL)
- return FALSE;
- elf_sym_hashes (htab->params->stub_bfd) = hashes;
- htab->stub_globals = 1;
- }
- symndx = htab->stub_globals++;
- h = stub_entry->h;
- hashes[symndx] = &h->elf;
- r->r_info = ELF64_R_INFO (symndx, R_PPC64_REL24);
- if (h->oh != NULL && h->oh->is_func)
- h = ppc_follow_link (h->oh);
- if (h->elf.root.u.def.section != stub_entry->target_section)
- /* H is an opd symbol. The addend must be zero. */
- r->r_addend = 0;
- else
- {
- off = (h->elf.root.u.def.value
- + h->elf.root.u.def.section->output_offset
- + h->elf.root.u.def.section->output_section->vma);
- r->r_addend -= off;
- }
- }
+ if (stub_entry->h != NULL
+ && !use_global_in_relocs (htab, stub_entry, r, 1))
+ return FALSE;
}
break;
targ = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
+ odd = off & 4;
off = targ - off;
- /* The notoc stubs calculate their target (either a PLT entry or
- the global entry point of a function) relative to the PC
- returned by the "bcl" two instructions past the start of the
- sequence emitted by build_offset. The offset is therefore 8
- less than calculated from the start of the sequence. */
- off -= 8;
- p = build_offset (htab->params->stub_bfd, p, off,
- stub_entry->stub_type >= ppc_stub_plt_call_notoc);
+ relp = p;
+ num_rel = 0;
+ if (htab->powerxx_stubs)
+ {
+ bfd_boolean load = stub_entry->stub_type >= ppc_stub_plt_call_notoc;
+ p = build_powerxx_offset (htab->params->stub_bfd, p, off, odd, load);
+ }
+ else
+ {
+ /* The notoc stubs calculate their target (either a PLT entry or
+ the global entry point of a function) relative to the PC
+ returned by the "bcl" two instructions past the start of the
+ sequence emitted by build_offset. The offset is therefore 8
+ less than calculated from the start of the sequence. */
+ off -= 8;
+ p = build_offset (htab->params->stub_bfd, p, off,
+ stub_entry->stub_type >= ppc_stub_plt_call_notoc);
+ }
+
if (stub_entry->stub_type <= ppc_stub_long_branch_both)
{
bfd_vma from;
+ num_rel = 1;
from = (stub_entry->stub_offset
+ stub_entry->group->stub_sec->output_offset
+ stub_entry->group->stub_sec->output_section->vma
}
p += 4;
- if (htab->glink_eh_frame != NULL
- && htab->glink_eh_frame->size != 0)
+ if (info->emitrelocations)
+ {
+ bfd_vma roff = relp - stub_entry->group->stub_sec->contents;
+ if (htab->powerxx_stubs)
+ num_rel += num_relocs_for_powerxx_offset (off, odd);
+ else
+ {
+ num_rel += num_relocs_for_offset (off);
+ roff += 16;
+ }
+ r = get_relocs (stub_entry->group->stub_sec, num_rel);
+ if (r == NULL)
+ return FALSE;
+ if (htab->powerxx_stubs)
+ r = emit_relocs_for_powerxx_offset (info, r, roff, targ, off, odd);
+ else
+ r = emit_relocs_for_offset (info, r, roff, targ, off);
+ if (stub_entry->stub_type == ppc_stub_long_branch_notoc
+ || stub_entry->stub_type == ppc_stub_long_branch_both)
+ {
+ ++r;
+ roff = p - 4 - stub_entry->group->stub_sec->contents;
+ r->r_offset = roff;
+ r->r_info = ELF64_R_INFO (0, R_PPC64_REL24);
+ r->r_addend = targ;
+ if (stub_entry->h != NULL
+ && !use_global_in_relocs (htab, stub_entry, r, num_rel))
+ return FALSE;
+ }
+ }
+
+ if (!htab->powerxx_stubs
+ && htab->glink_eh_frame != NULL
+ && htab->glink_eh_frame->size != 0)
{
bfd_byte *base, *eh;
unsigned int lr_used, delta;
struct ppc_link_hash_table *htab;
asection *plt;
bfd_vma targ, off, r2off;
- unsigned int size, extra, lr_used, delta;
+ unsigned int size, extra, lr_used, delta, odd;
/* Massage our args to the form they really have. */
stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
targ = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
+ odd = off & 4;
off = targ - off;
- extra = size_offset (off - 8);
+ if (info->emitrelocations)
+ {
+ unsigned int num_rel;
+ if (htab->powerxx_stubs)
+ num_rel = num_relocs_for_powerxx_offset (off, odd);
+ else
+ num_rel = num_relocs_for_offset (off - 8);
+ stub_entry->group->stub_sec->reloc_count += num_rel;
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
+ }
+
+ if (htab->powerxx_stubs)
+ extra = size_powerxx_offset (off, odd);
+ else
+ extra = size_offset (off - 8);
/* Include branch insn plus those in the offset sequence. */
size += 4 + extra;
/* The branch insn is at the end, or "extra" bytes along. So
calculated. */
off -= extra;
- /* After the bcl, lr has been modified so we need to emit
- .eh_frame info saying the return address is in r12. */
- lr_used = stub_entry->stub_offset + 8;
- if (stub_entry->stub_type == ppc_stub_long_branch_both)
- lr_used += 4;
- /* The eh_frame info will consist of a DW_CFA_advance_loc or
- variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
- DW_CFA_restore_extended 65. */
- delta = lr_used - stub_entry->group->lr_restore;
- stub_entry->group->eh_size += eh_advance_size (delta) + 6;
- stub_entry->group->lr_restore = lr_used + 8;
+ if (!htab->powerxx_stubs)
+ {
+ /* After the bcl, lr has been modified so we need to emit
+ .eh_frame info saying the return address is in r12. */
+ lr_used = stub_entry->stub_offset + 8;
+ if (stub_entry->stub_type == ppc_stub_long_branch_both)
+ lr_used += 4;
+ /* The eh_frame info will consist of a DW_CFA_advance_loc or
+ variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
+ DW_CFA_restore_extended 65. */
+ delta = lr_used - stub_entry->group->lr_restore;
+ stub_entry->group->eh_size += eh_advance_size (delta) + 6;
+ stub_entry->group->lr_restore = lr_used + 8;
+ }
/* If the branch can't reach, use a plt_branch. */
if (off + (1 << 25) >= (bfd_vma) (1 << 26))
- ppc_stub_long_branch_notoc);
size += 4;
}
+ else if (info->emitrelocations)
+ stub_entry->group->stub_sec->reloc_count +=1;
break;
case ppc_stub_plt_call_notoc:
plt = htab->pltlocal;
}
targ += plt->output_offset + plt->output_section->vma;
+ odd = off & 4;
off = targ - off;
if (htab->params->plt_stub_align != 0)
off -= pad;
}
+ if (info->emitrelocations)
+ {
+ unsigned int num_rel;
+ if (htab->powerxx_stubs)
+ num_rel = num_relocs_for_powerxx_offset (off, odd);
+ else
+ num_rel = num_relocs_for_offset (off - 8);
+ stub_entry->group->stub_sec->reloc_count += num_rel;
+ stub_entry->group->stub_sec->flags |= SEC_RELOC;
+ }
+
size = plt_stub_size (htab, stub_entry, off);
- /* After the bcl, lr has been modified so we need to emit
- .eh_frame info saying the return address is in r12. */
- lr_used = stub_entry->stub_offset + 8;
- if (stub_entry->stub_type == ppc_stub_plt_call_both)
- lr_used += 4;
- /* The eh_frame info will consist of a DW_CFA_advance_loc or
- variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
- DW_CFA_restore_extended 65. */
- delta = lr_used - stub_entry->group->lr_restore;
- stub_entry->group->eh_size += eh_advance_size (delta) + 6;
- stub_entry->group->lr_restore = lr_used + 8;
+ if (!htab->powerxx_stubs)
+ {
+ /* After the bcl, lr has been modified so we need to emit
+ .eh_frame info saying the return address is in r12. */
+ lr_used = stub_entry->stub_offset + 8;
+ if (stub_entry->stub_type == ppc_stub_plt_call_both)
+ lr_used += 4;
+ /* The eh_frame info will consist of a DW_CFA_advance_loc or
+ variant, DW_CFA_register, 65, 12, DW_CFA_advance_loc+2,
+ DW_CFA_restore_extended 65. */
+ delta = lr_used - stub_entry->group->lr_restore;
+ stub_entry->group->eh_size += eh_advance_size (delta) + 6;
+ stub_entry->group->lr_restore = lr_used + 8;
+ }
break;
case ppc_stub_plt_call:
htab->got_reli_size += rel_size;
}
else if (bfd_link_pic (info)
- && !((ent->tls_type & TLS_TPREL) != 0
+ && !(ent->tls_type != 0
&& bfd_link_executable (info)))
{
asection *srel = ppc64_elf_tdata (ibfd)->relgot;
asection *s = ppc64_elf_tdata (ibfd)->got;
ent->got.offset = s->size;
s->size += 16;
- if (bfd_link_pic (info))
+ if (bfd_link_dll (info))
{
asection *srel = ppc64_elf_tdata (ibfd)->relgot;
srel->size += sizeof (Elf64_External_Rela);
&& r_type != R_PPC64_REL14
&& r_type != R_PPC64_REL14_BRTAKEN
&& r_type != R_PPC64_REL14_BRNTAKEN
- && r_type != R_PPC64_PLTCALL)
+ && r_type != R_PPC64_PLTCALL
+ && r_type != R_PPC64_PLTCALL_NOTOC)
continue;
r_symndx = ELF64_R_SYM (rel->r_info);
&& code_sec->output_section != NULL
&& (((hash ? hash->elf.other : sym->st_other)
& STO_PPC64_LOCAL_MASK)
- != 1 << STO_PPC64_LOCAL_BIT)))
+ > 1 << STO_PPC64_LOCAL_BIT)))
stub_type = ppc_stub_long_branch_notoc;
}
else if (stub_type != ppc_stub_plt_call)
= hash ? hash->elf.type : ELF_ST_TYPE (sym->st_info);
stub_entry->other = hash ? hash->elf.other : sym->st_other;
- if (stub_entry->h != NULL)
+ if (hash != NULL
+ && (hash->elf.root.type == bfd_link_hash_defined
+ || hash->elf.root.type == bfd_link_hash_defweak))
htab->stub_globals += 1;
}
}
val = sym->st_value + ent->addend;
- val += PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
+ if (ELF_ST_TYPE (sym->st_info) != STT_GNU_IFUNC)
+ val += PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
if (sym_sec != NULL && sym_sec->output_section != NULL)
val += sym_sec->output_offset + sym_sec->output_section->vma;
return _bfd_elf_default_action_discarded (sec);
}
+/* These are the dynamic relocations supported by glibc.  */
+
+/* Return TRUE if R_TYPE is a dynamic relocation that glibc's ppc64
+   ld.so knows how to apply at run time.  relocate_section uses this
+   to emit a one-time warning per input section when the linker is
+   about to output a dynamic relocation outside this set, since such
+   a relocation would be silently mishandled or rejected by glibc.  */
+
+static bfd_boolean
+ppc64_glibc_dynamic_reloc (enum elf_ppc64_reloc_type r_type)
+{
+  switch (r_type)
+    {
+      /* Core dynamic relocs: relative, absolute, PLT/IFUNC.  */
+    case R_PPC64_RELATIVE:
+    case R_PPC64_NONE:
+    case R_PPC64_ADDR64:
+    case R_PPC64_GLOB_DAT:
+    case R_PPC64_IRELATIVE:
+    case R_PPC64_JMP_IREL:
+    case R_PPC64_JMP_SLOT:
+      /* TLS relocs handled by ld.so.  */
+    case R_PPC64_DTPMOD64:
+    case R_PPC64_DTPREL64:
+    case R_PPC64_TPREL64:
+    case R_PPC64_TPREL16_LO_DS:
+    case R_PPC64_TPREL16_DS:
+    case R_PPC64_TPREL16:
+    case R_PPC64_TPREL16_LO:
+    case R_PPC64_TPREL16_HI:
+    case R_PPC64_TPREL16_HIGH:
+    case R_PPC64_TPREL16_HA:
+    case R_PPC64_TPREL16_HIGHA:
+    case R_PPC64_TPREL16_HIGHER:
+    case R_PPC64_TPREL16_HIGHEST:
+    case R_PPC64_TPREL16_HIGHERA:
+    case R_PPC64_TPREL16_HIGHESTA:
+      /* Various absolute and relative address field relocs.  */
+    case R_PPC64_ADDR16_LO_DS:
+    case R_PPC64_ADDR16_LO:
+    case R_PPC64_ADDR16_HI:
+    case R_PPC64_ADDR16_HIGH:
+    case R_PPC64_ADDR16_HA:
+    case R_PPC64_ADDR16_HIGHA:
+    case R_PPC64_REL30:
+    case R_PPC64_COPY:
+    case R_PPC64_UADDR64:
+    case R_PPC64_UADDR32:
+    case R_PPC64_ADDR32:
+    case R_PPC64_ADDR24:
+    case R_PPC64_ADDR16:
+    case R_PPC64_UADDR16:
+    case R_PPC64_ADDR16_DS:
+    case R_PPC64_ADDR16_HIGHER:
+    case R_PPC64_ADDR16_HIGHEST:
+    case R_PPC64_ADDR16_HIGHERA:
+    case R_PPC64_ADDR16_HIGHESTA:
+    case R_PPC64_ADDR14:
+    case R_PPC64_ADDR14_BRTAKEN:
+    case R_PPC64_ADDR14_BRNTAKEN:
+    case R_PPC64_REL32:
+    case R_PPC64_REL64:
+      return TRUE;
+
+      /* Anything else (eg. the new prefixed-insn relocs) is not
+	 understood by glibc's dynamic linker.  */
+    default:
+      return FALSE;
+    }
+}
+
/* The RELOCATE_SECTION function is called by the ELF backend linker
to handle the relocations for a section.
bfd_boolean is_opd;
/* Assume 'at' branch hints. */
bfd_boolean is_isa_v2 = TRUE;
+ bfd_boolean warned_dynamic = FALSE;
bfd_vma d_offset = (bfd_big_endian (input_bfd) ? 2 : 0);
/* Initialize howto table if needed. */
if (input_section->owner == htab->params->stub_bfd)
return TRUE;
- BFD_ASSERT (is_ppc64_elf (input_bfd));
+ if (!is_ppc64_elf (input_bfd))
+ {
+ bfd_set_error (bfd_error_wrong_format);
+ return FALSE;
+ }
local_got_ents = elf_local_got_ents (input_bfd);
TOCstart = elf_gp (output_bfd);
Elf_Internal_Rela orig_rel;
reloc_howto_type *howto;
struct reloc_howto_struct alt_howto;
+ uint64_t pinsn;
+ bfd_vma offset;
again:
orig_rel = *rel;
{
_bfd_clear_contents (ppc64_elf_howto_table[r_type],
input_bfd, input_section,
- contents + rel->r_offset);
+ contents, rel->r_offset);
wrel->r_offset = rel->r_offset;
wrel->r_info = 0;
wrel->r_addend = 0;
&& (h == NULL
|| h->elf.root.type == bfd_link_hash_defined
|| h->elf.root.type == bfd_link_hash_defweak)
- && (IS_PPC64_TLS_RELOC (r_type)
- != (sym_type == STT_TLS
- || (sym_type == STT_SECTION
- && (sec->flags & SEC_THREAD_LOCAL) != 0))))
+ && IS_PPC64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
{
if ((tls_mask & TLS_TLS) != 0
&& (r_type == R_PPC64_TLS
case R_PPC64_LO_DS_OPT:
insn = bfd_get_32 (input_bfd, contents + rel->r_offset - d_offset);
- if ((insn & (0x3f << 26)) != 58u << 26)
+ if ((insn & (0x3fu << 26)) != 58u << 26)
abort ();
insn += (14u << 26) - (58u << 26);
bfd_put_32 (input_bfd, insn, contents + rel->r_offset - d_offset);
doing a GD->IE transition. */
if (retval == 2)
{
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0
&& (tls_mask & TLS_GD) == 0)
goto tls_ldgd_opt;
}
break;
+ case R_PPC64_GOT_TPREL34:
+ if ((tls_mask & TLS_TLS) != 0
+ && (tls_mask & TLS_TPREL) == 0)
+ {
+ /* pld ra,sym@got@tprel@pcrel -> paddi ra,r13,sym@tprel */
+ pinsn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + rel->r_offset + 4);
+ pinsn += ((2ULL << 56) + (-1ULL << 52)
+ + (14ULL << 26) - (57ULL << 26) + (13ULL << 16));
+ bfd_put_32 (input_bfd, pinsn >> 32,
+ contents + rel->r_offset);
+ bfd_put_32 (input_bfd, pinsn & 0xffffffff,
+ contents + rel->r_offset + 4);
+ r_type = R_PPC64_TPREL34;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ break;
+
case R_PPC64_TLS:
if ((tls_mask & TLS_TLS) != 0
&& (tls_mask & TLS_TPREL) == 0)
{
- insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
insn = _bfd_elf_ppc_at_tls_transform (insn, 13);
if (insn == 0)
- abort ();
- bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
- /* Was PPC64_TLS which sits on insn boundary, now
- PPC64_TPREL16_LO which is at low-order half-word. */
- rel->r_offset += d_offset;
- r_type = R_PPC64_TPREL16_LO;
- if (toc_symndx != 0)
+ break;
+ if ((rel->r_offset & 3) == 0)
{
- rel->r_info = ELF64_R_INFO (toc_symndx, r_type);
- rel->r_addend = toc_addend;
- /* We changed the symbol. Start over in order to
- get h, sym, sec etc. right. */
- goto again;
+ bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
+ /* Was PPC64_TLS which sits on insn boundary, now
+ PPC64_TPREL16_LO which is at low-order half-word. */
+ rel->r_offset += d_offset;
+ r_type = R_PPC64_TPREL16_LO;
+ if (toc_symndx != 0)
+ {
+ rel->r_info = ELF64_R_INFO (toc_symndx, r_type);
+ rel->r_addend = toc_addend;
+ /* We changed the symbol. Start over in order to
+ get h, sym, sec etc. right. */
+ goto again;
+ }
+ else
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ else if ((rel->r_offset & 3) == 1)
+ {
+ /* For pcrel IE to LE we already have the full
+ offset and thus don't need an addi here. A nop
+ or mr will do. */
+ if ((insn & (0x3fu << 26)) == 14 << 26)
+ {
+ /* Extract regs from addi rt,ra,si. */
+ unsigned int rt = (insn >> 21) & 0x1f;
+ unsigned int ra = (insn >> 16) & 0x1f;
+ if (rt == ra)
+ insn = NOP;
+ else
+ {
+ /* Build or ra,rs,rb with rb==rs, ie. mr ra,rs. */
+ insn = (rt << 16) | (ra << 21) | (ra << 11);
+ insn |= (31u << 26) | (444u << 1);
+ }
+ }
+ bfd_put_32 (input_bfd, insn, contents + rel->r_offset - 1);
}
- else
- rel->r_info = ELF64_R_INFO (r_symndx, r_type);
}
break;
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSGD16_HA:
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0)
goto tls_gdld_hi;
break;
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
- tls_gd = TLS_TPRELGD;
+ tls_gd = TLS_GDIE;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0)
goto tls_ldgd_opt;
break;
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_LD) == 0)
{
unsigned int insn1, insn2;
- bfd_vma offset;
tls_ldgd_opt:
offset = (bfd_vma) -1;
stays with its arg setup insns, ie. that the next
reloc is the __tls_get_addr call associated with
the current reloc. Edit both insns. */
- if (input_section->has_tls_get_addr_call
+ if (input_section->nomark_tls_get_addr
&& rel + 1 < relend
&& branch_reloc_hash_match (input_bfd, rel + 1,
htab->tls_get_addr,
{
/* IE */
insn1 &= (0x1f << 21) | (0x1f << 16);
- insn1 |= 58 << 26; /* ld */
+ insn1 |= 58u << 26; /* ld */
insn2 = 0x7c636a14; /* add 3,3,13 */
if (offset != (bfd_vma) -1)
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
- if ((tls_mask & TLS_EXPLICIT) == 0)
- r_type = (((r_type - (R_PPC64_GOT_TLSGD16 & 3)) & 3)
- + R_PPC64_GOT_TPREL16_DS);
- else
+ if (r_type == R_PPC64_TOC16
+ || r_type == R_PPC64_TOC16_LO)
r_type += R_PPC64_TOC16_DS - R_PPC64_TOC16;
+ else
+ r_type = (((r_type - (R_PPC64_GOT_TLSGD16 & 1)) & 1)
+ + R_PPC64_GOT_TPREL16_DS);
rel->r_info = ELF64_R_INFO (r_symndx, r_type);
}
else
if (tls_gd == 0)
{
/* Was an LD reloc. */
- if (toc_symndx)
- sec = local_sections[toc_symndx];
- for (r_symndx = 0;
- r_symndx < symtab_hdr->sh_info;
- r_symndx++)
- if (local_sections[r_symndx] == sec)
- break;
- if (r_symndx >= symtab_hdr->sh_info)
- r_symndx = STN_UNDEF;
+ r_symndx = STN_UNDEF;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
- if (r_symndx != STN_UNDEF)
- rel->r_addend -= (local_syms[r_symndx].st_value
- + sec->output_offset
- + sec->output_section->vma);
}
else if (toc_symndx != 0)
{
bfd_put_32 (input_bfd, insn1,
contents + rel->r_offset - d_offset);
if (offset != (bfd_vma) -1)
- bfd_put_32 (input_bfd, insn2, contents + offset);
+ {
+ bfd_put_32 (input_bfd, insn2, contents + offset);
+ if (offset + 8 <= input_section->size)
+ {
+ insn2 = bfd_get_32 (input_bfd, contents + offset + 4);
+ if (insn2 == LD_R2_0R1 + STK_TOC (htab))
+ bfd_put_32 (input_bfd, NOP, contents + offset + 4);
+ }
+ }
if ((tls_mask & tls_gd) == 0
&& (tls_gd == 0 || toc_symndx != 0))
{
}
break;
+ case R_PPC64_GOT_TLSGD34:
+ if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0)
+ {
+ pinsn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + rel->r_offset + 4);
+ if ((tls_mask & TLS_GDIE) != 0)
+ {
+ /* IE, pla -> pld */
+ pinsn += (-2ULL << 56) + (57ULL << 26) - (14ULL << 26);
+ r_type = R_PPC64_GOT_TPREL34;
+ }
+ else
+ {
+ /* LE, pla pcrel -> paddi r13 */
+ pinsn += (-1ULL << 52) + (13ULL << 16);
+ r_type = R_PPC64_TPREL34;
+ }
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ bfd_put_32 (input_bfd, pinsn >> 32,
+ contents + rel->r_offset);
+ bfd_put_32 (input_bfd, pinsn & 0xffffffff,
+ contents + rel->r_offset + 4);
+ }
+ break;
+
+ case R_PPC64_GOT_TLSLD34:
+ if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_LD) == 0)
+ {
+ pinsn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + rel->r_offset + 4);
+ pinsn += (-1ULL << 52) + (13ULL << 16);
+ bfd_put_32 (input_bfd, pinsn >> 32,
+ contents + rel->r_offset);
+ bfd_put_32 (input_bfd, pinsn & 0xffffffff,
+ contents + rel->r_offset + 4);
+ rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
+ r_symndx = STN_UNDEF;
+ r_type = R_PPC64_TPREL34;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ goto again;
+ }
+ break;
+
case R_PPC64_TLSGD:
if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0
&& rel + 1 < relend)
{
unsigned int insn2;
- bfd_vma offset = rel->r_offset;
+ enum elf_ppc64_reloc_type r_type1 = ELF64_R_TYPE (rel[1].r_info);
- if (is_plt_seq_reloc (ELF64_R_TYPE (rel[1].r_info)))
+ offset = rel->r_offset;
+ if (is_plt_seq_reloc (r_type1))
{
bfd_put_32 (output_bfd, NOP, contents + offset);
+ if (r_type1 == R_PPC64_PLT_PCREL34
+ || r_type1 == R_PPC64_PLT_PCREL34_NOTOC)
+ bfd_put_32 (output_bfd, NOP, contents + offset + 4);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
break;
}
if (ELF64_R_TYPE (rel[1].r_info) == R_PPC64_PLTCALL)
bfd_put_32 (output_bfd, NOP, contents + offset + 4);
- if ((tls_mask & TLS_TPRELGD) != 0)
+ if ((tls_mask & TLS_GDIE) != 0)
{
/* IE */
r_type = R_PPC64_NONE;
r_symndx = toc_symndx;
rel->r_addend = toc_addend;
}
- r_type = R_PPC64_TPREL16_LO;
- rel->r_offset = offset + d_offset;
- insn2 = 0x38630000; /* addi 3,3,0 */
+ if (r_type1 == R_PPC64_REL24_NOTOC
+ || r_type1 == R_PPC64_PLTCALL_NOTOC)
+ {
+ r_type = R_PPC64_NONE;
+ insn2 = NOP;
+ }
+ else
+ {
+ rel->r_offset = offset + d_offset;
+ r_type = R_PPC64_TPREL16_LO;
+ insn2 = 0x38630000; /* addi 3,3,0 */
+ }
}
rel->r_info = ELF64_R_INFO (r_symndx, r_type);
/* Zap the reloc on the _tls_get_addr call too. */
BFD_ASSERT (offset == rel[1].r_offset);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
bfd_put_32 (input_bfd, insn2, contents + offset);
- if ((tls_mask & TLS_TPRELGD) == 0 && toc_symndx != 0)
+ if ((tls_mask & TLS_GDIE) == 0
+ && toc_symndx != 0
+ && r_type != R_PPC64_NONE)
goto again;
}
break;
&& rel + 1 < relend)
{
unsigned int insn2;
- bfd_vma offset = rel->r_offset;
+ enum elf_ppc64_reloc_type r_type1 = ELF64_R_TYPE (rel[1].r_info);
- if (is_plt_seq_reloc (ELF64_R_TYPE (rel[1].r_info)))
+ offset = rel->r_offset;
+ if (is_plt_seq_reloc (r_type1))
{
bfd_put_32 (output_bfd, NOP, contents + offset);
+ if (r_type1 == R_PPC64_PLT_PCREL34
+ || r_type1 == R_PPC64_PLT_PCREL34_NOTOC)
+ bfd_put_32 (output_bfd, NOP, contents + offset + 4);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
break;
}
if (ELF64_R_TYPE (rel[1].r_info) == R_PPC64_PLTCALL)
bfd_put_32 (output_bfd, NOP, contents + offset + 4);
- if (toc_symndx)
- sec = local_sections[toc_symndx];
- for (r_symndx = 0;
- r_symndx < symtab_hdr->sh_info;
- r_symndx++)
- if (local_sections[r_symndx] == sec)
- break;
- if (r_symndx >= symtab_hdr->sh_info)
- r_symndx = STN_UNDEF;
- rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
- if (r_symndx != STN_UNDEF)
- rel->r_addend -= (local_syms[r_symndx].st_value
- + sec->output_offset
- + sec->output_section->vma);
-
- r_type = R_PPC64_TPREL16_LO;
+ if (r_type1 == R_PPC64_REL24_NOTOC
+ || r_type1 == R_PPC64_PLTCALL_NOTOC)
+ {
+ r_type = R_PPC64_NONE;
+ insn2 = NOP;
+ }
+ else
+ {
+ rel->r_offset = offset + d_offset;
+ r_symndx = STN_UNDEF;
+ r_type = R_PPC64_TPREL16_LO;
+ rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
+ insn2 = 0x38630000; /* addi 3,3,0 */
+ }
rel->r_info = ELF64_R_INFO (r_symndx, r_type);
- rel->r_offset = offset + d_offset;
/* Zap the reloc on the _tls_get_addr call too. */
BFD_ASSERT (offset == rel[1].r_offset);
rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_PPC64_NONE);
- insn2 = 0x38630000; /* addi 3,3,0 */
bfd_put_32 (input_bfd, insn2, contents + offset);
- goto again;
+ if (r_type != R_PPC64_NONE)
+ goto again;
}
break;
if ((tls_mask & TLS_GD) == 0)
{
rel[1].r_info = ELF64_R_INFO (r_symndx, R_PPC64_NONE);
- if ((tls_mask & TLS_TPRELGD) != 0)
+ if ((tls_mask & TLS_GDIE) != 0)
r_type = R_PPC64_TPREL64;
else
{
&& relocation + 0x80008000 <= 0xffffffff)
{
unsigned int insn1, insn2;
- bfd_vma offset = rel->r_offset - d_offset;
+ offset = rel->r_offset - d_offset;
insn1 = bfd_get_32 (input_bfd, contents + offset);
insn2 = bfd_get_32 (input_bfd, contents + offset + 4);
if ((insn1 & 0xffff0000) == ADDIS_R2_R12
case R_PPC64_REL24:
case R_PPC64_REL24_NOTOC:
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
/* Calls to functions with a different TOC, such as calls to
shared objects, need to alter the TOC pointer. This is
done using a linkage stub. A REL24 branching to these
fdh = ppc_follow_link (h->oh);
stub_entry = ppc_get_stub_entry (input_section, sec, fdh, &orig_rel,
htab);
- if (r_type == R_PPC64_PLTCALL
+ if ((r_type == R_PPC64_PLTCALL
+ || r_type == R_PPC64_PLTCALL_NOTOC)
&& stub_entry != NULL
&& stub_entry->stub_type >= ppc_stub_plt_call
&& stub_entry->stub_type <= ppc_stub_plt_call_both)
|| stub_entry->stub_type == ppc_stub_plt_branch_both)
&& (r_type != R_PPC64_REL24_NOTOC
|| ((fdh ? fdh->elf.other : sym->st_other)
- & STO_PPC64_LOCAL_MASK) == 1 << STO_PPC64_LOCAL_BIT)
+ & STO_PPC64_LOCAL_MASK) <= 1 << STO_PPC64_LOCAL_BIT)
&& (relocation + addend - from + max_br_offset
< 2 * max_br_offset))
stub_entry = NULL;
|| stub_entry->stub_type == ppc_stub_plt_call_both)
&& r_type == R_PPC64_REL24_NOTOC)
relocation += 4;
+
+ if (r_type == R_PPC64_REL24_NOTOC
+ && (stub_entry->stub_type == ppc_stub_plt_call_notoc
+ || stub_entry->stub_type == ppc_stub_plt_call_both))
+ htab->notoc_plt = 1;
}
if (insn != 0)
goto copy_reloc;
}
break;
+
+ case R_PPC64_GOT16_DS:
+ from = TOCstart + htab->sec_info[input_section->id].toc_off;
+ if (relocation + addend - from + 0x8000 < 0x10000
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
+ if ((insn & (0x3fu << 26 | 0x3)) == 58u << 26 /* ld */)
+ {
+ insn += (14u << 26) - (58u << 26);
+ bfd_put_32 (input_bfd, insn, contents + (rel->r_offset & ~3));
+ r_type = R_PPC64_TOC16;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ }
+ break;
+
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT16_HA:
+ from = TOCstart + htab->sec_info[input_section->id].toc_off;
+ if (relocation + addend - from + 0x80008000ULL < 0x100000000ULL
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
+ if ((insn & (0x3fu << 26 | 0x3)) == 58u << 26 /* ld */)
+ {
+ insn += (14u << 26) - (58u << 26);
+ bfd_put_32 (input_bfd, insn, contents + (rel->r_offset & ~3));
+ r_type = R_PPC64_TOC16_LO;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ else if ((insn & (0x3fu << 26)) == 15u << 26 /* addis */)
+ {
+ r_type = R_PPC64_TOC16_HA;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ }
+ }
+ break;
+
+ case R_PPC64_GOT_PCREL34:
+ from = (rel->r_offset
+ + input_section->output_section->vma
+ + input_section->output_offset);
+ if (relocation - from + (1ULL << 33) < 1ULL << 34
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ offset = rel->r_offset;
+ pinsn = bfd_get_32 (input_bfd, contents + offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + offset + 4);
+ if ((pinsn & ((-1ULL << 50) | (63ULL << 26)))
+ == ((1ULL << 58) | (1ULL << 52) | (57ULL << 26) /* pld */))
+ {
+ /* Replace with paddi. */
+ pinsn += (2ULL << 56) + (14ULL << 26) - (57ULL << 26);
+ r_type = R_PPC64_PCREL34;
+ rel->r_info = ELF64_R_INFO (r_symndx, r_type);
+ bfd_put_32 (input_bfd, pinsn >> 32, contents + offset);
+ bfd_put_32 (input_bfd, pinsn, contents + offset + 4);
+ goto pcrelopt;
+ }
+ }
+ break;
+
+ case R_PPC64_PCREL34:
+ if (SYMBOL_REFERENCES_LOCAL (info, &h->elf))
+ {
+ offset = rel->r_offset;
+ pinsn = bfd_get_32 (input_bfd, contents + offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + offset + 4);
+ if ((pinsn & ((-1ULL << 50) | (63ULL << 26)))
+ == ((1ULL << 58) | (2ULL << 56) | (1ULL << 52)
+ | (14ULL << 26) /* paddi */))
+ {
+ pcrelopt:
+ if (rel + 1 < relend
+ && rel[1].r_offset == offset
+ && rel[1].r_info == ELF64_R_INFO (0, R_PPC64_PCREL_OPT))
+ {
+ bfd_vma off2 = rel[1].r_addend;
+ if (off2 == 0)
+ /* zero means next insn. */
+ off2 = 8;
+ off2 += offset;
+ if (off2 + 4 <= input_section->size)
+ {
+ uint64_t pinsn2;
+ bfd_signed_vma addend_off;
+ pinsn2 = bfd_get_32 (input_bfd, contents + off2);
+ pinsn2 <<= 32;
+ if ((pinsn2 & (63ULL << 58)) == 1ULL << 58)
+ {
+ if (off2 + 8 > input_section->size)
+ break;
+ pinsn2 |= bfd_get_32 (input_bfd,
+ contents + off2 + 4);
+ }
+ if (xlate_pcrel_opt (&pinsn, &pinsn2, &addend_off))
+ {
+ addend += addend_off;
+ rel->r_addend = addend;
+ bfd_put_32 (input_bfd, pinsn >> 32,
+ contents + offset);
+ bfd_put_32 (input_bfd, pinsn,
+ contents + offset + 4);
+ bfd_put_32 (input_bfd, pinsn2 >> 32,
+ contents + off2);
+ if ((pinsn2 & (63ULL << 58)) == 1ULL << 58)
+ bfd_put_32 (input_bfd, pinsn2,
+ contents + off2 + 4);
+ }
+ }
+ }
+ }
+ }
+ break;
}
- /* Set `addend'. */
tls_type = 0;
save_unresolved_reloc = unresolved_reloc;
switch (r_type)
case R_PPC64_GNU_VTINHERIT:
case R_PPC64_GNU_VTENTRY:
case R_PPC64_ENTRY:
+ case R_PPC64_PCREL_OPT:
goto copy_reloc;
/* GOT16 relocations. Like an ADDR16 using the symbol's
case R_PPC64_GOT_TLSGD16_LO:
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSGD16_HA:
+ case R_PPC64_GOT_TLSGD34:
tls_type = TLS_TLS | TLS_GD;
goto dogot;
case R_PPC64_GOT_TLSLD16_LO:
case R_PPC64_GOT_TLSLD16_HI:
case R_PPC64_GOT_TLSLD16_HA:
+ case R_PPC64_GOT_TLSLD34:
tls_type = TLS_TLS | TLS_LD;
goto dogot;
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_HI:
case R_PPC64_GOT_TPREL16_HA:
+ case R_PPC64_GOT_TPREL34:
tls_type = TLS_TLS | TLS_TPREL;
goto dogot;
case R_PPC64_GOT_DTPREL16_LO_DS:
case R_PPC64_GOT_DTPREL16_HI:
case R_PPC64_GOT_DTPREL16_HA:
+ case R_PPC64_GOT_DTPREL34:
tls_type = TLS_TLS | TLS_DTPREL;
goto dogot;
case R_PPC64_GOT16_HA:
case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_GOT_PCREL34:
dogot:
{
/* Relocation is to the entry for this symbol in the global
struct got_entry *ent;
if (tls_type == (TLS_TLS | TLS_LD)
- && (h == NULL
- || !h->elf.def_dynamic))
+ && SYMBOL_REFERENCES_LOCAL (info, &h->elf))
ent = ppc64_tlsld_got (input_bfd);
else
{
else if (indx != 0
|| (bfd_link_pic (info)
&& (h == NULL
- || !UNDEFWEAK_NO_DYNAMIC_RELOC (info, &h->elf)
- || (tls_type == (TLS_TLS | TLS_LD)
- && !h->elf.def_dynamic))
- && !(tls_type == (TLS_TLS | TLS_TPREL)
+ || !UNDEFWEAK_NO_DYNAMIC_RELOC (info, &h->elf))
+ && !(tls_type != 0
&& bfd_link_executable (info)
&& SYMBOL_REFERENCES_LOCAL (info, &h->elf))))
relgot = ppc64_elf_tdata (ent->owner)->relgot;
outrel.r_offset = (got->output_section->vma
+ got->output_offset
+ off);
- outrel.r_addend = addend;
+ outrel.r_addend = orig_rel.r_addend;
if (tls_type & (TLS_LD | TLS_GD))
{
outrel.r_addend = 0;
bfd_elf64_swap_reloca_out (output_bfd,
&outrel, loc);
outrel.r_offset += 8;
- outrel.r_addend = addend;
+ outrel.r_addend = orig_rel.r_addend;
outrel.r_info
= ELF64_R_INFO (indx, R_PPC64_DTPREL64);
}
emitting a reloc. */
else
{
- relocation += addend;
+ relocation += orig_rel.r_addend;
if (tls_type != 0)
{
if (htab->elf.tls_sec == NULL)
abort ();
relocation = got->output_section->vma + got->output_offset + off;
- addend = -(TOCstart + htab->sec_info[input_section->id].toc_off);
+ addend = 0;
+ if (!(r_type == R_PPC64_GOT_PCREL34
+ || r_type == R_PPC64_GOT_TLSGD34
+ || r_type == R_PPC64_GOT_TLSLD34
+ || r_type == R_PPC64_GOT_TPREL34
+ || r_type == R_PPC64_GOT_DTPREL34))
+ addend = -(TOCstart + htab->sec_info[input_section->id].toc_off);
}
break;
case R_PPC64_PLT16_HI:
case R_PPC64_PLT16_LO:
case R_PPC64_PLT16_LO_DS:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
case R_PPC64_PLT32:
case R_PPC64_PLT64:
case R_PPC64_PLTSEQ:
+ case R_PPC64_PLTSEQ_NOTOC:
case R_PPC64_PLTCALL:
+ case R_PPC64_PLTCALL_NOTOC:
/* Relocation is to the entry for this symbol in the
procedure linkage table. */
unresolved_reloc = TRUE;
case R_PPC64_TOC16_LO_DS:
case R_PPC64_TOC16_HA:
addend -= TOCstart + htab->sec_info[input_section->id].toc_off;
+ if (h != NULL)
+ goto dodyn;
break;
/* Relocate against the beginning of the section. */
case R_PPC64_REL16_LO:
case R_PPC64_REL16_HI:
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGH:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHER:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHEST:
+ case R_PPC64_REL16_HIGHESTA:
+ case R_PPC64_REL16_HIGHER34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHEST34:
+ case R_PPC64_REL16_HIGHESTA34:
case R_PPC64_REL16DX_HA:
- break;
-
case R_PPC64_REL14:
case R_PPC64_REL14_BRNTAKEN:
case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL24:
case R_PPC64_REL24_NOTOC:
+ case R_PPC64_PCREL34:
+ case R_PPC64_PCREL28:
break;
case R_PPC64_TPREL16:
case R_PPC64_TPREL16_HIGHERA:
case R_PPC64_TPREL16_HIGHEST:
case R_PPC64_TPREL16_HIGHESTA:
+ case R_PPC64_TPREL34:
if (h != NULL
&& h->elf.root.type == bfd_link_hash_undefweak
&& h->elf.dynindx == -1)
case R_PPC64_DTPREL16_HIGHERA:
case R_PPC64_DTPREL16_HIGHEST:
case R_PPC64_DTPREL16_HIGHESTA:
+ case R_PPC64_DTPREL34:
if (htab->elf.tls_sec != NULL)
addend -= htab->elf.tls_sec->vma + DTP_OFFSET;
break;
case R_PPC64_ADDR16_HIGHESTA:
case R_PPC64_ADDR16_LO:
case R_PPC64_ADDR16_LO_DS:
+ case R_PPC64_ADDR16_HIGHER34:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHEST34:
+ case R_PPC64_ADDR16_HIGHESTA34:
case R_PPC64_ADDR24:
case R_PPC64_ADDR32:
case R_PPC64_ADDR64:
case R_PPC64_UADDR16:
case R_PPC64_UADDR32:
case R_PPC64_UADDR64:
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_D28:
dodyn:
if ((input_section->flags & SEC_ALLOC) == 0)
break;
}
else
{
- asection *osec;
-
- osec = sec->output_section;
- indx = elf_section_data (osec)->dynindx;
+ asection *osec = sec->output_section;
- if (indx == 0)
+ if ((osec->flags & SEC_THREAD_LOCAL) != 0)
+ {
+ /* TLS symbol values are relative to the
+ TLS segment. Dynamic relocations for
+ local TLS symbols therefore can't be
+ reduced to a relocation against their
+ section symbol because it holds the
+ address of the section, not a value
+ relative to the TLS segment. We could
+ change the .tdata dynamic section symbol
+ to be zero value but STN_UNDEF works
+ and is used elsewhere, eg. for TPREL64
+ GOT relocs against local TLS symbols. */
+ osec = htab->elf.tls_sec;
+ indx = 0;
+ }
+ else
{
- if ((osec->flags & SEC_READONLY) == 0
- && htab->elf.data_index_section != NULL)
- osec = htab->elf.data_index_section;
- else
- osec = htab->elf.text_index_section;
indx = elf_section_data (osec)->dynindx;
+ if (indx == 0)
+ {
+ if ((osec->flags & SEC_READONLY) == 0
+ && htab->elf.data_index_section != NULL)
+ osec = htab->elf.data_index_section;
+ else
+ osec = htab->elf.text_index_section;
+ indx = elf_section_data (osec)->dynindx;
+ }
+ BFD_ASSERT (indx != 0);
}
- BFD_ASSERT (indx != 0);
/* We are turning this relocation into one
against a section symbol, so subtract out
loc += sreloc->reloc_count++ * sizeof (Elf64_External_Rela);
bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
+ if (!warned_dynamic
+ && !ppc64_glibc_dynamic_reloc (ELF64_R_TYPE (outrel.r_info)))
+ {
+ info->callbacks->einfo
+ /* xgettext:c-format */
+ (_("%X%P: %pB: %s against %pT "
+ "is not supported by glibc as a dynamic relocation\n"),
+ input_bfd,
+ ppc64_elf_howto_table[ELF64_R_TYPE (outrel.r_info)]->name,
+ sym_name);
+ warned_dynamic = TRUE;
+ }
+
/* If this reloc is against an external symbol, it will
be computed at runtime, so there's no need to do
anything now. However, for the sake of prelink ensure
insn. */
break;
+ case R_PPC64_PLTCALL_NOTOC:
+ if (!unresolved_reloc)
+ htab->notoc_plt = 1;
+ /* Fall through. */
case R_PPC64_PLTCALL:
if (unresolved_reloc)
{
insn = bfd_get_32 (input_bfd, p);
insn &= 1;
bfd_put_32 (input_bfd, B_DOT | insn, p);
- bfd_put_32 (input_bfd, NOP, p + 4);
+ if (r_type == R_PPC64_PLTCALL)
+ bfd_put_32 (input_bfd, NOP, p + 4);
unresolved_reloc = save_unresolved_reloc;
r_type = R_PPC64_REL24;
}
break;
+ case R_PPC64_PLTSEQ_NOTOC:
case R_PPC64_PLTSEQ:
if (unresolved_reloc)
{
}
break;
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ if (!unresolved_reloc)
+ htab->notoc_plt = 1;
+ /* Fall through. */
+ case R_PPC64_PLT_PCREL34:
+ if (unresolved_reloc)
+ {
+ bfd_byte *p = contents + rel->r_offset;
+ bfd_put_32 (input_bfd, PNOP >> 32, p);
+ bfd_put_32 (input_bfd, PNOP, p + 4);
+ unresolved_reloc = FALSE;
+ goto copy_reloc;
+ }
+ break;
+
case R_PPC64_PLT16_HA:
if (unresolved_reloc)
{
{
bfd_byte *p = contents + (rel->r_offset & ~3);
insn = bfd_get_32 (input_bfd, p);
- if ((insn & (0x3f << 26)) == 12u << 26 /* addic */)
+ if ((insn & (0x3fu << 26)) == 12u << 26 /* addic */)
{
/* Transform addic to addi when we change reg. */
- insn &= ~((0x3f << 26) | (0x1f << 16));
+ insn &= ~((0x3fu << 26) | (0x1f << 16));
insn |= (14u << 26) | (2 << 16);
}
else
{
bfd_byte *p = contents + (rel->r_offset & ~3);
insn = bfd_get_32 (input_bfd, p);
- if ((insn & ((0x3f << 26) | 0x1f << 16))
+ if ((insn & ((0x3fu << 26) | 0x1f << 16))
!= ((15u << 26) | (13 << 16)) /* addis rt,13,imm */)
/* xgettext:c-format */
info->callbacks->minfo
break;
case R_PPC64_REL16_HA:
+ case R_PPC64_REL16_HIGHA:
+ case R_PPC64_REL16_HIGHERA:
+ case R_PPC64_REL16_HIGHESTA:
case R_PPC64_REL16DX_HA:
case R_PPC64_ADDR16_HA:
case R_PPC64_ADDR16_HIGHA:
addend += 0x8000;
break;
+ case R_PPC64_D34_HA30:
+ case R_PPC64_ADDR16_HIGHERA34:
+ case R_PPC64_ADDR16_HIGHESTA34:
+ case R_PPC64_REL16_HIGHERA34:
+ case R_PPC64_REL16_HIGHESTA34:
+ if (sec != NULL)
+ addend += 1ULL << 33;
+ break;
+
case R_PPC64_ADDR16_DS:
case R_PPC64_ADDR16_LO_DS:
case R_PPC64_GOT16_DS:
forms of all the _DS relocs bloats all reloc switches in
this file. It doesn't make much sense to use these
relocs in data, so testing the insn should be safe. */
- if ((insn & (0x3f << 26)) == (56u << 26)
- || ((insn & (0x3f << 26)) == (61u << 26) && (insn & 3) == 1))
+ if ((insn & (0x3fu << 26)) == (56u << 26)
+ || ((insn & (0x3fu << 26)) == (61u << 26) && (insn & 3) == 1))
mask = 15;
relocation += addend;
addend = insn & (mask ^ 3);
enum complain_overflow complain = complain_overflow_signed;
insn = bfd_get_32 (input_bfd, contents + (rel->r_offset & ~3));
- if ((insn & (0x3f << 26)) == 10u << 26 /* cmpli */)
+ if ((insn & (0x3fu << 26)) == 10u << 26 /* cmpli */)
complain = complain_overflow_bitfield;
else if (howto->rightshift == 0
- ? ((insn & (0x3f << 26)) == 28u << 26 /* andi */
- || (insn & (0x3f << 26)) == 24u << 26 /* ori */
- || (insn & (0x3f << 26)) == 26u << 26 /* xori */)
- : ((insn & (0x3f << 26)) == 29u << 26 /* andis */
- || (insn & (0x3f << 26)) == 25u << 26 /* oris */
- || (insn & (0x3f << 26)) == 27u << 26 /* xoris */))
+ ? ((insn & (0x3fu << 26)) == 28u << 26 /* andi */
+ || (insn & (0x3fu << 26)) == 24u << 26 /* ori */
+ || (insn & (0x3fu << 26)) == 26u << 26 /* xori */)
+ : ((insn & (0x3fu << 26)) == 29u << 26 /* andis */
+ || (insn & (0x3fu << 26)) == 25u << 26 /* oris */
+ || (insn & (0x3fu << 26)) == 27u << 26 /* xoris */))
complain = complain_overflow_unsigned;
if (howto->complain_on_overflow != complain)
{
}
}
- if (r_type == R_PPC64_REL16DX_HA)
+ switch (r_type)
{
- /* Split field reloc isn't handled by _bfd_final_link_relocate. */
+ /* Split field relocs aren't handled by _bfd_final_link_relocate. */
+ case R_PPC64_D34:
+ case R_PPC64_D34_LO:
+ case R_PPC64_D34_HI30:
+ case R_PPC64_D34_HA30:
+ case R_PPC64_PCREL34:
+ case R_PPC64_GOT_PCREL34:
+ case R_PPC64_TPREL34:
+ case R_PPC64_DTPREL34:
+ case R_PPC64_GOT_TLSGD34:
+ case R_PPC64_GOT_TLSLD34:
+ case R_PPC64_GOT_TPREL34:
+ case R_PPC64_GOT_DTPREL34:
+ case R_PPC64_PLT_PCREL34:
+ case R_PPC64_PLT_PCREL34_NOTOC:
+ case R_PPC64_D28:
+ case R_PPC64_PCREL28:
+ if (rel->r_offset + 8 > input_section->size)
+ r = bfd_reloc_outofrange;
+ else
+ {
+ relocation += addend;
+ if (howto->pc_relative)
+ relocation -= (rel->r_offset
+ + input_section->output_offset
+ + input_section->output_section->vma);
+ relocation >>= howto->rightshift;
+
+ pinsn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ pinsn <<= 32;
+ pinsn |= bfd_get_32 (input_bfd, contents + rel->r_offset + 4);
+
+ pinsn &= ~howto->dst_mask;
+ pinsn |= (((relocation << 16) | (relocation & 0xffff))
+ & howto->dst_mask);
+ bfd_put_32 (input_bfd, pinsn >> 32, contents + rel->r_offset);
+ bfd_put_32 (input_bfd, pinsn, contents + rel->r_offset + 4);
+ r = bfd_reloc_ok;
+ if (howto->complain_on_overflow == complain_overflow_signed
+ && (relocation + (1ULL << (howto->bitsize - 1))
+ >= 1ULL << howto->bitsize))
+ r = bfd_reloc_overflow;
+ }
+ break;
+
+ case R_PPC64_REL16DX_HA:
if (rel->r_offset + 4 > input_section->size)
r = bfd_reloc_outofrange;
else
if (relocation + 0x8000 > 0xffff)
r = bfd_reloc_overflow;
}
+ break;
+
+ default:
+ r = _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents, rel->r_offset,
+ relocation, addend);
}
- else
- r = _bfd_final_link_relocate (howto, input_bfd, input_section, contents,
- rel->r_offset, relocation, addend);
if (r != bfd_reloc_ok)
{
break;
}
- if (h->needs_copy)
+ if (h->needs_copy
+ && (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && (h->root.u.def.section == htab->elf.sdynbss
+ || h->root.u.def.section == htab->elf.sdynrelro))
{
/* This symbol needs a copy reloc. Set it up. */
Elf_Internal_Rela rela;
asection *srel;
bfd_byte *loc;
- if (h->dynindx == -1
- || (h->root.type != bfd_link_hash_defined
- && h->root.type != bfd_link_hash_defweak)
- || htab->elf.srelbss == NULL
- || htab->elf.sreldynrelro == NULL)
+ if (h->dynindx == -1)
abort ();
rela.r_offset = (h->root.u.def.value
break;
case DT_PPC64_OPT:
- if (htab->do_multi_toc && htab->multi_toc_needed)
+ if ((htab->do_multi_toc && htab->multi_toc_needed)
+ || htab->notoc_plt)
dyn.d_un.d_val |= PPC64_OPT_MULTI_TOC;
if (htab->has_plt_localentry0)
dyn.d_un.d_val |= PPC64_OPT_LOCALENTRY;