+
+ if (hp != NULL)
+ *hp = h;
+
+ if (symp != NULL)
+ *symp = NULL;
+
+ if (symsecp != NULL)
+ {
+ asection *symsec = NULL;
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ symsec = h->root.u.def.section;
+ *symsecp = symsec;
+ }
+
+ if (tls_maskp != NULL)
+ *tls_maskp = &ppc_elf_hash_entry (h)->tls_mask;
+ }
+ else
+ {
+ Elf_Internal_Sym *sym;
+ Elf_Internal_Sym *locsyms = *locsymsp;
+
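+ /* Read and cache the local symbols on first use.  */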
+ if (locsyms == NULL)
+ {
+ locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (locsyms == NULL)
+ locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
+ symtab_hdr->sh_info,
+ 0, NULL, NULL, NULL);
+ if (locsyms == NULL)
+ return FALSE;
+ *locsymsp = locsyms;
+ }
+ sym = locsyms + r_symndx;
+
+ if (hp != NULL)
+ *hp = NULL;
+
+ if (symp != NULL)
+ *symp = sym;
+
+ if (symsecp != NULL)
+ *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
+
+ if (tls_maskp != NULL)
+ {
+ bfd_signed_vma *local_got;
+ unsigned char *tls_mask;
+
+ tls_mask = NULL;
+ local_got = elf_local_got_refcounts (ibfd);
+ if (local_got != NULL)
+ {
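+ /* For local symbols the tls_mask bytes follow the arrays of
+ got refcounts and plt entry pointers, each of which is
+ symtab_hdr->sh_info entries long.  */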
+ struct plt_entry **local_plt = (struct plt_entry **)
+ (local_got + symtab_hdr->sh_info);
+ unsigned char *lgot_masks = (unsigned char *)
+ (local_plt + symtab_hdr->sh_info);
+ tls_mask = &lgot_masks[r_symndx];
+ }
+ *tls_maskp = tls_mask;
+ }
+ }
+ return TRUE;
+}
+\f
+/* Analyze inline PLT call relocations to see whether calls to locally
+ defined functions can be converted to direct calls. */
+
+bfd_boolean
+ppc_elf_inline_plt (struct bfd_link_info *info)
+{
+ struct ppc_elf_link_hash_table *htab;
+ bfd *ibfd;
+ asection *sec;
+ bfd_vma low_vma, high_vma, limit;
+
+ htab = ppc_elf_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
+ /* A bl insn can reach -0x2000000 to 0x1fffffc.  The limit is
+ reduced by 0x200000 to cater for possible stubs that might be
+ added between the call and its destination.  */
+ limit = 0x1e00000;
+ low_vma = -1;
+ high_vma = 0;
+ for (sec = info->output_bfd->sections; sec != NULL; sec = sec->next)
+ if ((sec->flags & (SEC_ALLOC | SEC_CODE)) == (SEC_ALLOC | SEC_CODE))
+ {
+ if (low_vma > sec->vma)
+ low_vma = sec->vma;
+ if (high_vma < sec->vma + sec->size)
+ high_vma = sec->vma + sec->size;
+ }
+
+ /* If a "bl" can reach anywhere in local code sections, then we can
+ convert all inline PLT sequences to direct calls when the symbol
+ is local. */
+ if (high_vma - low_vma < limit)
+ {
+ htab->can_convert_all_inline_plt = 1;
+ return TRUE;
+ }
+
+ /* Otherwise, go looking through relocs for cases where a direct
+ call won't reach. Mark the symbol on any such reloc to disable
+ the optimization and keep the PLT entry as it seems likely that
+ this will be better than creating trampolines. Note that this
+ will disable the optimization for all inline PLT calls to a
+ particular symbol, not just those that won't reach. The
+ difficulty in doing a more precise optimization is that the
+ linker needs to make a decision depending on whether a
+ particular R_PPC_PLTCALL insn can be turned into a direct
+ call, for each of the R_PPC_PLTSEQ and R_PPC_PLT16* insns in
+ the sequence, and there is nothing that ties those relocs
+ together except their symbol. */
+
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Sym *local_syms;
+
+ if (!is_ppc_elf (ibfd))
+ continue;
+
+ local_syms = NULL;
+ symtab_hdr = &elf_symtab_hdr (ibfd);
+
+ for (sec = ibfd->sections; sec != NULL; sec = sec->next)
+ if (sec->has_pltcall
+ && !bfd_is_abs_section (sec->output_section))
+ {
+ Elf_Internal_Rela *relstart, *rel, *relend;
+
+ /* Read the relocations. */
+ relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
+ info->keep_memory);
+ if (relstart == NULL)
+ return FALSE;
+
+ relend = relstart + sec->reloc_count;
+ for (rel = relstart; rel < relend; rel++)
+ {
+ enum elf_ppc_reloc_type r_type;
+ unsigned long r_symndx;
+ asection *sym_sec;
+ struct elf_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+ unsigned char *tls_maskp;
+
+ r_type = ELF32_R_TYPE (rel->r_info);
+ if (r_type != R_PPC_PLTCALL)
+ continue;
+
+ r_symndx = ELF32_R_SYM (rel->r_info);
+ if (!get_sym_h (&h, &sym, &sym_sec, &tls_maskp, &local_syms,
+ r_symndx, ibfd))
+ {
+ if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ free (local_syms);
+ return FALSE;
+ }
+
+ if (sym_sec != NULL && sym_sec->output_section != NULL)
+ {
+ bfd_vma from, to;
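+ /* get_sym_h only sets sym_sec for defined symbols, so
+ h->root.u.def.value is valid whenever h is non-NULL here.  */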
+ if (h != NULL)
+ to = h->root.u.def.value;
+ else
+ to = sym->st_value;
+ to += (rel->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ from = (rel->r_offset
+ + sec->output_offset
+ + sec->output_section->vma);
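+ /* Unsigned comparison: true iff -limit <= to - from < limit,
+ ie. the destination is within direct branch range.  */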
+ if (to - from + limit < 2 * limit)
+ *tls_maskp &= ~PLT_KEEP;
+ }
+ }
+ if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ }
+
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+ }
+
+ return TRUE;
+}
+
+/* Set plt output section type, htab->tls_get_addr, and call the
+ generic ELF tls_setup function. */
+
+asection *
+ppc_elf_tls_setup (bfd *obfd, struct bfd_link_info *info)
+{
+ struct ppc_elf_link_hash_table *htab;
+
+ htab = ppc_elf_hash_table (info);
+ htab->tls_get_addr = elf_link_hash_lookup (&htab->elf, "__tls_get_addr",
+ FALSE, FALSE, TRUE);
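+ /* The optimized __tls_get_addr call stub is only supported
+ with the new PLT.  */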
+ if (htab->plt_type != PLT_NEW)
+ htab->params->no_tls_get_addr_opt = TRUE;
+
+ if (!htab->params->no_tls_get_addr_opt)
+ {
+ struct elf_link_hash_entry *opt, *tga;
+ opt = elf_link_hash_lookup (&htab->elf, "__tls_get_addr_opt",
+ FALSE, FALSE, TRUE);
+ if (opt != NULL
+ && (opt->root.type == bfd_link_hash_defined
+ || opt->root.type == bfd_link_hash_defweak))
+ {
+ /* If glibc supports an optimized __tls_get_addr call stub,
+ signalled by the presence of __tls_get_addr_opt, and we'll
+ be calling __tls_get_addr via a plt call stub, then
+ make __tls_get_addr point to __tls_get_addr_opt. */
+ tga = htab->tls_get_addr;
+ if (htab->elf.dynamic_sections_created
+ && tga != NULL
+ && (tga->type == STT_FUNC
+ || tga->needs_plt)
+ && !(SYMBOL_CALLS_LOCAL (info, tga)
+ || UNDEFWEAK_NO_DYNAMIC_RELOC (info, tga)))
+ {
+ struct plt_entry *ent;
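+ /* Check that a PLT call to __tls_get_addr is actually going
+ to be made before redirecting it to __tls_get_addr_opt.  */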
+ for (ent = tga->plt.plist; ent != NULL; ent = ent->next)
+ if (ent->plt.refcount > 0)
+ break;
+ if (ent != NULL)
+ {
+ tga->root.type = bfd_link_hash_indirect;
+ tga->root.u.i.link = &opt->root;
+ ppc_elf_copy_indirect_symbol (info, opt, tga);
+ opt->mark = 1;
+ if (opt->dynindx != -1)
+ {
+ /* Use __tls_get_addr_opt in dynamic relocations. */
+ opt->dynindx = -1;
+ _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
+ opt->dynstr_index);
+ if (!bfd_elf_link_record_dynamic_symbol (info, opt))
+ return NULL;
+ }
+ htab->tls_get_addr = opt;
+ }
+ }
+ }
+ else
+ htab->params->no_tls_get_addr_opt = TRUE;
+ }
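+ /* With the new PLT, .plt holds addresses rather than code, so
+ make sure its output section is laid out as writable data.  */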
+ if (htab->plt_type == PLT_NEW
+ && htab->elf.splt != NULL
+ && htab->elf.splt->output_section != NULL)
+ {
+ elf_section_type (htab->elf.splt->output_section) = SHT_PROGBITS;
+ elf_section_flags (htab->elf.splt->output_section) = SHF_ALLOC + SHF_WRITE;
+ }
+
+ return _bfd_elf_tls_setup (obfd, info);
+}
+
+/* Return TRUE iff REL is a branch reloc with a global symbol matching
+ HASH. */
+
+static bfd_boolean
+branch_reloc_hash_match (const bfd *ibfd,
+ const Elf_Internal_Rela *rel,
+ const struct elf_link_hash_entry *hash)
+{
+ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (ibfd);
+ enum elf_ppc_reloc_type r_type = ELF32_R_TYPE (rel->r_info);
+ unsigned int r_symndx = ELF32_R_SYM (rel->r_info);
+
+ if (r_symndx >= symtab_hdr->sh_info && is_branch_reloc (r_type))
+ {
+ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
+ struct elf_link_hash_entry *h;
+
+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
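+ /* Follow indirect and warning links to the real symbol.  */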
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ if (h == hash)
+ return TRUE;
+ }