Fix bad interaction between --relax and tls optimisation
author	Alan Modra <amodra@gmail.com>
Wed, 12 Feb 2014 10:34:32 +0000 (21:04 +1030)
committer	Alan Modra <amodra@gmail.com>
Wed, 12 Feb 2014 11:40:09 +0000 (22:10 +1030)
Adding long-branch stubs for __tls_get_addr calls that are optimised
away is silly.  It also causes assertion failures on newer object files
that use R_PPC_TLSGD and R_PPC_TLSLD marker relocs, and half-optimised
(ie. broken) code for older object files.

PR 16546
* elf32-ppc.c (ppc_elf_relax_section): Don't build long-branch
stubs for calls to __tls_get_addr that we know will later be
optimised away.

bfd/ChangeLog
bfd/elf32-ppc.c

index 120a2f5132944a3984537a816ce799c40afe9da1..39d9def6e99dad4fd4a76ae0bf1e3872af22b131 100644 (file)
@@ -1,3 +1,9 @@
+2014-02-12  Alan Modra  <amodra@gmail.com>
+
+       * elf32-ppc.c (ppc_elf_relax_section): Don't build long-branch
+       stubs for calls to __tls_get_addr that we know will later be
+       optimised away.
+
 2014-02-12  Alan Modra  <amodra@gmail.com>
 
        * elf32-ppc.c (ppc_elf_relax_section): Enable ppc476 workaround
index f7c03663162457c1d4415d425362d35b7a08ccf9..d8e61088abdd03294bee573827e3ac907ccd5019 100644 (file)
@@ -6831,6 +6831,62 @@ ppc_elf_relax_section (bfd *abfd,
              else
                continue;
 
+             /* If this branch is to __tls_get_addr then we may later
+                optimise away the call.  We won't be needing a long-
+                branch stub in that case.  */
+             if (link_info->executable
+                 && !link_info->relocatable
+                 && h == htab->tls_get_addr
+                 && irel != internal_relocs)
+               {
+                 unsigned long t_symndx = ELF32_R_SYM (irel[-1].r_info);
+                 unsigned long t_rtype = ELF32_R_TYPE (irel[-1].r_info);
+                 unsigned int tls_mask = 0;
+
+                 /* The previous reloc should be one of R_PPC_TLSGD or
+                    R_PPC_TLSLD, or for older object files, a reloc
+                    on the __tls_get_addr arg setup insn.  Get tls
+                    mask bits from the symbol on that reloc.  */
+                 if (t_symndx < symtab_hdr->sh_info)
+                   {
+                     bfd_vma *local_got_offsets = elf_local_got_offsets (abfd);
+
+                     if (local_got_offsets != NULL)
+                       {
+                         struct plt_entry **local_plt = (struct plt_entry **)
+                           (local_got_offsets + symtab_hdr->sh_info);
+                         char *lgot_masks = (char *)
+                           (local_plt + symtab_hdr->sh_info);
+                         tls_mask = lgot_masks[t_symndx];
+                       }
+                   }
+                 else
+                   {
+                     struct elf_link_hash_entry *th
+                       = elf_sym_hashes (abfd)[t_symndx - symtab_hdr->sh_info];
+
+                     while (th->root.type == bfd_link_hash_indirect
+                            || th->root.type == bfd_link_hash_warning)
+                       th = (struct elf_link_hash_entry *) th->root.u.i.link;
+
+                     tls_mask
+                       = ((struct ppc_elf_link_hash_entry *) th)->tls_mask;
+                   }
+
+                 /* The mask bits tell us if the call will be
+                    optimised away.  */
+                 if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_GD) == 0
+                     && (t_rtype == R_PPC_TLSGD
+                         || t_rtype == R_PPC_GOT_TLSGD16
+                         || t_rtype == R_PPC_GOT_TLSGD16_LO))
+                   continue;
+                 if ((tls_mask & TLS_TLS) != 0 && (tls_mask & TLS_LD) == 0
+                     && (t_rtype == R_PPC_TLSLD
+                         || t_rtype == R_PPC_GOT_TLSLD16
+                         || t_rtype == R_PPC_GOT_TLSLD16_LO))
+                   continue;
+               }
+
              sym_type = h->type;
            }
 
This page took 0.036202 seconds and 4 git commands to generate.