gdb: add target_ops::supports_displaced_step
diff --git a/gold/s390.cc b/gold/s390.cc
index 45c0ba70ad43c5b582ff407357ffefdb707762eb..23a203223d702b330cea1147f8ca7511f00a2d68 100644
--- a/gold/s390.cc
+++ b/gold/s390.cc
@@ -1,6 +1,6 @@
 // s390.cc -- s390 target support for gold.
 
-// Copyright (C) 2015 Free Software Foundation, Inc.
+// Copyright (C) 2015-2020 Free Software Foundation, Inc.
 // Written by Marcin Koƛcielnicki <koriakin@0x04.net>.
 
 // This file is part of gold.
@@ -348,6 +348,21 @@ class Target_s390 : public Sized_target<size, true>
                          const unsigned char* plocal_symbols,
                          Relocatable_relocs*);
 
+  // Scan the relocs for --emit-relocs.
+  void
+  emit_relocs_scan(Symbol_table* symtab,
+                  Layout* layout,
+                  Sized_relobj_file<size, true>* object,
+                  unsigned int data_shndx,
+                  unsigned int sh_type,
+                  const unsigned char* prelocs,
+                  size_t reloc_count,
+                  Output_section* output_section,
+                  bool needs_special_offset_handling,
+                  size_t local_symbol_count,
+                  const unsigned char* plocal_syms,
+                  Relocatable_relocs* rr);
+
   // Return a string used to fill a code section with nops.
   std::string
   do_code_fill(section_size_type length) const;
@@ -402,6 +417,20 @@ class Target_s390 : public Sized_target<size, true>
   do_can_check_for_function_pointers() const
   { return true; }
 
+  // Return whether SYM is a call to a non-split function.
+  bool
+  do_is_call_to_non_split(const Symbol* sym, const unsigned char* preloc,
+                         const unsigned char* view,
+                         section_size_type view_size) const;
+
+  // Adjust -fsplit-stack code which calls non-split-stack code.
+  void
+  do_calls_non_split(Relobj* object, unsigned int shndx,
+                    section_offset_type fnoffset, section_size_type fnsize,
+                    const unsigned char* prelocs, size_t reloc_count,
+                    unsigned char* view, section_size_type view_size,
+                    std::string* from, std::string* to) const;
+
   // Return the size of the GOT section.
   section_size_type
   got_size() const
@@ -596,15 +625,6 @@ class Target_s390 : public Sized_target<size, true>
                 section_size_type view_size);
   };
 
-  // A class which returns the size required for a relocation type,
-  // used while scanning relocs during a relocatable link.
-  class Relocatable_size_for_reloc
-  {
-   public:
-    unsigned int
-    get_size_for_reloc(unsigned int, Relobj*);
-  };
-
   // Adjust TLS relocation type based on the options and whether this
   // is a local symbol.
   static tls::Tls_optimization
@@ -687,6 +707,17 @@ class Target_s390 : public Sized_target<size, true>
                                  this->rela_dyn_section(layout));
   }
 
+  // A function for targets to call.  Return whether BYTES/LEN matches
+  // VIEW/VIEW_SIZE at OFFSET.  Like the one in Target, but takes
+  // an unsigned char * parameter.
+  bool
+  match_view_u(const unsigned char* view, section_size_type view_size,
+     section_offset_type offset, const unsigned char* bytes, size_t len) const
+    {
+      return this->match_view(view, view_size, offset,
+                             reinterpret_cast<const char*>(bytes), len);
+    }
+
   // Information about this specific target which we pass to the
   // general Target structure.
   static Target::Target_info s390_info;
@@ -724,6 +755,50 @@ class Target_s390 : public Sized_target<size, true>
   bool tls_base_symbol_defined_;
   // For use in do_tls_offset_for_*
   Layout *layout_;
+
+  // Code sequences for -fsplit-stack matching.
+  static const unsigned char ss_code_bras_8[];
+  static const unsigned char ss_code_l_basr[];
+  static const unsigned char ss_code_a_basr[];
+  static const unsigned char ss_code_larl[];
+  static const unsigned char ss_code_brasl[];
+  static const unsigned char ss_code_jg[];
+  static const unsigned char ss_code_jgl[];
+
+  // Variable code sequence matchers for -fsplit-stack.
+  bool ss_match_st_r14(unsigned char* view,
+                      section_size_type view_size,
+                      section_offset_type *offset) const;
+  bool ss_match_l_r14(unsigned char* view,
+                     section_size_type view_size,
+                     section_offset_type *offset) const;
+  bool ss_match_mcount(unsigned char* view,
+                      section_size_type view_size,
+                      section_offset_type *offset) const;
+  bool ss_match_ear(unsigned char* view,
+                   section_size_type view_size,
+                   section_offset_type *offset) const;
+  bool ss_match_c(unsigned char* view,
+                 section_size_type view_size,
+                 section_offset_type *offset) const;
+  bool ss_match_l(unsigned char* view,
+                 section_size_type view_size,
+                 section_offset_type *offset,
+                 int *guard_reg) const;
+  bool ss_match_ahi(unsigned char* view,
+                   section_size_type view_size,
+                   section_offset_type *offset,
+                   int guard_reg,
+                   uint32_t *arg) const;
+  bool ss_match_alfi(unsigned char* view,
+                    section_size_type view_size,
+                    section_offset_type *offset,
+                    int guard_reg,
+                    uint32_t *arg) const;
+  bool ss_match_cr(unsigned char* view,
+                  section_size_type view_size,
+                  section_offset_type *offset,
+                  int guard_reg) const;
 };
 
 template<>
@@ -752,6 +827,7 @@ Target::Target_info Target_s390<32>::s390_info =
   NULL,                        // attributes_vendor
   "_start",            // entry_symbol_name
   32,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 template<>
@@ -780,6 +856,7 @@ Target::Target_info Target_s390<64>::s390_info =
   NULL,                        // attributes_vendor
   "_start",            // entry_symbol_name
   64,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 template<int size>
@@ -2480,7 +2557,7 @@ Target_s390<size>::Scan::local(Symbol_table* symtab,
                    unsupported_reloc_local(object, r_type);
                  }
              }
-           // fall through
+           // Fall through.
          case elfcpp::R_390_TLS_IEENT:
          case elfcpp::R_390_TLS_GOTIE12:
          case elfcpp::R_390_TLS_GOTIE20:
@@ -2918,7 +2995,7 @@ Target_s390<size>::Scan::global(Symbol_table* symtab,
                    unsupported_reloc_global(object, r_type, gsym);
                  }
              }
-           // fall through
+           // Fall through.
          case elfcpp::R_390_TLS_IEENT:
          case elfcpp::R_390_TLS_GOTIE12:
          case elfcpp::R_390_TLS_GOTIE20:
@@ -3086,13 +3163,13 @@ Target_s390<size>::gc_process_relocs(Symbol_table* symtab,
                                       size_t local_symbol_count,
                                       const unsigned char* plocal_symbols)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
 
   if (sh_type == elfcpp::SHT_REL)
     return;
 
-  gold::gc_process_relocs<size, true, Target_s390<size>, elfcpp::SHT_RELA,
-                         typename Target_s390<size>::Scan,
-                         typename Target_s390<size>::Relocatable_size_for_reloc>(
+  gold::gc_process_relocs<size, true, Target_s390<size>, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -3166,7 +3243,7 @@ Target_s390<size>::Relocate::relocate(
                  || (gsym->is_defined()
                      && !gsym->is_from_dynobj()
                      && !gsym->is_preemptible()));
-      // fallthru
+      // Fall through.
     case elfcpp::R_390_8:
     case elfcpp::R_390_12:
     case elfcpp::R_390_16:
@@ -3195,7 +3272,7 @@ Target_s390<size>::Relocate::relocate(
       gold_assert(gsym == NULL
                  || gsym->has_plt_offset()
                  || gsym->final_value_is_known());
-      // fallthru
+      // Fall through.
     case elfcpp::R_390_GOTOFF64:
     case elfcpp::R_390_GOTOFF32:
     case elfcpp::R_390_GOTOFF16:
@@ -3924,6 +4001,9 @@ Target_s390<size>::scan_relocs(Symbol_table* symtab,
                                 size_t local_symbol_count,
                                 const unsigned char* plocal_symbols)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
+
   if (sh_type == elfcpp::SHT_REL)
     {
       gold_error(_("%s: unsupported REL reloc section"),
@@ -3931,8 +4011,7 @@ Target_s390<size>::scan_relocs(Symbol_table* symtab,
       return;
     }
 
-  gold::scan_relocs<size, true, Target_s390<size>, elfcpp::SHT_RELA,
-      typename Target_s390<size>::Scan>(
+  gold::scan_relocs<size, true, Target_s390<size>, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -4019,113 +4098,50 @@ Target_s390<size>::do_finalize_sections(
     }
 }
 
-// Return the size of a relocation while scanning during a relocatable
-// link.
+// Scan the relocs during a relocatable link.
 
 template<int size>
-unsigned int
-Target_s390<size>::Relocatable_size_for_reloc::get_size_for_reloc(
-    unsigned int r_type,
-    Relobj* object)
+void
+Target_s390<size>::scan_relocatable_relocs(
+    Symbol_table* symtab,
+    Layout* layout,
+    Sized_relobj_file<size, true>* object,
+    unsigned int data_shndx,
+    unsigned int sh_type,
+    const unsigned char* prelocs,
+    size_t reloc_count,
+    Output_section* output_section,
+    bool needs_special_offset_handling,
+    size_t local_symbol_count,
+    const unsigned char* plocal_symbols,
+    Relocatable_relocs* rr)
 {
-  switch (r_type)
-    {
-    case elfcpp::R_390_NONE:
-    case elfcpp::R_390_GNU_VTINHERIT:
-    case elfcpp::R_390_GNU_VTENTRY:
-    case elfcpp::R_390_TLS_GD32:          // Global-dynamic
-    case elfcpp::R_390_TLS_GD64:
-    case elfcpp::R_390_TLS_GDCALL:
-    case elfcpp::R_390_TLS_LDM32:         // Local-dynamic
-    case elfcpp::R_390_TLS_LDM64:
-    case elfcpp::R_390_TLS_LDO32:
-    case elfcpp::R_390_TLS_LDO64:
-    case elfcpp::R_390_TLS_LDCALL:
-    case elfcpp::R_390_TLS_IE32:          // Initial-exec
-    case elfcpp::R_390_TLS_IE64:
-    case elfcpp::R_390_TLS_IEENT:
-    case elfcpp::R_390_TLS_GOTIE12:
-    case elfcpp::R_390_TLS_GOTIE20:
-    case elfcpp::R_390_TLS_GOTIE32:
-    case elfcpp::R_390_TLS_GOTIE64:
-    case elfcpp::R_390_TLS_LOAD:
-    case elfcpp::R_390_TLS_LE32:          // Local-exec
-    case elfcpp::R_390_TLS_LE64:
-      return 0;
-
-    case elfcpp::R_390_64:
-    case elfcpp::R_390_PC64:
-    case elfcpp::R_390_GOT64:
-    case elfcpp::R_390_PLT64:
-    case elfcpp::R_390_GOTOFF64:
-    case elfcpp::R_390_GOTPLT64:
-    case elfcpp::R_390_PLTOFF64:
-      return 8;
-
-    case elfcpp::R_390_32:
-    case elfcpp::R_390_PC32:
-    case elfcpp::R_390_GOT32:
-    case elfcpp::R_390_PLT32:
-    case elfcpp::R_390_GOTOFF32:
-    case elfcpp::R_390_GOTPC:
-    case elfcpp::R_390_PC32DBL:
-    case elfcpp::R_390_PLT32DBL:
-    case elfcpp::R_390_GOTPCDBL:
-    case elfcpp::R_390_GOTENT:
-    case elfcpp::R_390_GOTPLT32:
-    case elfcpp::R_390_GOTPLTENT:
-    case elfcpp::R_390_PLTOFF32:
-    case elfcpp::R_390_20:
-    case elfcpp::R_390_GOT20:
-    case elfcpp::R_390_GOTPLT20:
-      return 4;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
+  typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
+      Scan_relocatable_relocs;
 
-    case elfcpp::R_390_PC24DBL:
-    case elfcpp::R_390_PLT24DBL:
-      return 3;
-
-    case elfcpp::R_390_12:
-    case elfcpp::R_390_GOT12:
-    case elfcpp::R_390_GOTPLT12:
-    case elfcpp::R_390_PC12DBL:
-    case elfcpp::R_390_PLT12DBL:
-    case elfcpp::R_390_16:
-    case elfcpp::R_390_GOT16:
-    case elfcpp::R_390_PC16:
-    case elfcpp::R_390_PC16DBL:
-    case elfcpp::R_390_PLT16DBL:
-    case elfcpp::R_390_GOTOFF16:
-    case elfcpp::R_390_GOTPLT16:
-    case elfcpp::R_390_PLTOFF16:
-      return 2;
-
-    case elfcpp::R_390_8:
-      return 1;
-
-      // These are relocations which should only be seen by the
-      // dynamic linker, and should never be seen here.
-    case elfcpp::R_390_COPY:
-    case elfcpp::R_390_GLOB_DAT:
-    case elfcpp::R_390_JMP_SLOT:
-    case elfcpp::R_390_RELATIVE:
-    case elfcpp::R_390_IRELATIVE:
-    case elfcpp::R_390_TLS_DTPMOD:
-    case elfcpp::R_390_TLS_DTPOFF:
-    case elfcpp::R_390_TLS_TPOFF:
-      object->error(_("unexpected reloc %u in object file"), r_type);
-      return 0;
+  gold_assert(sh_type == elfcpp::SHT_RELA);
 
-    default:
-      object->error(_("unsupported reloc %u in object file"), r_type);
-      return 0;
-    }
+  gold::scan_relocatable_relocs<size, true, Scan_relocatable_relocs>(
+    symtab,
+    layout,
+    object,
+    data_shndx,
+    prelocs,
+    reloc_count,
+    output_section,
+    needs_special_offset_handling,
+    local_symbol_count,
+    plocal_symbols,
+    rr);
 }
 
-// Scan the relocs during a relocatable link.
+// Scan the relocs for --emit-relocs.
 
 template<int size>
 void
-Target_s390<size>::scan_relocatable_relocs(
+Target_s390<size>::emit_relocs_scan(
     Symbol_table* symtab,
     Layout* layout,
     Sized_relobj_file<size, true>* object,
@@ -4136,16 +4152,17 @@ Target_s390<size>::scan_relocatable_relocs(
     Output_section* output_section,
     bool needs_special_offset_handling,
     size_t local_symbol_count,
-    const unsigned char* plocal_symbols,
+    const unsigned char* plocal_syms,
     Relocatable_relocs* rr)
 {
-  gold_assert(sh_type == elfcpp::SHT_RELA);
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
+  typedef gold::Default_emit_relocs_strategy<Classify_reloc>
+      Emit_relocs_strategy;
 
-  typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
-    Relocatable_size_for_reloc> Scan_relocatable_relocs;
+  gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::scan_relocatable_relocs<size, true, elfcpp::SHT_RELA,
-      Scan_relocatable_relocs>(
+  gold::scan_relocatable_relocs<size, true, Emit_relocs_strategy>(
     symtab,
     layout,
     object,
@@ -4155,7 +4172,7 @@ Target_s390<size>::scan_relocatable_relocs(
     output_section,
     needs_special_offset_handling,
     local_symbol_count,
-    plocal_symbols,
+    plocal_syms,
     rr);
 }
 
@@ -4176,9 +4193,12 @@ Target_s390<size>::relocate_relocs(
     unsigned char* reloc_view,
     section_size_type reloc_view_size)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_relocs<size, true, elfcpp::SHT_RELA>(
+  gold::relocate_relocs<size, true, Classify_reloc>(
     relinfo,
     prelocs,
     reloc_count,
@@ -4243,6 +4263,622 @@ Target_s390<size>::do_code_fill(section_size_type length) const
   return std::string(length, static_cast<char>(0x07));
 }
 
+// Return whether SYM should be treated as a call to a non-split
+// function.  We don't want that to be true of a larl instruction
+// that merely loads its address.
+
+template<int size>
+bool
+Target_s390<size>::do_is_call_to_non_split(const Symbol* sym,
+                                          const unsigned char* preloc,
+                                          const unsigned char* view,
+                                          section_size_type view_size) const
+{
+  if (sym->type() != elfcpp::STT_FUNC)
+    return false;
+  typename Reloc_types<elfcpp::SHT_RELA, size, true>::Reloc reloc(preloc);
+  typename elfcpp::Elf_types<size>::Elf_WXword r_info
+    = reloc.get_r_info();
+  unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
+  section_offset_type offset = reloc.get_r_offset();
+  switch (r_type)
+    {
+    // PLT refs always involve calling the function.
+    case elfcpp::R_390_PLT12DBL:
+    case elfcpp::R_390_PLT16DBL:
+    case elfcpp::R_390_PLT24DBL:
+    case elfcpp::R_390_PLT32:
+    case elfcpp::R_390_PLT32DBL:
+    case elfcpp::R_390_PLT64:
+    case elfcpp::R_390_PLTOFF16:
+    case elfcpp::R_390_PLTOFF32:
+    case elfcpp::R_390_PLTOFF64:
+    // Could be used for calls for -msmall-exec.
+    case elfcpp::R_390_PC16DBL:
+      return true;
+
+    // Tricky case.  When used in a brasl, jg, and other branch instructions,
+    // it's a call or a sibcall.  However, when used in larl, it only loads
+    // the function's address - not a call.
+    case elfcpp::R_390_PC32DBL:
+      {
+       if (offset < 2
+           || offset + 4 > static_cast<section_offset_type>(view_size))
+         {
+           // Should not happen.
+           gold_error(_("instruction with PC32DBL not wholly within section"));
+           return false;
+         }
+
+       uint8_t op0 = view[offset-2];
+       uint8_t op1 = view[offset-1] & 0xf;
+
+       // LARL
+       if (op0 == 0xc0 && op1 == 0)
+         return false;
+
+       // Otherwise, it's either a call instruction, a branch instruction
+       // (used as a sibcall), or a data manipulation instruction (which
+       // has no business being used on a function, and can be ignored).
+        return true;
+      }
+
+    // Otherwise, it's probably not a call.
+    default:
+      return false;
+    }
+}
+
+// Code sequences to match below.
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_bras_8[] = {
+  0xa7, 0x15, 0x00, 0x06,              // bras %r1, .+0xc
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_l_basr[] = {
+  0x58, 0xe0, 0x10, 0x00,              // l %r14, 0(%r1)
+  0x58, 0x10, 0x10, 0x04,              // l %r1, 4(%r1)
+  0x0d, 0xee,                          // basr %r14, %r14
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_a_basr[] = {
+  0x18, 0xe1,                          // lr %r14, %r1
+  0x5a, 0xe0, 0x10, 0x00,              // a %r14, 0(%r1)
+  0x5a, 0x10, 0x10, 0x04,              // a %r1, 4(%r1)
+  0x0d, 0xee,                          // basr %r14, %r14
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_larl[] = {
+  0xc0, 0x10,                          // larl %r1, ...
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_brasl[] = {
+  0xc0, 0xe5,                          // brasl %r14, ...
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_jg[] = {
+  0xc0, 0xf4,                          // jg ...
+};
+
+template<int size>
+const unsigned char
+Target_s390<size>::ss_code_jgl[] = {
+  0xc0, 0x44,                          // jgl ...
+};
+
+template<>
+bool
+Target_s390<32>::ss_match_st_r14(unsigned char* view,
+                                section_size_type view_size,
+                                section_offset_type *offset) const
+{
+  static const unsigned char ss_code_st_r14[] = {
+    0x50, 0xe0, 0xf0, 0x04,            // st %r14, 4(%r15)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_st_r14,
+                         sizeof ss_code_st_r14))
+    return false;
+  *offset += sizeof ss_code_st_r14;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_st_r14(unsigned char* view,
+                                section_size_type view_size,
+                                section_offset_type *offset) const
+{
+  static const unsigned char ss_code_st_r14[] = {
+    0xe3, 0xe0, 0xf0, 0x08, 0x00, 0x24 // stg %r14, 8(%r15)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_st_r14,
+                         sizeof ss_code_st_r14))
+    return false;
+  *offset += sizeof ss_code_st_r14;
+  return true;
+}
+
+template<>
+bool
+Target_s390<32>::ss_match_l_r14(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_l_r14[] = {
+    0x58, 0xe0, 0xf0, 0x04,            // l %r14, 4(%r15)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_l_r14,
+                         sizeof ss_code_l_r14))
+    return false;
+  *offset += sizeof ss_code_l_r14;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_l_r14(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_l_r14[] = {
+    0xe3, 0xe0, 0xf0, 0x08, 0x00, 0x04 // lg %r14, 8(%r15)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_l_r14,
+                         sizeof ss_code_l_r14))
+    return false;
+  *offset += sizeof ss_code_l_r14;
+  return true;
+}
+
+template<int size>
+bool
+Target_s390<size>::ss_match_mcount(unsigned char* view,
+                                  section_size_type view_size,
+                                  section_offset_type *offset) const
+{
+  // Match the mcount call sequence.
+  section_offset_type myoff = *offset;
+
+  // First, look for the store instruction saving %r14.
+  if (!this->ss_match_st_r14(view, view_size, &myoff))
+    return false;
+
+  // Now, param load and the actual call.
+  if (this->match_view_u(view, view_size, myoff, ss_code_larl,
+                        sizeof ss_code_larl))
+    {
+      myoff += sizeof ss_code_larl + 4;
+
+      // After larl, expect a brasl.
+      if (!this->match_view_u(view, view_size, myoff, ss_code_brasl,
+                             sizeof ss_code_brasl))
+       return false;
+      myoff += sizeof ss_code_brasl + 4;
+    }
+  else if (size == 32 &&
+          this->match_view_u(view, view_size, myoff, ss_code_bras_8,
+                             sizeof ss_code_bras_8))
+    {
+      // The bras skips over a block of 8 bytes, loading its address
+      // to %r1.
+      myoff += sizeof ss_code_bras_8 + 8;
+
+      // Now, there are two sequences used for actual load and call,
+      // absolute and PIC.
+      if (this->match_view_u(view, view_size, myoff, ss_code_l_basr,
+                            sizeof ss_code_l_basr))
+        myoff += sizeof ss_code_l_basr;
+      else if (this->match_view_u(view, view_size, myoff, ss_code_a_basr,
+                                 sizeof ss_code_a_basr))
+        myoff += sizeof ss_code_a_basr;
+      else
+       return false;
+    }
+  else
+    return false;
+
+  // Finally, a load bringing %r14 back.
+  if (!this->ss_match_l_r14(view, view_size, &myoff))
+    return false;
+
+  // Found it.
+  *offset = myoff;
+  return true;
+}
+
+template<>
+bool
+Target_s390<32>::ss_match_ear(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_ear[] = {
+    0xb2, 0x4f, 0x00, 0x10,            // ear %r1, %a0
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_ear,
+                         sizeof ss_code_ear))
+    return false;
+  *offset += sizeof ss_code_ear;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_ear(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_ear[] = {
+    0xb2, 0x4f, 0x00, 0x10,            // ear %r1, %a0
+    0xeb, 0x11, 0x00, 0x20, 0x00, 0x0d,        // sllg %r1,%r1,32
+    0xb2, 0x4f, 0x00, 0x11,            // ear %r1, %a1
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_ear,
+                         sizeof ss_code_ear))
+    return false;
+  *offset += sizeof ss_code_ear;
+  return true;
+}
+
+template<>
+bool
+Target_s390<32>::ss_match_c(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_c[] = {
+    0x59, 0xf0, 0x10, 0x20,            // c %r15, 0x20(%r1)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_c,
+                         sizeof ss_code_c))
+    return false;
+  *offset += sizeof ss_code_c;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_c(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset) const
+{
+  static const unsigned char ss_code_c[] = {
+    0xe3, 0xf0, 0x10, 0x38, 0x00, 0x20,        // cg %r15, 0x38(%r1)
+  };
+  if (!this->match_view_u(view, view_size, *offset, ss_code_c,
+                         sizeof ss_code_c))
+    return false;
+  *offset += sizeof ss_code_c;
+  return true;
+}
+
+template<>
+bool
+Target_s390<32>::ss_match_l(unsigned char* view,
+                           section_size_type view_size,
+                           section_offset_type *offset,
+                           int *guard_reg) const
+{
+  // l %guard_reg, 0x20(%r1)
+  if (convert_to_section_size_type(*offset + 4) > view_size
+      || view[*offset] != 0x58
+      || (view[*offset + 1] & 0xf) != 0x0
+      || view[*offset + 2] != 0x10
+      || view[*offset + 3] != 0x20)
+    return false;
+  *guard_reg = view[*offset + 1] >> 4 & 0xf;
+  *offset += 4;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_l(unsigned char* view,
+                           section_size_type view_size,
+                           section_offset_type *offset,
+                           int *guard_reg) const
+{
+  // lg %guard_reg, 0x38(%r1)
+  if (convert_to_section_size_type(*offset + 6) > view_size
+      || view[*offset] != 0xe3
+      || (view[*offset + 1] & 0xf) != 0x0
+      || view[*offset + 2] != 0x10
+      || view[*offset + 3] != 0x38
+      || view[*offset + 4] != 0x00
+      || view[*offset + 5] != 0x04)
+    return false;
+  *guard_reg = view[*offset + 1] >> 4 & 0xf;
+  *offset += 6;
+  return true;
+}
+
+template<int size>
+bool
+Target_s390<size>::ss_match_ahi(unsigned char* view,
+                               section_size_type view_size,
+                               section_offset_type *offset,
+                               int guard_reg,
+                               uint32_t *arg) const
+{
+  int op = size == 32 ? 0xa : 0xb;
+  // a[g]hi %guard_reg, <arg>
+  if (convert_to_section_size_type(*offset + 4) > view_size
+      || view[*offset] != 0xa7
+      || view[*offset + 1] != (guard_reg << 4 | op)
+      // Disallow negative size.
+      || view[*offset + 2] & 0x80)
+    return false;
+  *arg = elfcpp::Swap<16, true>::readval(view + *offset + 2);
+  *offset += 4;
+  return true;
+}
+
+template<int size>
+bool
+Target_s390<size>::ss_match_alfi(unsigned char* view,
+                                section_size_type view_size,
+                                section_offset_type *offset,
+                                int guard_reg,
+                                uint32_t *arg) const
+{
+  int op = size == 32 ? 0xb : 0xa;
+  // al[g]fi %guard_reg, <arg>
+  if (convert_to_section_size_type(*offset + 6) > view_size
+      || view[*offset] != 0xc2
+      || view[*offset + 1] != (guard_reg << 4 | op))
+    return false;
+  *arg = elfcpp::Swap<32, true>::readval(view + *offset + 2);
+  *offset += 6;
+  return true;
+}
+
+template<>
+bool
+Target_s390<32>::ss_match_cr(unsigned char* view,
+                            section_size_type view_size,
+                            section_offset_type *offset,
+                            int guard_reg) const
+{
+  // cr %r15, %guard_reg
+  if (convert_to_section_size_type(*offset + 2) > view_size
+      || view[*offset] != 0x19
+      || view[*offset + 1] != (0xf0 | guard_reg))
+    return false;
+  *offset += 2;
+  return true;
+}
+
+template<>
+bool
+Target_s390<64>::ss_match_cr(unsigned char* view,
+                            section_size_type view_size,
+                            section_offset_type *offset,
+                            int guard_reg) const
+{
+  // cgr %r15, %guard_reg
+  if (convert_to_section_size_type(*offset + 4) > view_size
+      || view[*offset] != 0xb9
+      || view[*offset + 1] != 0x20
+      || view[*offset + 2] != 0x00
+      || view[*offset + 3] != (0xf0 | guard_reg))
+    return false;
+  *offset += 4;
+  return true;
+}
+
+
+// FNOFFSET in section SHNDX in OBJECT is the start of a function
+// compiled with -fsplit-stack.  The function calls non-split-stack
+// code.  We have to change the function so that it always ensures
+// that it has enough stack space to run some random function.
+
+template<int size>
+void
+Target_s390<size>::do_calls_non_split(Relobj* object, unsigned int shndx,
+                                     section_offset_type fnoffset,
+                                     section_size_type,
+                                     const unsigned char *prelocs,
+                                     size_t reloc_count,
+                                     unsigned char* view,
+                                     section_size_type view_size,
+                                     std::string*,
+                                     std::string*) const
+{
+  // true if there's a conditional call to __morestack in the function,
+  // false if there's an unconditional one.
+  bool conditional = false;
+  // Offset of the byte after the compare insn, if conditional.
+  section_offset_type cmpend = 0;
+  // Type and immediate offset of the add instruction that adds frame size
+  // to guard.
+  enum {
+    SS_ADD_NONE,
+    SS_ADD_AHI,
+    SS_ADD_ALFI,
+  } fsadd_type = SS_ADD_NONE;
+  section_offset_type fsadd_offset = 0;
+  uint32_t fsadd_frame_size = 0;
+  // Register used for loading guard.  Usually r1, but can also be r0 or r2-r5.
+  int guard_reg;
+  // Offset of the conditional jump.
+  section_offset_type jump_offset = 0;
+  // Section view and offset of param block.
+  section_offset_type param_offset = 0;
+  unsigned char *param_view = 0;
+  section_size_type param_view_size = 0;
+  // Current position in function.
+  section_offset_type curoffset = fnoffset;
+  // And the position of split-stack prologue.
+  section_offset_type ssoffset;
+  // Frame size.
+  typename elfcpp::Elf_types<size>::Elf_Addr frame_size;
+  // Relocation parsing.
+  typedef typename Reloc_types<elfcpp::SHT_RELA, size, true>::Reloc Reltype;
+  const int reloc_size = Reloc_types<elfcpp::SHT_RELA, size, true>::reloc_size;
+  const unsigned char *pr = prelocs;
+
+  // If the function was compiled with -pg, the profiling code may come before
+  // the split-stack prologue.  Skip it.
+
+  this->ss_match_mcount(view, view_size, &curoffset);
+  ssoffset = curoffset;
+
+  // First, figure out if there's a conditional call by looking for the
+  // extract-tp, add, cmp sequence.
+
+  if (this->ss_match_ear(view, view_size, &curoffset))
+    {
+      // Found extract-tp, now look for an add and compare.
+      conditional = true;
+      if (this->ss_match_c(view, view_size, &curoffset))
+       {
+         // Found a direct compare of stack pointer with the guard,
+         // we're done here.
+       }
+      else if (this->ss_match_l(view, view_size, &curoffset, &guard_reg))
+       {
+         // Found a load of guard to register, look for an add and compare.
+          if (this->ss_match_ahi(view, view_size, &curoffset, guard_reg,
+                                &fsadd_frame_size))
+           {
+             fsadd_type = SS_ADD_AHI;
+             fsadd_offset = curoffset - 2;
+           }
+         else if (this->ss_match_alfi(view, view_size, &curoffset, guard_reg,
+                                      &fsadd_frame_size))
+           {
+             fsadd_type = SS_ADD_ALFI;
+             fsadd_offset = curoffset - 4;
+           }
+         else
+            {
+             goto bad;
+            }
+         // Now, there has to be a compare.
+          if (!this->ss_match_cr(view, view_size, &curoffset, guard_reg))
+           goto bad;
+       }
+      else
+        {
+         goto bad;
+        }
+      cmpend = curoffset;
+    }
+
+  // Second, look for the call.
+  if (!this->match_view_u(view, view_size, curoffset, ss_code_larl,
+                         sizeof ss_code_larl))
+    goto bad;
+  curoffset += sizeof ss_code_larl;
+
+  // Find out larl's operand.  It should be a local symbol in .rodata
+  // section.
+  for (size_t i = 0; i < reloc_count; ++i, pr += reloc_size)
+    {
+      Reltype reloc(pr);
+      if (static_cast<section_offset_type>(reloc.get_r_offset())
+          == curoffset)
+        {
+          typename elfcpp::Elf_types<size>::Elf_WXword r_info
+            = reloc.get_r_info();
+          unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
+          unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
+          if (r_type != elfcpp::R_390_PC32DBL)
+            goto bad;
+          if (r_sym >= object->local_symbol_count())
+            goto bad;
+          Sized_relobj_file<size, true> *object_sized =
+            static_cast<Sized_relobj_file<size, true> *>(object);
+          const Symbol_value<size>* sym = object_sized->local_symbol(r_sym);
+          bool param_shndx_ordinary;
+          const unsigned int param_shndx =
+            sym->input_shndx(&param_shndx_ordinary);
+          if (!param_shndx_ordinary)
+            goto bad;
+          param_offset = sym->input_value() + reloc.get_r_addend() - 2
+                         - object->output_section(param_shndx)->address()
+                         - object->output_section_offset(param_shndx);
+          param_view = object->get_output_view(param_shndx,
+                                                  &param_view_size);
+          break;
+        }
+    }
+
+  if (!param_view)
+    goto bad;
+
+  curoffset += 4;
+
+  // Now, there has to be a jump to __morestack.
+  jump_offset = curoffset;
+
+  if (this->match_view_u(view, view_size, curoffset,
+                       conditional ? ss_code_jgl : ss_code_jg,
+                       sizeof ss_code_jg))
+    curoffset += sizeof ss_code_jg;
+  else
+    goto bad;
+
+  curoffset += 4;
+
+  // Read the frame size.
+  if (convert_to_section_size_type(param_offset + size / 8) > param_view_size)
+    goto bad;
+  frame_size = elfcpp::Swap<size, true>::readval(param_view + param_offset);
+
+  // Sanity check.
+  if (fsadd_type != SS_ADD_NONE && fsadd_frame_size != frame_size)
+    goto bad;
+
+  // Bump the frame size.
+  frame_size += parameters->options().split_stack_adjust_size();
+
+  // Store it to the param block.
+  elfcpp::Swap<size, true>::writeval(param_view + param_offset, frame_size);
+
+  if (!conditional)
+    {
+      // If the call was already unconditional, we're done.
+    }
+  else if (frame_size <= 0xffffffff && fsadd_type == SS_ADD_ALFI)
+    {
+      // Using alfi to add the frame size, and it still fits.  Adjust it.
+      elfcpp::Swap_unaligned<32, true>::writeval(view + fsadd_offset,
+                                                frame_size);
+    }
+  else
+    {
+      // We were either relying on the backoff area, or used ahi to load
+      // frame size.  This won't fly, as our new frame size is too large.
+      // Convert the sequence to unconditional by nopping out the comparison,
+      // and rewiring the jump.
+      this->set_view_to_nop(view, view_size, ssoffset, cmpend - ssoffset);
+
+      // The jump is jgl, we'll mutate it to jg.
+      view[jump_offset+1] = 0xf4;
+    }
+
+  return;
+
+bad:
+  if (!object->has_no_split_stack())
+      object->error(_("failed to match split-stack sequence at "
+                     "section %u offset %0zx"),
+                   shndx, static_cast<size_t>(fnoffset));
+}
+
 // Relocate section data.
 
 template<int size>
@@ -4259,11 +4895,13 @@ Target_s390<size>::relocate_section(
     section_size_type view_size,
     const Reloc_symbol_changes* reloc_symbol_changes)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, true>
+      Classify_reloc;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_section<size, true, Target_s390<size>, elfcpp::SHT_RELA,
-                        typename Target_s390<size>::Relocate,
-                        gold::Default_comdat_behavior>(
+  gold::relocate_section<size, true, Target_s390<size>, Relocate,
+                        gold::Default_comdat_behavior, Classify_reloc>(
     relinfo,
     this,
     prelocs,