diff --git a/gold/x86_64.cc b/gold/x86_64.cc
index b9f9f17accf48964c040977313615db5602b20dd..1d9d9209bf3ac9721b6fbc21e61136d7e831195e 100644
--- a/gold/x86_64.cc
+++ b/gold/x86_64.cc
@@ -1,7 +1,6 @@
 // x86_64.cc -- x86_64 target support for gold.
 
-// Copyright 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
-// Free Software Foundation, Inc.
+// Copyright (C) 2006-2020 Free Software Foundation, Inc.
 // Written by Ian Lance Taylor <iant@google.com>.
 
 // This file is part of gold.
@@ -49,6 +48,37 @@ namespace
 
 using namespace gold;
 
+// A class to handle the .got.plt section.
+
+class Output_data_got_plt_x86_64 : public Output_section_data_build
+{
+ public:
+  Output_data_got_plt_x86_64(Layout* layout)
+    : Output_section_data_build(8),
+      layout_(layout)
+  { }
+
+  Output_data_got_plt_x86_64(Layout* layout, off_t data_size)
+    : Output_section_data_build(data_size, 8),
+      layout_(layout)
+  { }
+
+ protected:
+  // Write out the .got.plt data.
+  void
+  do_write(Output_file*);
+
+  // Write to a map file.
+  void
+  do_print_to_mapfile(Mapfile* mapfile) const
+  { mapfile->print_output_data(this, "** GOT PLT"); }
+
+ private:
+  // A pointer to the Layout class, so that we can find the .dynamic
+  // section when we write out the GOT PLT section.
+  Layout* layout_;
+};
+
 // A class to handle the PLT data.
 // This is an abstract base class that handles most of the linker details
 // but does not know the actual contents of PLT entries.  The derived
@@ -62,9 +92,9 @@ class Output_data_plt_x86_64 : public Output_section_data
 
   Output_data_plt_x86_64(Layout* layout, uint64_t addralign,
                         Output_data_got<64, false>* got,
-                        Output_data_space* got_plt,
+                        Output_data_got_plt_x86_64* got_plt,
                         Output_data_space* got_irelative)
-    : Output_section_data(addralign), layout_(layout), tlsdesc_rel_(NULL),
+    : Output_section_data(addralign), tlsdesc_rel_(NULL),
       irelative_rel_(NULL), got_(got), got_plt_(got_plt),
       got_irelative_(got_irelative), count_(0), irelative_count_(0),
       tlsdesc_got_offset_(-1U), free_list_()
@@ -72,12 +102,12 @@ class Output_data_plt_x86_64 : public Output_section_data
 
   Output_data_plt_x86_64(Layout* layout, uint64_t plt_entry_size,
                         Output_data_got<64, false>* got,
-                        Output_data_space* got_plt,
+                        Output_data_got_plt_x86_64* got_plt,
                         Output_data_space* got_irelative,
                         unsigned int plt_count)
     : Output_section_data((plt_count + 1) * plt_entry_size,
                          plt_entry_size, false),
-      layout_(layout), tlsdesc_rel_(NULL), irelative_rel_(NULL), got_(got),
+      tlsdesc_rel_(NULL), irelative_rel_(NULL), got_(got),
       got_plt_(got_plt), got_irelative_(got_irelative), count_(plt_count),
       irelative_count_(0), tlsdesc_got_offset_(-1U), free_list_()
   {
@@ -149,7 +179,12 @@ class Output_data_plt_x86_64 : public Output_section_data
   has_irelative_section() const
   { return this->irelative_rel_ != NULL; }
 
-  // Return the number of PLT entries.
+  // Get count of regular PLT entries.
+  unsigned int
+  regular_count() const
+  { return this->count_; }
+
+  // Return the total number of PLT entries.
   unsigned int
   entry_count() const
   { return this->count_ + this->irelative_count_; }
@@ -174,11 +209,13 @@ class Output_data_plt_x86_64 : public Output_section_data
 
   // Return the PLT address to use for a global symbol.
   uint64_t
-  address_for_global(const Symbol*);
+  address_for_global(const Symbol* sym)
+  { return do_address_for_global(sym); }
 
   // Return the PLT address to use for a local symbol.
   uint64_t
-  address_for_local(const Relobj*, unsigned int symndx);
+  address_for_local(const Relobj* obj, unsigned int symndx)
+  { return do_address_for_local(obj, symndx); }
 
   // Add .eh_frame information for the PLT.
   void
@@ -186,6 +223,18 @@ class Output_data_plt_x86_64 : public Output_section_data
   { this->do_add_eh_frame(layout); }
 
  protected:
+  Output_data_got<64, false>*
+  got() const
+  { return this->got_; }
+
+  Output_data_got_plt_x86_64*
+  got_plt() const
+  { return this->got_plt_; }
+
+  Output_data_space*
+  got_irelative() const
+  { return this->got_irelative_; }
+
   // Fill in the first PLT entry.
   void
   fill_first_plt_entry(unsigned char* pov,
@@ -245,6 +294,14 @@ class Output_data_plt_x86_64 : public Output_section_data
                        unsigned int tlsdesc_got_offset,
                        unsigned int plt_offset) = 0;
 
+  // Return the PLT address to use for a global symbol.
+  virtual uint64_t
+  do_address_for_global(const Symbol* sym);
+
+  // Return the PLT address to use for a local symbol.
+  virtual uint64_t
+  do_address_for_local(const Relobj* obj, unsigned int symndx);
+
   virtual void
   do_add_eh_frame(Layout* layout) = 0;
 
@@ -269,9 +326,6 @@ class Output_data_plt_x86_64 : public Output_section_data
   void
   do_write(Output_file*);
 
-  // A pointer to the Layout class, so that we can find the .dynamic
-  // section when we write out the GOT PLT section.
-  Layout* layout_;
   // The reloc section.
   Reloc_section* rel_;
   // The TLSDESC relocs, if necessary.  These must follow the regular
@@ -283,7 +337,7 @@ class Output_data_plt_x86_64 : public Output_section_data
   // The .got section.
   Output_data_got<64, false>* got_;
   // The .got.plt section.
-  Output_data_space* got_plt_;
+  Output_data_got_plt_x86_64* got_plt_;
   // The part of the .got.plt section used for IRELATIVE relocs.
   Output_data_space* got_irelative_;
   // The number of PLT entries.
@@ -304,7 +358,7 @@ class Output_data_plt_x86_64_standard : public Output_data_plt_x86_64<size>
  public:
   Output_data_plt_x86_64_standard(Layout* layout,
                                  Output_data_got<64, false>* got,
-                                 Output_data_space* got_plt,
+                                 Output_data_got_plt_x86_64* got_plt,
                                  Output_data_space* got_irelative)
     : Output_data_plt_x86_64<size>(layout, plt_entry_size,
                                   got, got_plt, got_irelative)
@@ -312,7 +366,7 @@ class Output_data_plt_x86_64_standard : public Output_data_plt_x86_64<size>
 
   Output_data_plt_x86_64_standard(Layout* layout,
                                  Output_data_got<64, false>* got,
-                                 Output_data_space* got_plt,
+                                 Output_data_got_plt_x86_64* got_plt,
                                  Output_data_space* got_irelative,
                                  unsigned int plt_count)
     : Output_data_plt_x86_64<size>(layout, plt_entry_size,
@@ -376,6 +430,260 @@ class Output_data_plt_x86_64_standard : public Output_data_plt_x86_64<size>
   static const unsigned char plt_eh_frame_fde[plt_eh_frame_fde_size];
 };
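+
+// The PLT for x86_64 when -z bndplt (Intel MPX) is used.  PLT entries
+// use BND-prefixed branches, and a second table of 8-byte entries
+// (the additional PLT, or APLT) provides the addresses returned by
+// address_for_global and address_for_local.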
 
+class Output_data_plt_x86_64_bnd : public Output_data_plt_x86_64<64>
+{
+ public:
+  Output_data_plt_x86_64_bnd(Layout* layout,
+                            Output_data_got<64, false>* got,
+                            Output_data_got_plt_x86_64* got_plt,
+                            Output_data_space* got_irelative)
+    : Output_data_plt_x86_64<64>(layout, plt_entry_size,
+                                got, got_plt, got_irelative),
+      aplt_offset_(0)
+  { }
+
+  Output_data_plt_x86_64_bnd(Layout* layout,
+                            Output_data_got<64, false>* got,
+                            Output_data_got_plt_x86_64* got_plt,
+                            Output_data_space* got_irelative,
+                            unsigned int plt_count)
+    : Output_data_plt_x86_64<64>(layout, plt_entry_size,
+                                got, got_plt, got_irelative,
+                                plt_count),
+      aplt_offset_(0)
+  { }
+
+ protected:
+  virtual unsigned int
+  do_get_plt_entry_size() const
+  { return plt_entry_size; }
+
+  // Return the PLT address to use for a global symbol.
+  uint64_t
+  do_address_for_global(const Symbol*);
+
+  // Return the PLT address to use for a local symbol.
+  uint64_t
+  do_address_for_local(const Relobj*, unsigned int symndx);
+
+  virtual void
+  do_add_eh_frame(Layout* layout)
+  {
+    layout->add_eh_frame_for_plt(this,
+                                this->plt_eh_frame_cie,
+                                this->plt_eh_frame_cie_size,
+                                plt_eh_frame_fde,
+                                plt_eh_frame_fde_size);
+  }
+
+  virtual void
+  do_fill_first_plt_entry(unsigned char* pov,
+                         elfcpp::Elf_types<64>::Elf_Addr got_addr,
+                         elfcpp::Elf_types<64>::Elf_Addr plt_addr);
+
+  virtual unsigned int
+  do_fill_plt_entry(unsigned char* pov,
+                   elfcpp::Elf_types<64>::Elf_Addr got_address,
+                   elfcpp::Elf_types<64>::Elf_Addr plt_address,
+                   unsigned int got_offset,
+                   unsigned int plt_offset,
+                   unsigned int plt_index);
+
+  virtual void
+  do_fill_tlsdesc_entry(unsigned char* pov,
+                       elfcpp::Elf_types<64>::Elf_Addr got_address,
+                       elfcpp::Elf_types<64>::Elf_Addr plt_address,
+                       elfcpp::Elf_types<64>::Elf_Addr got_base,
+                       unsigned int tlsdesc_got_offset,
+                       unsigned int plt_offset);
+
+  void
+  fill_aplt_entry(unsigned char* pov,
+                 elfcpp::Elf_types<64>::Elf_Addr got_address,
+                 elfcpp::Elf_types<64>::Elf_Addr plt_address,
+                 unsigned int got_offset,
+                 unsigned int plt_offset,
+                 unsigned int plt_index);
+
+ private:
+  // Set the final size.
+  void
+  set_final_data_size();
+
+  // Write out the BND PLT data.
+  void
+  do_write(Output_file*);
+
+  // Offset of the Additional PLT (if using -z bndplt).
+  unsigned int aplt_offset_;
+
+  // The size of an entry in the PLT.
+  static const int plt_entry_size = 16;
+
+  // The size of an entry in the additional PLT.
+  static const int aplt_entry_size = 8;
+
+  // The first entry in the PLT.
+  // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same
+  // procedure linkage table for both programs and shared objects."
+  static const unsigned char first_plt_entry[plt_entry_size];
+
+  // Other entries in the PLT for an executable.
+  static const unsigned char plt_entry[plt_entry_size];
+
+  // Entries in the additional PLT.
+  static const unsigned char aplt_entry[aplt_entry_size];
+
+  // The reserved TLSDESC entry in the PLT for an executable.
+  static const unsigned char tlsdesc_plt_entry[plt_entry_size];
+
+  // The .eh_frame unwind information for the PLT.
+  static const int plt_eh_frame_fde_size = 32;
+  static const unsigned char plt_eh_frame_fde[plt_eh_frame_fde_size];
+};
+
+// We use this PLT when Indirect Branch Tracking (IBT) is enabled.
+
+template <int size>
+class Output_data_plt_x86_64_ibt : public Output_data_plt_x86_64<size>
+{
+ public:
+  Output_data_plt_x86_64_ibt(Layout* layout,
+                            Output_data_got<64, false>* got,
+                            Output_data_got_plt_x86_64* got_plt,
+                            Output_data_space* got_irelative)
+    : Output_data_plt_x86_64<size>(layout, plt_entry_size,
+                                  got, got_plt, got_irelative),
+      aplt_offset_(0)
+  { }
+
+  Output_data_plt_x86_64_ibt(Layout* layout,
+                            Output_data_got<64, false>* got,
+                            Output_data_got_plt_x86_64* got_plt,
+                            Output_data_space* got_irelative,
+                            unsigned int plt_count)
+    : Output_data_plt_x86_64<size>(layout, plt_entry_size,
+                                  got, got_plt, got_irelative,
+                                  plt_count),
+      aplt_offset_(0)
+  { }
+
+ protected:
+  virtual unsigned int
+  do_get_plt_entry_size() const
+  { return plt_entry_size; }
+
+  // Return the PLT address to use for a global symbol.
+  uint64_t
+  do_address_for_global(const Symbol*);
+
+  // Return the PLT address to use for a local symbol.
+  uint64_t
+  do_address_for_local(const Relobj*, unsigned int symndx);
+
+  virtual void
+  do_add_eh_frame(Layout* layout)
+  {
+    layout->add_eh_frame_for_plt(this,
+                                this->plt_eh_frame_cie,
+                                this->plt_eh_frame_cie_size,
+                                plt_eh_frame_fde,
+                                plt_eh_frame_fde_size);
+  }
+
+  virtual void
+  do_fill_first_plt_entry(unsigned char* pov,
+                         typename elfcpp::Elf_types<size>::Elf_Addr got_addr,
+                         typename elfcpp::Elf_types<size>::Elf_Addr plt_addr);
+
+  virtual unsigned int
+  do_fill_plt_entry(unsigned char* pov,
+                   typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+                   typename elfcpp::Elf_types<size>::Elf_Addr plt_address,
+                   unsigned int got_offset,
+                   unsigned int plt_offset,
+                   unsigned int plt_index);
+
+  virtual void
+  do_fill_tlsdesc_entry(unsigned char* pov,
+                       typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+                       typename elfcpp::Elf_types<size>::Elf_Addr plt_address,
+                       typename elfcpp::Elf_types<size>::Elf_Addr got_base,
+                       unsigned int tlsdesc_got_offset,
+                       unsigned int plt_offset);
+
+  void
+  fill_aplt_entry(unsigned char* pov,
+                 typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+                 typename elfcpp::Elf_types<size>::Elf_Addr plt_address,
+                 unsigned int got_offset,
+                 unsigned int plt_offset,
+                 unsigned int plt_index);
+
+ private:
+  // Set the final size.
+  void
+  set_final_data_size();
+
+  // Write out the IBT PLT data.
+  void
+  do_write(Output_file*);
+
+  // Offset of the Additional PLT (when IBT is enabled).
+  unsigned int aplt_offset_;
+
+  // The size of an entry in the PLT.
+  static const int plt_entry_size = 16;
+
+  // The size of an entry in the additional PLT.
+  static const int aplt_entry_size = 16;
+
+  // The first entry in the PLT.
+  // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same
+  // procedure linkage table for both programs and shared objects."
+  static const unsigned char first_plt_entry[plt_entry_size];
+
+  // Other entries in the PLT for an executable.
+  static const unsigned char plt_entry[plt_entry_size];
+
+  // Entries in the additional PLT.
+  static const unsigned char aplt_entry[aplt_entry_size];
+
+  // The reserved TLSDESC entry in the PLT for an executable.
+  static const unsigned char tlsdesc_plt_entry[plt_entry_size];
+
+  // The .eh_frame unwind information for the PLT.
+  static const int plt_eh_frame_fde_size = 32;
+  static const unsigned char plt_eh_frame_fde[plt_eh_frame_fde_size];
+};
+
+template<int size>
+class Lazy_view
+{
+ public:
+  Lazy_view(Sized_relobj_file<size, false>* object, unsigned int data_shndx)
+    : object_(object), data_shndx_(data_shndx), view_(NULL), view_size_(0)
+  { }
+
+  inline unsigned char
+  operator[](size_t offset)
+  {
+    if (this->view_ == NULL)
+      this->view_ = this->object_->section_contents(this->data_shndx_,
+                                                    &this->view_size_,
+                                                    true);
+    if (offset >= this->view_size_)
+      return 0;
+    return this->view_[offset];
+  }
+
+ private:
+  Sized_relobj_file<size, false>* object_;
+  unsigned int data_shndx_;
+  const unsigned char* view_;
+  section_size_type view_size_;
+};
+
 // The x86_64 target class.
 // See the ABI at
 //   http://www.x86-64.org/documentation/abi.pdf
@@ -397,7 +705,9 @@ class Target_x86_64 : public Sized_target<size, false>
       got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
       rela_irelative_(NULL), copy_relocs_(elfcpp::R_X86_64_COPY),
       got_mod_index_offset_(-1U), tlsdesc_reloc_info_(),
-      tls_base_symbol_defined_(false)
+      tls_base_symbol_defined_(false), isa_1_used_(0), isa_1_needed_(0),
+      feature_1_(0), object_isa_1_used_(0), object_feature_1_(0),
+      seen_first_object_(false)
   { }
 
   // Hook for a new output section.
@@ -469,6 +779,21 @@ class Target_x86_64 : public Sized_target<size, false>
                          const unsigned char* plocal_symbols,
                          Relocatable_relocs*);
 
+  // Scan the relocs for --emit-relocs.
+  void
+  emit_relocs_scan(Symbol_table* symtab,
+                  Layout* layout,
+                  Sized_relobj_file<size, false>* object,
+                  unsigned int data_shndx,
+                  unsigned int sh_type,
+                  const unsigned char* prelocs,
+                  size_t reloc_count,
+                  Output_section* output_section,
+                  bool needs_special_offset_handling,
+                  size_t local_symbol_count,
+                  const unsigned char* plocal_syms,
+                  Relocatable_relocs* rr);
+
   // Emit relocations for a section.
   void
   relocate_relocs(
@@ -478,7 +803,6 @@ class Target_x86_64 : public Sized_target<size, false>
       size_t reloc_count,
       Output_section* output_section,
       typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
-      const Relocatable_relocs*,
       unsigned char* view,
       typename elfcpp::Elf_types<size>::Elf_Addr view_address,
       section_size_type view_size,
@@ -522,10 +846,13 @@ class Target_x86_64 : public Sized_target<size, false>
   // and global_reloc_may_be_function_pointer)
   // if a function's pointer is taken.  ICF uses this in safe mode to only
  // fold those functions whose pointer is definitely not taken.  For x86_64
-  // pie binaries, safe ICF cannot be done by looking at relocation types.
+  // pie binaries, safe ICF cannot be done by looking at only relocation
+  // types, and for certain cases (e.g. R_X86_64_PC32), the instruction
+  // opcode is checked as well to distinguish a function call from taking
+  // a function's pointer.
   bool
   do_can_check_for_function_pointers() const
-  { return !parameters->options().pie(); }
+  { return true; }
 
   // Return the base for a DW_EH_PE_datarel encoding.
   uint64_t
@@ -535,6 +862,7 @@ class Target_x86_64 : public Sized_target<size, false>
   void
   do_calls_non_split(Relobj* object, unsigned int shndx,
                     section_offset_type fnoffset, section_size_type fnsize,
+                    const unsigned char* prelocs, size_t reloc_count,
                     unsigned char* view, section_size_type view_size,
                     std::string* from, std::string* to) const;
 
@@ -567,6 +895,11 @@ class Target_x86_64 : public Sized_target<size, false>
   unsigned int
   plt_entry_size() const;
 
+  // Return the size of each GOT entry.
+  unsigned int
+  got_entry_size() const
+  { return 8; };
+
   // Create the GOT section for an incremental update.
   Output_data_got_base*
   init_got_plt_for_update(Symbol_table* symtab,
@@ -619,7 +952,7 @@ class Target_x86_64 : public Sized_target<size, false>
   Output_data_plt_x86_64<size>*
   make_data_plt(Layout* layout,
                Output_data_got<64, false>* got,
-               Output_data_space* got_plt,
+               Output_data_got_plt_x86_64* got_plt,
                Output_data_space* got_irelative)
   {
     return this->do_make_data_plt(layout, got, got_plt, got_irelative);
@@ -628,7 +961,7 @@ class Target_x86_64 : public Sized_target<size, false>
   Output_data_plt_x86_64<size>*
   make_data_plt(Layout* layout,
                Output_data_got<64, false>* got,
-               Output_data_space* got_plt,
+               Output_data_got_plt_x86_64* got_plt,
                Output_data_space* got_irelative,
                unsigned int plt_count)
   {
@@ -639,24 +972,15 @@ class Target_x86_64 : public Sized_target<size, false>
   virtual Output_data_plt_x86_64<size>*
   do_make_data_plt(Layout* layout,
                   Output_data_got<64, false>* got,
-                  Output_data_space* got_plt,
-                  Output_data_space* got_irelative)
-  {
-    return new Output_data_plt_x86_64_standard<size>(layout, got, got_plt,
-                                                    got_irelative);
-  }
+                  Output_data_got_plt_x86_64* got_plt,
+                  Output_data_space* got_irelative);
 
   virtual Output_data_plt_x86_64<size>*
   do_make_data_plt(Layout* layout,
                   Output_data_got<64, false>* got,
-                  Output_data_space* got_plt,
+                  Output_data_got_plt_x86_64* got_plt,
                   Output_data_space* got_irelative,
-                  unsigned int plt_count)
-  {
-    return new Output_data_plt_x86_64_standard<size>(layout, got, got_plt,
-                                                    got_irelative,
-                                                    plt_count);
-  }
+                  unsigned int plt_count);
 
  private:
   // The class which scans relocations.
@@ -720,7 +1044,10 @@ class Target_x86_64 : public Sized_target<size, false>
     check_non_pic(Relobj*, unsigned int r_type, Symbol*);
 
     inline bool
-    possible_function_pointer_reloc(unsigned int r_type);
+    possible_function_pointer_reloc(Sized_relobj_file<size, false>* src_obj,
+                                    unsigned int src_indx,
+                                    unsigned int r_offset,
+                                    unsigned int r_type);
 
     bool
     reloc_needs_plt_for_ifunc(Sized_relobj_file<size, false>*,
@@ -750,11 +1077,9 @@ class Target_x86_64 : public Sized_target<size, false>
     // Do a relocation.  Return false if the caller should not issue
     // any warnings about this relocation.
     inline bool
-    relocate(const Relocate_info<size, false>*, Target_x86_64*,
-            Output_section*,
-            size_t relnum, const elfcpp::Rela<size, false>&,
-            unsigned int r_type, const Sized_symbol<size>*,
-            const Symbol_value<size>*,
+    relocate(const Relocate_info<size, false>*, unsigned int,
+            Target_x86_64*, Output_section*, size_t, const unsigned char*,
+            const Sized_symbol<size>*, const Symbol_value<size>*,
             unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
             section_size_type);
 
@@ -771,7 +1096,6 @@ class Target_x86_64 : public Sized_target<size, false>
     // Do a TLS General-Dynamic to Initial-Exec transition.
     inline void
     tls_gd_to_ie(const Relocate_info<size, false>*, size_t relnum,
-                Output_segment* tls_segment,
                 const elfcpp::Rela<size, false>&, unsigned int r_type,
                 typename elfcpp::Elf_types<size>::Elf_Addr value,
                 unsigned char* view,
@@ -790,7 +1114,6 @@ class Target_x86_64 : public Sized_target<size, false>
     // Do a TLSDESC-style General-Dynamic to Initial-Exec transition.
     inline void
     tls_desc_gd_to_ie(const Relocate_info<size, false>*, size_t relnum,
-                     Output_segment* tls_segment,
                      const elfcpp::Rela<size, false>&, unsigned int r_type,
                      typename elfcpp::Elf_types<size>::Elf_Addr value,
                      unsigned char* view,
@@ -829,14 +1152,70 @@ class Target_x86_64 : public Sized_target<size, false>
     bool skip_call_tls_get_addr_;
   };
 
-  // A class which returns the size required for a relocation type,
-  // used while scanning relocs during a relocatable link.
-  class Relocatable_size_for_reloc
+  // Check if relocation against this symbol is a candidate for
+  // conversion from
+  // mov foo@GOTPCREL(%rip), %reg
+  // to lea foo(%rip), %reg.
+  template<class View_type>
+  static inline bool
+  can_convert_mov_to_lea(const Symbol* gsym, unsigned int r_type,
+                         size_t r_offset, View_type* view)
   {
-   public:
-    unsigned int
-    get_size_for_reloc(unsigned int, Relobj*);
-  };
+    gold_assert(gsym != NULL);
+    // We cannot do the conversion unless it's one of these relocations.
+    if (r_type != elfcpp::R_X86_64_GOTPCREL
+        && r_type != elfcpp::R_X86_64_GOTPCRELX
+        && r_type != elfcpp::R_X86_64_REX_GOTPCRELX)
+      return false;
+    // We cannot convert references to IFUNC symbols, or to symbols that
+    // are not local to the current module.
+    // We can't do predefined symbols because they may become undefined
+    // (e.g., __ehdr_start when the headers aren't mapped to a segment).
+    if (gsym->type() == elfcpp::STT_GNU_IFUNC
+        || gsym->is_undefined()
+        || gsym->is_predefined()
+        || gsym->is_from_dynobj()
+        || gsym->is_preemptible())
+      return false;
+    // If we are building a shared object and the symbol is protected, we may
+    // need to go through the GOT.
+    if (parameters->options().shared()
+        && gsym->visibility() == elfcpp::STV_PROTECTED)
+      return false;
+    // We cannot convert references to the _DYNAMIC symbol.
+    if (strcmp(gsym->name(), "_DYNAMIC") == 0)
+      return false;
+    // Check for a MOV opcode.
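+    // The relocation applies to the 4-byte displacement of the
+    // RIP-relative MOV, so the opcode byte is at r_offset - 2.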
+    return (*view)[r_offset - 2] == 0x8b;
+  }
+
+  // Convert
+  // callq *foo@GOTPCRELX(%rip) to
+  // addr32 callq foo
+  // and jmpq *foo@GOTPCRELX(%rip) to
+  // jmpq foo
+  // nop
+  template<class View_type>
+  static inline bool
+  can_convert_callq_to_direct(const Symbol* gsym, unsigned int r_type,
+                             size_t r_offset, View_type* view)
+  {
+    gold_assert(gsym != NULL);
+    // We cannot do the conversion unless it's a GOTPCRELX relocation.
+    if (r_type != elfcpp::R_X86_64_GOTPCRELX)
+      return false;
+    // We cannot convert references to IFUNC symbols, or to symbols that
+    // are not local to the current module.
+    if (gsym->type() == elfcpp::STT_GNU_IFUNC
+        || gsym->is_undefined ()
+        || gsym->is_from_dynobj()
+        || gsym->is_preemptible())
+      return false;
+    // Check for a CALLQ or JMPQ opcode.
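+    // Opcode 0xff with ModRM 0x15 is a RIP-relative indirect CALL;
+    // ModRM 0x25 is the corresponding indirect JMP.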
+    return ((*view)[r_offset - 2] == 0xff
+            && ((*view)[r_offset - 1] == 0x15
+                || (*view)[r_offset - 1] == 0x25));
+  }
 
   // Adjust TLS relocation type based on the options and whether this
   // is a local symbol.
@@ -848,7 +1227,7 @@ class Target_x86_64 : public Sized_target<size, false>
   got_section(Symbol_table*, Layout*);
 
   // Get the GOT PLT section.
-  Output_data_space*
+  Output_data_got_plt_x86_64*
   got_plt_section() const
   {
     gold_assert(this->got_plt_ != NULL);
@@ -917,12 +1296,30 @@ class Target_x86_64 : public Sized_target<size, false>
             unsigned int shndx, Output_section* output_section,
             Symbol* sym, const elfcpp::Rela<size, false>& reloc)
   {
+    unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
     this->copy_relocs_.copy_reloc(symtab, layout,
                                  symtab->get_sized_symbol<size>(sym),
                                  object, shndx, output_section,
-                                 reloc, this->rela_dyn_section(layout));
+                                 r_type, reloc.get_r_offset(),
+                                 reloc.get_r_addend(),
+                                 this->rela_dyn_section(layout));
   }
 
+  // Record a target-specific program property in the .note.gnu.property
+  // section.
+  void
+  record_gnu_property(unsigned int, unsigned int, size_t,
+                     const unsigned char*, const Object*);
+
+  // Merge the target-specific program properties from the current object.
+  void
+  merge_gnu_properties(const Object*);
+
+  // Finalize the target-specific program properties and add them back to
+  // the layout.
+  void
+  do_finalize_gnu_properties(Layout*) const;
+
   // Information about this specific target which we pass to the
   // general Target structure.
   static const Target::Target_info x86_64_info;
@@ -959,7 +1356,7 @@ class Target_x86_64 : public Sized_target<size, false>
   // The PLT section.
   Output_data_plt_x86_64<size>* plt_;
   // The GOT PLT section.
-  Output_data_space* got_plt_;
+  Output_data_got_plt_x86_64* got_plt_;
   // The GOT section for IRELATIVE relocations.
   Output_data_space* got_irelative_;
   // The GOT section for TLSDESC relocations.
@@ -980,6 +1377,22 @@ class Target_x86_64 : public Sized_target<size, false>
   std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
   // True if the _TLS_MODULE_BASE_ symbol has been defined.
   bool tls_base_symbol_defined_;
+  // Target-specific program properties, from .note.gnu.property section.
+  // Each bit represents a specific feature.
+  uint32_t isa_1_used_;
+  uint32_t isa_1_needed_;
+  uint32_t feature_1_;
+  // Target-specific properties from the current object.
+  // These bits get ORed into ISA_1_USED_ after all properties for the object
+  // have been processed. But if either is all zeroes (as when the property
+  // is absent from an object), the result should be all zeroes.
+  // (See PR ld/23486.)
+  uint32_t object_isa_1_used_;
+  // These bits get ANDed into FEATURE_1_ after all properties for the object
+  // have been processed.
+  uint32_t object_feature_1_;
+  // Whether we have seen our first object, for use in initializing FEATURE_1_.
+  bool seen_first_object_;
 };
 
 template<>
@@ -1006,7 +1419,9 @@ const Target::Target_info Target_x86_64<64>::x86_64_info =
   elfcpp::SHF_X86_64_LARGE,    // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_X86_64_UNWIND,   // unwind_section_type
 };
 
 template<>
@@ -1033,7 +1448,9 @@ const Target::Target_info Target_x86_64<32>::x86_64_info =
   elfcpp::SHF_X86_64_LARGE,    // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_X86_64_UNWIND,   // unwind_section_type
 };
 
 // This is called when a new output section is created.  This is where
@@ -1075,7 +1492,7 @@ Target_x86_64<size>::got_section(Symbol_table* symtab, Layout* layout)
                                       | elfcpp::SHF_WRITE),
                                      this->got_, got_order, true);
 
-      this->got_plt_ = new Output_data_space(8, "** GOT PLT");
+      this->got_plt_ = new Output_data_got_plt_x86_64(layout);
       layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
                                      (elfcpp::SHF_ALLOC
                                       | elfcpp::SHF_WRITE),
@@ -1164,31 +1581,149 @@ Target_x86_64<size>::rela_irelative_section(Layout* layout)
   return this->rela_irelative_;
 }
 
-// Initialize the PLT section.
-
-template<int size>
-void
-Output_data_plt_x86_64<size>::init(Layout* layout)
-{
-  this->rel_ = new Reloc_section(false);
-  layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
-                                 elfcpp::SHF_ALLOC, this->rel_,
-                                 ORDER_DYNAMIC_PLT_RELOCS, false);
-}
-
+// Record a target-specific program property from the .note.gnu.property
+// section.
 template<int size>
 void
-Output_data_plt_x86_64<size>::do_adjust_output_section(Output_section* os)
+Target_x86_64<size>::record_gnu_property(
+    unsigned int, unsigned int pr_type,
+    size_t pr_datasz, const unsigned char* pr_data,
+    const Object* object)
 {
-  os->set_entsize(this->get_plt_entry_size());
-}
-
-// Add an entry to the PLT.
+  uint32_t val = 0;
 
-template<int size>
-void
-Output_data_plt_x86_64<size>::add_entry(Symbol_table* symtab, Layout* layout,
-                                       Symbol* gsym)
+  switch (pr_type)
+    {
+    case elfcpp::GNU_PROPERTY_X86_ISA_1_USED:
+    case elfcpp::GNU_PROPERTY_X86_ISA_1_NEEDED:
+    case elfcpp::GNU_PROPERTY_X86_FEATURE_1_AND:
+      if (pr_datasz != 4)
+       {
+         gold_warning(_("%s: corrupt .note.gnu.property section "
+                        "(pr_datasz for property %d is not 4)"),
+                      object->name().c_str(), pr_type);
+         return;
+       }
+      val = elfcpp::Swap<32, false>::readval(pr_data);
+      break;
+    default:
+      gold_warning(_("%s: unknown program property type 0x%x "
+                    "in .note.gnu.property section"),
+                  object->name().c_str(), pr_type);
+      break;
+    }
+
+  switch (pr_type)
+    {
+    case elfcpp::GNU_PROPERTY_X86_ISA_1_USED:
+      this->object_isa_1_used_ |= val;
+      break;
+    case elfcpp::GNU_PROPERTY_X86_ISA_1_NEEDED:
+      this->isa_1_needed_ |= val;
+      break;
+    case elfcpp::GNU_PROPERTY_X86_FEATURE_1_AND:
+      // If we see multiple feature props in one object, OR them together.
+      this->object_feature_1_ |= val;
+      break;
+    }
+}
+
+// Merge the target-specific program properties from the current object.
+template<int size>
+void
+Target_x86_64<size>::merge_gnu_properties(const Object*)
+{
+  if (this->seen_first_object_)
+    {
+      // If any object is missing the ISA_1_USED property, we must omit
+      // it from the output file.
+      if (this->object_isa_1_used_ == 0)
+       this->isa_1_used_ = 0;
+      else if (this->isa_1_used_ != 0)
+       this->isa_1_used_ |= this->object_isa_1_used_;
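+      // FEATURE_1 bits (e.g. IBT, SHSTK) describe properties that must
+      // hold for every input object, so they are ANDed across objects.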
+      this->feature_1_ &= this->object_feature_1_;
+    }
+  else
+    {
+      this->isa_1_used_ = this->object_isa_1_used_;
+      this->feature_1_ = this->object_feature_1_;
+      this->seen_first_object_ = true;
+    }
+  this->object_isa_1_used_ = 0;
+  this->object_feature_1_ = 0;
+}
+
+static inline void
+add_property(Layout* layout, unsigned int pr_type, uint32_t val)
+{
+  unsigned char buf[4];
+  elfcpp::Swap<32, false>::writeval(buf, val);
+  layout->add_gnu_property(elfcpp::NT_GNU_PROPERTY_TYPE_0, pr_type, 4, buf);
+}
+
+// Finalize the target-specific program properties and add them back to
+// the layout.
+template<int size>
+void
+Target_x86_64<size>::do_finalize_gnu_properties(Layout* layout) const
+{
+  if (this->isa_1_used_ != 0)
+    add_property(layout, elfcpp::GNU_PROPERTY_X86_ISA_1_USED,
+                this->isa_1_used_);
+  if (this->isa_1_needed_ != 0)
+    add_property(layout, elfcpp::GNU_PROPERTY_X86_ISA_1_NEEDED,
+                this->isa_1_needed_);
+  if (this->feature_1_ != 0)
+    add_property(layout, elfcpp::GNU_PROPERTY_X86_FEATURE_1_AND,
+                this->feature_1_);
+}
+
+// Write the first three reserved words of the .got.plt section.
+// The remainder of the section is written while writing the PLT
+// in Output_data_plt_x86_64::do_write.
+
+void
+Output_data_got_plt_x86_64::do_write(Output_file* of)
+{
+  // The first entry in the GOT is the address of the .dynamic section
+  // aka the PT_DYNAMIC segment.  The next two entries are reserved.
+  // We saved space for them when we created the section in
+  // Target_x86_64::got_section.
+  const off_t got_file_offset = this->offset();
+  gold_assert(this->data_size() >= 24);
+  unsigned char* const got_view = of->get_output_view(got_file_offset, 24);
+  Output_section* dynamic = this->layout_->dynamic_section();
+  uint64_t dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
+  elfcpp::Swap<64, false>::writeval(got_view, dynamic_addr);
+  memset(got_view + 8, 0, 16);
+  of->write_output_view(got_file_offset, 24, got_view);
+}
+
+// Initialize the PLT section.
+
+template<int size>
+void
+Output_data_plt_x86_64<size>::init(Layout* layout)
+{
+  this->rel_ = new Reloc_section(false);
+  layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
+                                 elfcpp::SHF_ALLOC, this->rel_,
+                                 ORDER_DYNAMIC_PLT_RELOCS, false);
+}
+
+template<int size>
+void
+Output_data_plt_x86_64<size>::do_adjust_output_section(Output_section* os)
+{
+  os->set_entsize(this->get_plt_entry_size());
+}
+
+// Add an entry to the PLT.
+
+template<int size>
+void
+Output_data_plt_x86_64<size>::add_entry(Symbol_table* symtab, Layout* layout,
+                                       Symbol* gsym)
 {
   gold_assert(!gsym->has_plt_offset());
 
@@ -1199,7 +1734,7 @@ Output_data_plt_x86_64<size>::add_entry(Symbol_table* symtab, Layout* layout,
   unsigned int* pcount;
   unsigned int offset;
   unsigned int reserved;
-  Output_data_space* got;
+  Output_section_data_build* got;
   if (gsym->type() == elfcpp::STT_GNU_IFUNC
       && gsym->can_use_relative_reloc(false))
     {
@@ -1379,7 +1914,7 @@ Output_data_plt_x86_64<size>::rela_irelative(Symbol_table* symtab,
 
 template<int size>
 uint64_t
-Output_data_plt_x86_64<size>::address_for_global(const Symbol* gsym)
+Output_data_plt_x86_64<size>::do_address_for_global(const Symbol* gsym)
 {
   uint64_t offset = 0;
   if (gsym->type() == elfcpp::STT_GNU_IFUNC
@@ -1393,8 +1928,8 @@ Output_data_plt_x86_64<size>::address_for_global(const Symbol* gsym)
 
 template<int size>
 uint64_t
-Output_data_plt_x86_64<size>::address_for_local(const Relobj* object,
-                                               unsigned int r_sym)
+Output_data_plt_x86_64<size>::do_address_for_local(const Relobj* object,
+                                                  unsigned int r_sym)
 {
   return (this->address()
          + (this->count_ + 1) * this->get_plt_entry_size()
@@ -1406,10 +1941,12 @@ template<int size>
 void
 Output_data_plt_x86_64<size>::set_final_data_size()
 {
-  unsigned int count = this->count_ + this->irelative_count_;
+  // Number of regular and IFUNC PLT entries, plus the first entry.
+  unsigned int count = this->count_ + this->irelative_count_ + 1;
+  // Count the TLSDESC entry, if present.
   if (this->has_tlsdesc_entry())
     ++count;
-  this->set_data_size((count + 1) * this->get_plt_entry_size());
+  this->set_data_size(count * this->get_plt_entry_size());
 }
 
 // The first entry in the PLT for an executable.
@@ -1468,11 +2005,16 @@ Output_data_plt_x86_64_standard<size>::do_fill_plt_entry(
     unsigned int plt_offset,
     unsigned int plt_index)
 {
+  // Check PC-relative offset overflow in PLT entry.
+  uint64_t plt_got_pcrel_offset = (got_address + got_offset
+                                  - (plt_address + plt_offset + 6));
+  if (Bits<32>::has_overflow(plt_got_pcrel_offset))
+    gold_error(_("PC-relative offset overflow in PLT entry %d"),
+              plt_index + 1);
+
   memcpy(pov, plt_entry, plt_entry_size);
   elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
-                                             (got_address + got_offset
-                                              - (plt_address + plt_offset
-                                                 + 6)));
+                                             plt_got_pcrel_offset);
 
   elfcpp::Swap_unaligned<32, false>::writeval(pov + 7, plt_index);
   elfcpp::Swap<32, false>::writeval(pov + 12,
@@ -1519,6 +2061,424 @@ Output_data_plt_x86_64_standard<size>::do_fill_tlsdesc_entry(
                                                  + 12)));
 }
 
+// Return the APLT address to use for a global symbol (for -z bndplt).
+
+uint64_t
+Output_data_plt_x86_64_bnd::do_address_for_global(const Symbol* gsym)
+{
+  uint64_t offset = this->aplt_offset_;
+  // Convert the PLT offset into an APLT offset.
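+  // Regular PLT entries are biased by the reserved first entry, so
+  // subtract plt_entry_size for them; IRELATIVE entries start at
+  // offset 0 and their APLT slots follow the regular APLT entries.
+  // Dividing by (plt_entry_size / aplt_entry_size) rescales a PLT
+  // stride to an APLT stride.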
+  unsigned int plt_offset = gsym->plt_offset();
+  if (gsym->type() == elfcpp::STT_GNU_IFUNC
+      && gsym->can_use_relative_reloc(false))
+    offset += this->regular_count() * aplt_entry_size;
+  else
+    plt_offset -= plt_entry_size;
+  plt_offset = plt_offset / (plt_entry_size / aplt_entry_size);
+  return this->address() + offset + plt_offset;
+}
+
+// Return the PLT address to use for a local symbol.  These are always
+// IRELATIVE relocs.
+
+uint64_t
+Output_data_plt_x86_64_bnd::do_address_for_local(const Relobj* object,
+                                                unsigned int r_sym)
+{
+  // Convert the PLT offset into an APLT offset.
+  const Sized_relobj_file<64, false>* sized_relobj =
+    static_cast<const Sized_relobj_file<64, false>*>(object);
+  const Symbol_value<64>* psymval = sized_relobj->local_symbol(r_sym);
+  unsigned int plt_offset = ((object->local_plt_offset(r_sym)
+                             - (psymval->is_ifunc_symbol()
+                                ? 0 : plt_entry_size))
+                            / (plt_entry_size / aplt_entry_size));
+  return (this->address()
+         + this->aplt_offset_
+         + this->regular_count() * aplt_entry_size
+         + plt_offset);
+}
+
+// Set the final size.
+void
+Output_data_plt_x86_64_bnd::set_final_data_size()
+{
+  // Number of regular and IFUNC PLT entries.
+  unsigned int count = this->entry_count();
+  // Count the first entry and the TLSDESC entry, if present.
+  unsigned int extra = this->has_tlsdesc_entry() ? 2 : 1;
+  unsigned int plt_size = (count + extra) * plt_entry_size;
+  // Offset of the APLT.
+  this->aplt_offset_ = plt_size;
+  // Size of the APLT.
+  plt_size += count * aplt_entry_size;
+  this->set_data_size(plt_size);
+}
+
+// The first entry in the BND PLT.
+
+const unsigned char
+Output_data_plt_x86_64_bnd::first_plt_entry[plt_entry_size] =
+{
+  // From AMD64 ABI Draft 0.98, page 76
+  0xff, 0x35,          // pushq contents of memory address
+  0, 0, 0, 0,          // replaced with address of .got + 8
+  0xf2, 0xff, 0x25,    // bnd jmp indirect
+  0, 0, 0, 0,          // replaced with address of .got + 16
+  0x0f, 0x1f, 0x00     // nop
+};
+
+void
+Output_data_plt_x86_64_bnd::do_fill_first_plt_entry(
+    unsigned char* pov,
+    elfcpp::Elf_types<64>::Elf_Addr got_address,
+    elfcpp::Elf_types<64>::Elf_Addr plt_address)
+{
+  memcpy(pov, first_plt_entry, plt_entry_size);
+  // We do a jmp relative to the PC at the end of this instruction.
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
+                                             (got_address + 8
+                                              - (plt_address + 6)));
+  elfcpp::Swap<32, false>::writeval(pov + 9,
+                                   (got_address + 16
+                                    - (plt_address + 13)));
+}
+
+// Subsequent entries in the BND PLT.
+
+const unsigned char
+Output_data_plt_x86_64_bnd::plt_entry[plt_entry_size] =
+{
+  // From AMD64 ABI Draft 0.99.8, page 139
+  0x68,                                // pushq immediate
+  0, 0, 0, 0,                  // replaced with offset into relocation table
+  0xf2, 0xe9,                  // bnd jmpq relative
+  0, 0, 0, 0,                  // replaced with offset to start of .plt
+  0x0f, 0x1f, 0x44, 0, 0       // nop
+};
+
+// Entries in the BND Additional PLT.
+
+const unsigned char
+Output_data_plt_x86_64_bnd::aplt_entry[aplt_entry_size] =
+{
+  // From AMD64 ABI Draft 0.99.8, page 139
+  0xf2, 0xff, 0x25,    // bnd jmpq indirect
+  0, 0, 0, 0,          // replaced with address of symbol in .got
+  0x90,                        // nop
+};
+
+unsigned int
+Output_data_plt_x86_64_bnd::do_fill_plt_entry(
+    unsigned char* pov,
+    elfcpp::Elf_types<64>::Elf_Addr,
+    elfcpp::Elf_types<64>::Elf_Addr,
+    unsigned int,
+    unsigned int plt_offset,
+    unsigned int plt_index)
+{
+  memcpy(pov, plt_entry, plt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 1, plt_index);
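+  // The branch displacement is relative to the end of the "bnd jmpq",
+  // 11 bytes into this entry, and targets the start of the PLT.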
+  elfcpp::Swap<32, false>::writeval(pov + 7, -(plt_offset + 11));
+  return 0;
+}
+
+void
+Output_data_plt_x86_64_bnd::fill_aplt_entry(
+    unsigned char* pov,
+    elfcpp::Elf_types<64>::Elf_Addr got_address,
+    elfcpp::Elf_types<64>::Elf_Addr plt_address,
+    unsigned int got_offset,
+    unsigned int plt_offset,
+    unsigned int plt_index)
+{
+  // Check PC-relative offset overflow in PLT entry.
+  uint64_t plt_got_pcrel_offset = (got_address + got_offset
+                                  - (plt_address + plt_offset + 7));
+  if (Bits<32>::has_overflow(plt_got_pcrel_offset))
+    gold_error(_("PC-relative offset overflow in APLT entry %d"),
+              plt_index + 1);
+
+  memcpy(pov, aplt_entry, aplt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 3, plt_got_pcrel_offset);
+}
+
+// The reserved TLSDESC entry in the PLT for an executable.
+
+const unsigned char
+Output_data_plt_x86_64_bnd::tlsdesc_plt_entry[plt_entry_size] =
+{
+  // From Alexandre Oliva, "Thread-Local Storage Descriptors for IA32
+  // and AMD64/EM64T", Version 0.9.4 (2005-10-10).
+  0xff, 0x35,          // pushq x(%rip)
+  0, 0, 0, 0,          // replaced with address of linkmap GOT entry (at PLTGOT + 8)
+  0xf2, 0xff, 0x25,    // jmpq *y(%rip)
+  0, 0, 0, 0,          // replaced with offset of reserved TLSDESC_GOT entry
+  0x0f,        0x1f, 0         // nop
+};
+
+void
+Output_data_plt_x86_64_bnd::do_fill_tlsdesc_entry(
+    unsigned char* pov,
+    elfcpp::Elf_types<64>::Elf_Addr got_address,
+    elfcpp::Elf_types<64>::Elf_Addr plt_address,
+    elfcpp::Elf_types<64>::Elf_Addr got_base,
+    unsigned int tlsdesc_got_offset,
+    unsigned int plt_offset)
+{
+  memcpy(pov, tlsdesc_plt_entry, plt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
+                                             (got_address + 8
+                                              - (plt_address + plt_offset
+                                                 + 6)));
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 9,
+                                             (got_base
+                                              + tlsdesc_got_offset
+                                              - (plt_address + plt_offset
+                                                 + 13)));
+}
+
+// Return the APLT address to use for a global symbol (for IBT).
+
+template<int size>
+uint64_t
+Output_data_plt_x86_64_ibt<size>::do_address_for_global(const Symbol* gsym)
+{
+  uint64_t offset = this->aplt_offset_;
+  // Convert the PLT offset into an APLT offset.
+  unsigned int plt_offset = gsym->plt_offset();
+  if (gsym->type() == elfcpp::STT_GNU_IFUNC
+      && gsym->can_use_relative_reloc(false))
+    offset += this->regular_count() * aplt_entry_size;
+  else
+    plt_offset -= plt_entry_size;
+  plt_offset = plt_offset / (plt_entry_size / aplt_entry_size);
+  return this->address() + offset + plt_offset;
+}
+
+// Return the PLT address to use for a local symbol.  These are always
+// IRELATIVE relocs.
+
+template<int size>
+uint64_t
+Output_data_plt_x86_64_ibt<size>::do_address_for_local(const Relobj* object,
+                                                unsigned int r_sym)
+{
+  // Convert the PLT offset into an APLT offset.
+  const Sized_relobj_file<size, false>* sized_relobj =
+    static_cast<const Sized_relobj_file<size, false>*>(object);
+  const Symbol_value<size>* psymval = sized_relobj->local_symbol(r_sym);
+  unsigned int plt_offset = ((object->local_plt_offset(r_sym)
+                             - (psymval->is_ifunc_symbol()
+                                ? 0 : plt_entry_size))
+                            / (plt_entry_size / aplt_entry_size));
+  return (this->address()
+         + this->aplt_offset_
+         + this->regular_count() * aplt_entry_size
+         + plt_offset);
+}
+
+// Set the final size.
+
+template<int size>
+void
+Output_data_plt_x86_64_ibt<size>::set_final_data_size()
+{
+  // Number of regular and IFUNC PLT entries.
+  unsigned int count = this->entry_count();
+  // Count the first entry and the TLSDESC entry, if present.
+  unsigned int extra = this->has_tlsdesc_entry() ? 2 : 1;
+  unsigned int plt_size = (count + extra) * plt_entry_size;
+  // Offset of the APLT.
+  this->aplt_offset_ = plt_size;
+  // Size of the APLT.
+  plt_size += count * aplt_entry_size;
+  this->set_data_size(plt_size);
+}
+
+// The first entry in the IBT PLT.
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<32>::first_plt_entry[plt_entry_size] =
+{
+  // MPX isn't supported for x32, so we don't need the BND prefix.
+  // From AMD64 ABI Draft 0.98, page 76
+  0xff, 0x35,           // pushq contents of memory address
+  0, 0, 0, 0,           // replaced with address of .got + 8
+  0xff, 0x25,           // jmp indirect
+  0, 0, 0, 0,           // replaced with address of .got + 16
+  0x90, 0x90, 0x90, 0x90 // noop (x4)
+};
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<64>::first_plt_entry[plt_entry_size] =
+{
+  // Use the BND prefix so that IBT is compatible with MPX.
+  0xff, 0x35,          // pushq contents of memory address
+  0, 0, 0, 0,          // replaced with address of .got + 8
+  0xf2, 0xff, 0x25,    // bnd jmp indirect
+  0, 0, 0, 0,          // replaced with address of .got + 16
+  0x0f, 0x1f, 0x00     // nop
+};
+
+template<int size>
+void
+Output_data_plt_x86_64_ibt<size>::do_fill_first_plt_entry(
+    unsigned char* pov,
+    typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+    typename elfcpp::Elf_types<size>::Elf_Addr plt_address)
+{
+  // Offsets to the addresses needing relocation.
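+  // The first displacement follows the 2-byte push opcode (0xff 0x35).
+  // The second follows the indirect jump: 2 opcode bytes for x32
+  // (no BND prefix), 3 for x86-64, giving offsets 8 and 9.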
+  const unsigned int roff1 = 2;
+  const unsigned int roff2 = (size == 32) ? 8 : 9;
+
+  memcpy(pov, first_plt_entry, plt_entry_size);
+  // We do a jmp relative to the PC at the end of this instruction.
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + roff1,
+                                             (got_address + 8
+                                              - (plt_address + roff1 + 4)));
+  elfcpp::Swap<32, false>::writeval(pov + roff2,
+                                   (got_address + 16
+                                    - (plt_address + roff2 + 4)));
+}
+
+// Subsequent entries in the IBT PLT.
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<32>::plt_entry[plt_entry_size] =
+{
+  // From AMD64 ABI Draft 1.0-rc1, Chapter 13.
+  0xf3, 0x0f, 0x1e, 0xfa,      // endbr64
+  0x68,                                // pushq immediate
+  0, 0, 0, 0,                  // replaced with offset into relocation table
+  0xe9,                                // jmpq relative
+  0, 0, 0, 0,                  // replaced with offset to start of .plt
+  0x90, 0x90                   // nop
+};
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<64>::plt_entry[plt_entry_size] =
+{
+  // From AMD64 ABI Draft 1.0-rc1, Chapter 13.
+  0xf3, 0x0f, 0x1e, 0xfa,      // endbr64
+  0x68,                                // pushq immediate
+  0, 0, 0, 0,                  // replaced with offset into relocation table
+  0xf2, 0xe9,                  // bnd jmpq relative
+  0, 0, 0, 0,                  // replaced with offset to start of .plt
+  0x90                         // nop
+};
+
+// Entries in the IBT Additional PLT.
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<32>::aplt_entry[aplt_entry_size] =
+{
+  // From AMD64 ABI Draft 1.0-rc1, Chapter 13.
+  0xf3, 0x0f, 0x1e, 0xfa,      // endbr64
+  0xff, 0x25,                  // jmpq indirect
+  0, 0, 0, 0,                  // replaced with address of symbol in .got
+  0x0f, 0x1f, 0x04, 0x00,      // nop
+  0x90, 0x90                   // nop
+};
+
+template<>
+const unsigned char
+Output_data_plt_x86_64_ibt<64>::aplt_entry[aplt_entry_size] =
+{
+  // From AMD64 ABI Draft 1.0-rc1, Chapter 13.
+  0xf3, 0x0f, 0x1e, 0xfa,      // endbr64
+  0xf2, 0xff, 0x25,            // bnd jmpq indirect
+  0, 0, 0, 0,                  // replaced with address of symbol in .got
+  0x0f, 0x1f, 0x04, 0x00,      // nop
+  0x90,                                // nop
+};
+
+template<int size>
+unsigned int
+Output_data_plt_x86_64_ibt<size>::do_fill_plt_entry(
+    unsigned char* pov,
+    typename elfcpp::Elf_types<size>::Elf_Addr,
+    typename elfcpp::Elf_types<size>::Elf_Addr,
+    unsigned int,
+    unsigned int plt_offset,
+    unsigned int plt_index)
+{
+  // Offsets to the addresses needing relocation.
+  const unsigned int roff1 = 5;
+  const unsigned int roff2 = (size == 32) ? 10 : 11;
+
+  memcpy(pov, plt_entry, plt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + roff1, plt_index);
+  elfcpp::Swap<32, false>::writeval(pov + roff2, -(plt_offset + roff2 + 4));
+  return 0;
+}
+
+template<int size>
+void
+Output_data_plt_x86_64_ibt<size>::fill_aplt_entry(
+    unsigned char* pov,
+    typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+    typename elfcpp::Elf_types<size>::Elf_Addr plt_address,
+    unsigned int got_offset,
+    unsigned int plt_offset,
+    unsigned int plt_index)
+{
+  // Offset to the address needing relocation.
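+  // The displacement follows the 4-byte endbr64 plus the indirect jump
+  // opcode: 2 bytes for x32, 3 bytes (with the BND prefix) for x86-64.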
+  const unsigned int roff = (size == 32) ? 6 : 7;
+
+  // Check PC-relative offset overflow in PLT entry.
+  uint64_t plt_got_pcrel_offset = (got_address + got_offset
+                                  - (plt_address + plt_offset + roff + 4));
+  if (Bits<32>::has_overflow(plt_got_pcrel_offset))
+    gold_error(_("PC-relative offset overflow in APLT entry %d"),
+              plt_index + 1);
+
+  memcpy(pov, aplt_entry, aplt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + roff, plt_got_pcrel_offset);
+}
+
+// The reserved TLSDESC entry in the IBT PLT for an executable.
+
+template<int size>
+const unsigned char
+Output_data_plt_x86_64_ibt<size>::tlsdesc_plt_entry[plt_entry_size] =
+{
+  // From Alexandre Oliva, "Thread-Local Storage Descriptors for IA32
+  // and AMD64/EM64T", Version 0.9.4 (2005-10-10).
+  0xff, 0x35,          // pushq x(%rip)
+  0, 0, 0, 0,          // replaced with address of linkmap GOT entry (at PLTGOT + 8)
+  0xf2, 0xff, 0x25,    // jmpq *y(%rip)
+  0, 0, 0, 0,          // replaced with offset of reserved TLSDESC_GOT entry
+  0x0f,        0x1f, 0         // nop
+};
+
+template<int size>
+void
+Output_data_plt_x86_64_ibt<size>::do_fill_tlsdesc_entry(
+    unsigned char* pov,
+    typename elfcpp::Elf_types<size>::Elf_Addr got_address,
+    typename elfcpp::Elf_types<size>::Elf_Addr plt_address,
+    typename elfcpp::Elf_types<size>::Elf_Addr got_base,
+    unsigned int tlsdesc_got_offset,
+    unsigned int plt_offset)
+{
+  memcpy(pov, tlsdesc_plt_entry, plt_entry_size);
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
+                                             (got_address + 8
+                                              - (plt_address + plt_offset
+                                                 + 6)));
+  elfcpp::Swap_unaligned<32, false>::writeval(pov + 9,
+                                             (got_base
+                                              + tlsdesc_got_offset
+                                              - (plt_address + plt_offset
+                                                 + 13)));
+}
+
 // The .eh_frame unwind information for the PLT.
 
 template<int size>
@@ -1569,6 +2529,63 @@ Output_data_plt_x86_64_standard<size>::plt_eh_frame_fde[plt_eh_frame_fde_size] =
   elfcpp::DW_CFA_nop
 };
 
+// The .eh_frame unwind information for the BND PLT.
+const unsigned char
+Output_data_plt_x86_64_bnd::plt_eh_frame_fde[plt_eh_frame_fde_size] =
+{
+  0, 0, 0, 0,                          // Replaced with offset to .plt.
+  0, 0, 0, 0,                          // Replaced with size of .plt.
+  0,                                   // Augmentation size.
+  elfcpp::DW_CFA_def_cfa_offset, 16,   // DW_CFA_def_cfa_offset: 16.
+  elfcpp::DW_CFA_advance_loc + 6,      // Advance 6 to __PLT__ + 6.
+  elfcpp::DW_CFA_def_cfa_offset, 24,   // DW_CFA_def_cfa_offset: 24.
+  elfcpp::DW_CFA_advance_loc + 10,     // Advance 10 to __PLT__ + 16.
+  elfcpp::DW_CFA_def_cfa_expression,   // DW_CFA_def_cfa_expression.
+  11,                                  // Block length.
+  elfcpp::DW_OP_breg7, 8,              // Push %rsp + 8.
+  elfcpp::DW_OP_breg16, 0,             // Push %rip.
+  elfcpp::DW_OP_lit15,                 // Push 0xf.
+  elfcpp::DW_OP_and,                   // & (%rip & 0xf).
+  elfcpp::DW_OP_lit5,                  // Push 5.
+  elfcpp::DW_OP_ge,                    // >= ((%rip & 0xf) >= 5)
+  elfcpp::DW_OP_lit3,                  // Push 3.
+  elfcpp::DW_OP_shl,                   // << (((%rip & 0xf) >= 5) << 3)
+  elfcpp::DW_OP_plus,                  // + ((((%rip&0xf)>=5)<<3)+%rsp+8
+  elfcpp::DW_CFA_nop,                  // Align to 32 bytes.
+  elfcpp::DW_CFA_nop,
+  elfcpp::DW_CFA_nop,
+  elfcpp::DW_CFA_nop
+};
+
+// The .eh_frame unwind information for the IBT PLT.
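+// Each IBT PLT entry begins with a 4-byte endbr64, so the pushq that
+// sets up the stack finishes at offset 9 within the entry (offset 5 in
+// the BND PLT); the CFA expression below therefore tests
+// (%rip & 0xf) >= 9.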
+template<int size>
+const unsigned char
+Output_data_plt_x86_64_ibt<size>::plt_eh_frame_fde[plt_eh_frame_fde_size] =
+{
+  0, 0, 0, 0,                          // Replaced with offset to .plt.
+  0, 0, 0, 0,                          // Replaced with size of .plt.
+  0,                                   // Augmentation size.
+  elfcpp::DW_CFA_def_cfa_offset, 16,   // DW_CFA_def_cfa_offset: 16.
+  elfcpp::DW_CFA_advance_loc + 6,      // Advance 6 to __PLT__ + 6.
+  elfcpp::DW_CFA_def_cfa_offset, 24,   // DW_CFA_def_cfa_offset: 24.
+  elfcpp::DW_CFA_advance_loc + 10,     // Advance 10 to __PLT__ + 16.
+  elfcpp::DW_CFA_def_cfa_expression,   // DW_CFA_def_cfa_expression.
+  11,                                  // Block length.
+  elfcpp::DW_OP_breg7, 8,              // Push %rsp + 8.
+  elfcpp::DW_OP_breg16, 0,             // Push %rip.
+  elfcpp::DW_OP_lit15,                 // Push 0xf.
+  elfcpp::DW_OP_and,                   // & (%rip & 0xf).
+  elfcpp::DW_OP_lit9,                  // Push 9.
+  elfcpp::DW_OP_ge,                    // >= ((%rip & 0xf) >= 9)
+  elfcpp::DW_OP_lit3,                  // Push 3.
+  elfcpp::DW_OP_shl,                   // << (((%rip & 0xf) >= 9) << 3)
+  elfcpp::DW_OP_plus,                  // + ((((%rip&0xf)>=9)<<3)+%rsp+8)
+  elfcpp::DW_CFA_nop,                  // Align to 32 bytes.
+  elfcpp::DW_CFA_nop,
+  elfcpp::DW_CFA_nop,
+  elfcpp::DW_CFA_nop
+};
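+
+// This FDE differs from the BND PLT FDE above only in the comparison
+// constant (9 rather than 5), since the endbr64 at the start of an IBT
+// PLT entry moves the push further into the entry.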
+
 // Write out the PLT.  This uses the hand-coded instructions above,
 // and adjusts them as needed.  This is specified by the AMD64 ABI.
 
@@ -1606,18 +2623,9 @@ Output_data_plt_x86_64<size>::do_write(Output_file* of)
   this->fill_first_plt_entry(pov, got_address, plt_address);
   pov += this->get_plt_entry_size();
 
-  unsigned char* got_pov = got_view;
-
-  // The first entry in the GOT is the address of the .dynamic section
-  // aka the PT_DYNAMIC segment.  The next two entries are reserved.
-  // We saved space for them when we created the section in
-  // Target_x86_64::got_section.
-  Output_section* dynamic = this->layout_->dynamic_section();
-  uint32_t dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
-  elfcpp::Swap<64, false>::writeval(got_pov, dynamic_addr);
-  got_pov += 8;
-  memset(got_pov, 0, 16);
-  got_pov += 16;
+  // The first three entries in the GOT are reserved, and are written
+  // by Output_data_got_plt_x86_64::do_write.
+  unsigned char* got_pov = got_view + 24;
 
   unsigned int plt_offset = this->get_plt_entry_size();
   unsigned int got_offset = 24;
@@ -1627,7 +2635,183 @@ Output_data_plt_x86_64<size>::do_write(Output_file* of)
        ++plt_index,
         pov += this->get_plt_entry_size(),
         got_pov += 8,
-        plt_offset += this->get_plt_entry_size(),
+        plt_offset += this->get_plt_entry_size(),
+        got_offset += 8)
+    {
+      // Set and adjust the PLT entry itself.
+      unsigned int lazy_offset = this->fill_plt_entry(pov,
+                                                     got_address, plt_address,
+                                                     got_offset, plt_offset,
+                                                     plt_index);
+
+      // Set the entry in the GOT.
+      elfcpp::Swap<64, false>::writeval(got_pov,
+                                       plt_address + plt_offset + lazy_offset);
+    }
+
+  if (this->has_tlsdesc_entry())
+    {
+      // Set and adjust the reserved TLSDESC PLT entry.
+      unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
+      this->fill_tlsdesc_entry(pov, got_address, plt_address, got_base,
+                              tlsdesc_got_offset, plt_offset);
+      pov += this->get_plt_entry_size();
+    }
+
+  gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
+  gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
+
+  of->write_output_view(offset, oview_size, oview);
+  of->write_output_view(got_file_offset, got_size, got_view);
+}
+
+// Write out the BND PLT.
+
+void
+Output_data_plt_x86_64_bnd::do_write(Output_file* of)
+{
+  const off_t offset = this->offset();
+  const section_size_type oview_size =
+    convert_to_section_size_type(this->data_size());
+  unsigned char* const oview = of->get_output_view(offset, oview_size);
+
+  Output_data_got<64, false>* got = this->got();
+  Output_data_got_plt_x86_64* got_plt = this->got_plt();
+  Output_data_space* got_irelative = this->got_irelative();
+
+  const off_t got_file_offset = got_plt->offset();
+  gold_assert(parameters->incremental_update()
+             || (got_file_offset + got_plt->data_size()
+                 == got_irelative->offset()));
+  const section_size_type got_size =
+    convert_to_section_size_type(got_plt->data_size()
+                                + got_irelative->data_size());
+  unsigned char* const got_view = of->get_output_view(got_file_offset,
+                                                     got_size);
+
+  unsigned char* pov = oview;
+
+  // The base address of the .plt section.
+  elfcpp::Elf_types<64>::Elf_Addr plt_address = this->address();
+  // The base address of the .got section.
+  elfcpp::Elf_types<64>::Elf_Addr got_base = got->address();
+  // The base address of the PLT portion of the .got section,
+  // which is where the GOT pointer will point, and where the
+  // three reserved GOT entries are located.
+  elfcpp::Elf_types<64>::Elf_Addr got_address = got_plt->address();
+
+  this->fill_first_plt_entry(pov, got_address, plt_address);
+  pov += plt_entry_size;
+
+  // The first three entries in the GOT are reserved, and are written
+  // by Output_data_got_plt_x86_64::do_write.
+  unsigned char* got_pov = got_view + 24;
+
+  unsigned int plt_offset = plt_entry_size;
+  unsigned int got_offset = 24;
+  const unsigned int count = this->entry_count();
+  for (unsigned int plt_index = 0;
+       plt_index < count;
+       ++plt_index,
+        pov += plt_entry_size,
+        got_pov += 8,
+        plt_offset += plt_entry_size,
+        got_offset += 8)
+    {
+      // Set and adjust the PLT entry itself.
+      unsigned int lazy_offset = this->fill_plt_entry(pov,
+                                                     got_address, plt_address,
+                                                     got_offset, plt_offset,
+                                                     plt_index);
+
+      // Set the entry in the GOT.
+      elfcpp::Swap<64, false>::writeval(got_pov,
+                                       plt_address + plt_offset + lazy_offset);
+    }
+
+  if (this->has_tlsdesc_entry())
+    {
+      // Set and adjust the reserved TLSDESC PLT entry.
+      unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
+      this->fill_tlsdesc_entry(pov, got_address, plt_address, got_base,
+                              tlsdesc_got_offset, plt_offset);
+      pov += this->get_plt_entry_size();
+      plt_offset += plt_entry_size;
+    }
+
+  // Write the additional PLT.
+  got_offset = 24;
+  for (unsigned int plt_index = 0;
+       plt_index < count;
+       ++plt_index,
+        pov += aplt_entry_size,
+        plt_offset += aplt_entry_size,
+        got_offset += 8)
+    {
+      // Set and adjust the APLT entry.
+      this->fill_aplt_entry(pov, got_address, plt_address, got_offset,
+                           plt_offset, plt_index);
+    }
+
+  gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
+  gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
+
+  of->write_output_view(offset, oview_size, oview);
+  of->write_output_view(got_file_offset, got_size, got_view);
+}
+
+// Write out the IBT PLT.
+
+template<int size>
+void
+Output_data_plt_x86_64_ibt<size>::do_write(Output_file* of)
+{
+  const off_t offset = this->offset();
+  const section_size_type oview_size =
+    convert_to_section_size_type(this->data_size());
+  unsigned char* const oview = of->get_output_view(offset, oview_size);
+
+  Output_data_got<64, false>* got = this->got();
+  Output_data_got_plt_x86_64* got_plt = this->got_plt();
+  Output_data_space* got_irelative = this->got_irelative();
+
+  const off_t got_file_offset = got_plt->offset();
+  gold_assert(parameters->incremental_update()
+             || (got_file_offset + got_plt->data_size()
+                 == got_irelative->offset()));
+  const section_size_type got_size =
+    convert_to_section_size_type(got_plt->data_size()
+                                + got_irelative->data_size());
+  unsigned char* const got_view = of->get_output_view(got_file_offset,
+                                                     got_size);
+
+  unsigned char* pov = oview;
+
+  // The base address of the .plt section.
+  elfcpp::Elf_types<64>::Elf_Addr plt_address = this->address();
+  // The base address of the .got section.
+  elfcpp::Elf_types<64>::Elf_Addr got_base = got->address();
+  // The base address of the PLT portion of the .got section,
+  // which is where the GOT pointer will point, and where the
+  // three reserved GOT entries are located.
+  elfcpp::Elf_types<64>::Elf_Addr got_address = got_plt->address();
+
+  this->fill_first_plt_entry(pov, got_address, plt_address);
+  pov += plt_entry_size;
+
+  // The first three entries in the GOT are reserved, and are written
+  // by Output_data_got_plt_x86_64::do_write.
+  unsigned char* got_pov = got_view + 24;
+
+  unsigned int plt_offset = plt_entry_size;
+  unsigned int got_offset = 24;
+  const unsigned int count = this->entry_count();
+  for (unsigned int plt_index = 0;
+       plt_index < count;
+       ++plt_index,
+        pov += plt_entry_size,
+        got_pov += 8,
+        plt_offset += plt_entry_size,
         got_offset += 8)
     {
       // Set and adjust the PLT entry itself.
@@ -1648,6 +2832,21 @@ Output_data_plt_x86_64<size>::do_write(Output_file* of)
       this->fill_tlsdesc_entry(pov, got_address, plt_address, got_base,
                               tlsdesc_got_offset, plt_offset);
       pov += this->get_plt_entry_size();
+      plt_offset += plt_entry_size;
+    }
+
+  // Write the additional PLT.
+  got_offset = 24;
+  for (unsigned int plt_index = 0;
+       plt_index < count;
+       ++plt_index,
+        pov += aplt_entry_size,
+        plt_offset += aplt_entry_size,
+        got_offset += 8)
+    {
+      // Set and adjust the APLT entry.
+      this->fill_aplt_entry(pov, got_address, plt_address, got_offset,
+                           plt_offset, plt_index);
     }
 
   gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
@@ -1686,6 +2885,73 @@ Target_x86_64<size>::make_plt_section(Symbol_table* symtab, Layout* layout)
     }
 }
 
+template<>
+Output_data_plt_x86_64<32>*
+Target_x86_64<32>::do_make_data_plt(Layout* layout,
+                                   Output_data_got<64, false>* got,
+                                   Output_data_got_plt_x86_64* got_plt,
+                                   Output_data_space* got_irelative)
+{
+  if (this->feature_1_ & elfcpp::GNU_PROPERTY_X86_FEATURE_1_IBT)
+    return new Output_data_plt_x86_64_ibt<32>(layout, got, got_plt,
+                                             got_irelative);
+  return new Output_data_plt_x86_64_standard<32>(layout, got, got_plt,
+                                                got_irelative);
+}
+
+template<>
+Output_data_plt_x86_64<64>*
+Target_x86_64<64>::do_make_data_plt(Layout* layout,
+                                   Output_data_got<64, false>* got,
+                                   Output_data_got_plt_x86_64* got_plt,
+                                   Output_data_space* got_irelative)
+{
+  if (this->feature_1_ & elfcpp::GNU_PROPERTY_X86_FEATURE_1_IBT)
+    return new Output_data_plt_x86_64_ibt<64>(layout, got, got_plt,
+                                             got_irelative);
+  else if (parameters->options().bndplt())
+    return new Output_data_plt_x86_64_bnd(layout, got, got_plt,
+                                         got_irelative);
+  else
+    return new Output_data_plt_x86_64_standard<64>(layout, got, got_plt,
+                                                  got_irelative);
+}
+
+template<>
+Output_data_plt_x86_64<32>*
+Target_x86_64<32>::do_make_data_plt(Layout* layout,
+                                   Output_data_got<64, false>* got,
+                                   Output_data_got_plt_x86_64* got_plt,
+                                   Output_data_space* got_irelative,
+                                   unsigned int plt_count)
+{
+  if (this->feature_1_ & elfcpp::GNU_PROPERTY_X86_FEATURE_1_IBT)
+    return new Output_data_plt_x86_64_ibt<32>(layout, got, got_plt,
+                                             got_irelative, plt_count);
+  return new Output_data_plt_x86_64_standard<32>(layout, got, got_plt,
+                                                got_irelative, plt_count);
+}
+
+template<>
+Output_data_plt_x86_64<64>*
+Target_x86_64<64>::do_make_data_plt(Layout* layout,
+                                   Output_data_got<64, false>* got,
+                                   Output_data_got_plt_x86_64* got_plt,
+                                   Output_data_space* got_irelative,
+                                   unsigned int plt_count)
+{
+  if (this->feature_1_ & elfcpp::GNU_PROPERTY_X86_FEATURE_1_IBT)
+    return new Output_data_plt_x86_64_ibt<64>(layout, got, got_plt,
+                                             got_irelative, plt_count);
+  else if (parameters->options().bndplt())
+    return new Output_data_plt_x86_64_bnd(layout, got, got_plt,
+                                         got_irelative, plt_count);
+  else
+    return new Output_data_plt_x86_64_standard<64>(layout, got, got_plt,
+                                                  got_irelative,
+                                                  plt_count);
+}
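+
+// Note: these specializations select the PLT flavor when the PLT data is
+// created: the IBT PLT when GNU_PROPERTY_X86_FEATURE_1_IBT was seen, the
+// MPX (bnd) PLT for 64-bit output when the bndplt option is used, and the
+// standard PLT otherwise.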
+
 // Return the section for TLSDESC relocations.
 
 template<int size>
@@ -1747,6 +3013,8 @@ template<int size>
 unsigned int
 Target_x86_64<size>::first_plt_entry_offset() const
 {
+  if (this->plt_ == NULL)
+    return 0;
   return this->plt_->first_plt_entry_offset();
 }
 
@@ -1756,6 +3024,8 @@ template<int size>
 unsigned int
 Target_x86_64<size>::plt_entry_size() const
 {
+  if (this->plt_ == NULL)
+    return 0;
   return this->plt_->get_plt_entry_size();
 }
 
@@ -1778,7 +3048,7 @@ Target_x86_64<size>::init_got_plt_for_update(Symbol_table* symtab,
                                  true);
 
   // Add the three reserved entries.
-  this->got_plt_ = new Output_data_space((plt_count + 3) * 8, 8, "** GOT PLT");
+  this->got_plt_ = new Output_data_got_plt_x86_64(layout, (plt_count + 3) * 8);
   layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
                                  (elfcpp::SHF_ALLOC
                                   | elfcpp::SHF_WRITE),
@@ -2113,12 +3383,14 @@ Target_x86_64<size>::Scan::get_reference_flags(unsigned int r_type)
 
     case elfcpp::R_X86_64_PC64:
     case elfcpp::R_X86_64_PC32:
+    case elfcpp::R_X86_64_PC32_BND:
     case elfcpp::R_X86_64_PC16:
     case elfcpp::R_X86_64_PC8:
     case elfcpp::R_X86_64_GOTOFF64:
       return Symbol::RELATIVE_REF;
 
     case elfcpp::R_X86_64_PLT32:
+    case elfcpp::R_X86_64_PLT32_BND:
     case elfcpp::R_X86_64_PLTOFF64:
       return Symbol::FUNCTION_CALL | Symbol::RELATIVE_REF;
 
@@ -2126,6 +3398,8 @@ Target_x86_64<size>::Scan::get_reference_flags(unsigned int r_type)
     case elfcpp::R_X86_64_GOT32:
     case elfcpp::R_X86_64_GOTPCREL64:
     case elfcpp::R_X86_64_GOTPCREL:
+    case elfcpp::R_X86_64_GOTPCRELX:
+    case elfcpp::R_X86_64_REX_GOTPCRELX:
     case elfcpp::R_X86_64_GOTPLT64:
       // Absolute in GOT.
       return Symbol::ABSOLUTE_REF;
@@ -2200,6 +3474,7 @@ Target_x86_64<size>::Scan::check_non_pic(Relobj* object, unsigned int r_type,
 
       // glibc supports these reloc types, but they can overflow.
     case elfcpp::R_X86_64_PC32:
+    case elfcpp::R_X86_64_PC32_BND:
       // A PC relative reference is OK against a local symbol or if
       // the symbol is defined locally.
       if (gsym == NULL
@@ -2207,7 +3482,7 @@ Target_x86_64<size>::Scan::check_non_pic(Relobj* object, unsigned int r_type,
              && !gsym->is_undefined()
              && !gsym->is_preemptible()))
        return;
-      /* Fall through.  */
+      // Fall through.
     case elfcpp::R_X86_64_32:
       // R_X86_64_32 is OK for x32.
       if (size == 32 && r_type == elfcpp::R_X86_64_32)
@@ -2219,12 +3494,28 @@ Target_x86_64<size>::Scan::check_non_pic(Relobj* object, unsigned int r_type,
        object->error(_("requires dynamic R_X86_64_32 reloc which may "
                        "overflow at runtime; recompile with -fPIC"));
       else
-       object->error(_("requires dynamic %s reloc against '%s' which may "
-                       "overflow at runtime; recompile with -fPIC"),
-                     (r_type == elfcpp::R_X86_64_32
-                      ? "R_X86_64_32"
-                      : "R_X86_64_PC32"),
-                     gsym->name());
+       {
+         const char *r_name;
+         switch (r_type)
+           {
+           case elfcpp::R_X86_64_32:
+             r_name = "R_X86_64_32";
+             break;
+           case elfcpp::R_X86_64_PC32:
+             r_name = "R_X86_64_PC32";
+             break;
+           case elfcpp::R_X86_64_PC32_BND:
+             r_name = "R_X86_64_PC32_BND";
+             break;
+           default:
+             gold_unreachable();
+             break;
+           }
+         object->error(_("requires dynamic %s reloc against '%s' "
+                         "which may overflow at runtime; recompile "
+                         "with -fPIC"),
+                       r_name, gsym->name());
+       }
       this->issued_non_pic_error_ = true;
       return;
 
@@ -2368,11 +3659,13 @@ Target_x86_64<size>::Scan::local(Symbol_table* symtab,
 
     case elfcpp::R_X86_64_PC64:
     case elfcpp::R_X86_64_PC32:
+    case elfcpp::R_X86_64_PC32_BND:
     case elfcpp::R_X86_64_PC16:
     case elfcpp::R_X86_64_PC8:
       break;
 
     case elfcpp::R_X86_64_PLT32:
+    case elfcpp::R_X86_64_PLT32_BND:
       // Since we know this is a local symbol, we can handle this as a
       // PC32 reloc.
       break;
@@ -2391,10 +3684,33 @@ Target_x86_64<size>::Scan::local(Symbol_table* symtab,
     case elfcpp::R_X86_64_GOT32:
     case elfcpp::R_X86_64_GOTPCREL64:
     case elfcpp::R_X86_64_GOTPCREL:
+    case elfcpp::R_X86_64_GOTPCRELX:
+    case elfcpp::R_X86_64_REX_GOTPCRELX:
     case elfcpp::R_X86_64_GOTPLT64:
       {
-       // The symbol requires a GOT entry.
+       // The symbol requires a GOT section.
        Output_data_got<64, false>* got = target->got_section(symtab, layout);
+
+       // If the relocation symbol is local and is not IFUNC, we will
+       // convert
+       //   mov foo@GOTPCREL(%rip), %reg
+       // to
+       //   lea foo(%rip), %reg
+       // in Relocate::relocate.
+       if (!parameters->incremental()
+           && (r_type == elfcpp::R_X86_64_GOTPCREL
+               || r_type == elfcpp::R_X86_64_GOTPCRELX
+               || r_type == elfcpp::R_X86_64_REX_GOTPCRELX)
+           && reloc.get_r_offset() >= 2
+           && !is_ifunc)
+         {
+           section_size_type stype;
+           const unsigned char* view = object->section_contents(data_shndx,
+                                                                &stype, true);
+           if (view[reloc.get_r_offset() - 2] == 0x8b)
+             break;
+         }
+
+       // The symbol requires a GOT entry.
        unsigned int r_sym = elfcpp::elf_r_sym<size>(reloc.get_r_info());
 
        // For a STT_GNU_IFUNC symbol we want the PLT offset.  That
@@ -2597,7 +3913,11 @@ Target_x86_64<size>::Scan::unsupported_reloc_global(
 // Returns true if this relocation type could be that of a function pointer.
 template<int size>
 inline bool
-Target_x86_64<size>::Scan::possible_function_pointer_reloc(unsigned int r_type)
+Target_x86_64<size>::Scan::possible_function_pointer_reloc(
+    Sized_relobj_file<size, false>* src_obj,
+    unsigned int src_indx,
+    unsigned int r_offset,
+    unsigned int r_type)
 {
   switch (r_type)
     {
@@ -2610,10 +3930,47 @@ Target_x86_64<size>::Scan::possible_function_pointer_reloc(unsigned int r_type)
     case elfcpp::R_X86_64_GOT32:
     case elfcpp::R_X86_64_GOTPCREL64:
     case elfcpp::R_X86_64_GOTPCREL:
+    case elfcpp::R_X86_64_GOTPCRELX:
+    case elfcpp::R_X86_64_REX_GOTPCRELX:
     case elfcpp::R_X86_64_GOTPLT64:
       {
        return true;
       }
+    case elfcpp::R_X86_64_PC32:
+      {
+        // This relocation may be used both for function calls and
+        // for taking the address of a function.  We distinguish
+        // between them by checking the preceding opcodes.
+        uint64_t sh_flags = src_obj->section_flags(src_indx);
+        bool is_executable = (sh_flags & elfcpp::SHF_EXECINSTR) != 0;
+        if (is_executable)
+          {
+            section_size_type stype;
+            const unsigned char* view = src_obj->section_contents(src_indx,
+                                                                  &stype,
+                                                                  true);
+
+            // call
+            if (r_offset >= 1
+                && view[r_offset - 1] == 0xe8)
+              return false;
+
+            // jmp
+            if (r_offset >= 1
+                && view[r_offset - 1] == 0xe9)
+              return false;
+
+            // jo/jno/jb/jnb/je/jne/jna/ja/js/jns/jp/jnp/jl/jge/jle/jg
+            if (r_offset >= 2
+                && view[r_offset - 2] == 0x0f
+                && view[r_offset - 1] >= 0x80
+                && view[r_offset - 1] <= 0x8f)
+              return false;
+          }
+
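+        // For example, a PC32 reloc in the disp32 of
+        //   lea foo(%rip), %reg
+        // is not preceded by any of these branch opcodes, so its target
+        // is treated as a possible function pointer.
+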
+        // Be conservative and treat all others as function pointers.
+        return true;
+      }
     }
   return false;
 }
@@ -2628,18 +3985,21 @@ Target_x86_64<size>::Scan::local_reloc_may_be_function_pointer(
   Symbol_table* ,
   Layout* ,
   Target_x86_64<size>* ,
-  Sized_relobj_file<size, false>* ,
-  unsigned int ,
+  Sized_relobj_file<size, false>* src_obj,
+  unsigned int src_indx,
   Output_section* ,
-  const elfcpp::Rela<size, false>& ,
+  const elfcpp::Rela<size, false>& reloc,
   unsigned int r_type,
   const elfcpp::Sym<size, false>&)
 {
   // When building a shared library, do not fold any local symbols as it is
   // not possible to distinguish pointer taken versus a call by looking at
   // the relocation types.
-  return (parameters->options().shared()
-         || possible_function_pointer_reloc(r_type));
+  if (parameters->options().shared())
+    return true;
+
+  return possible_function_pointer_reloc(src_obj, src_indx,
+                                         reloc.get_r_offset(), r_type);
 }
 
 // For safe ICF, scan a relocation for a global symbol to check if it
@@ -2652,20 +4012,23 @@ Target_x86_64<size>::Scan::global_reloc_may_be_function_pointer(
   Symbol_table*,
   Layout* ,
   Target_x86_64<size>* ,
-  Sized_relobj_file<size, false>* ,
-  unsigned int ,
+  Sized_relobj_file<size, false>* src_obj,
+  unsigned int src_indx,
   Output_section* ,
-  const elfcpp::Rela<size, false>& ,
+  const elfcpp::Rela<size, false>& reloc,
   unsigned int r_type,
   Symbol* gsym)
 {
   // When building a shared library, do not fold symbols whose visibility
   // is hidden, internal or protected.
-  return ((parameters->options().shared()
-          && (gsym->visibility() == elfcpp::STV_INTERNAL
-              || gsym->visibility() == elfcpp::STV_PROTECTED
-              || gsym->visibility() == elfcpp::STV_HIDDEN))
-         || possible_function_pointer_reloc(r_type));
+  if (parameters->options().shared()
+      && (gsym->visibility() == elfcpp::STV_INTERNAL
+         || gsym->visibility() == elfcpp::STV_PROTECTED
+         || gsym->visibility() == elfcpp::STV_HIDDEN))
+    return true;
+
+  return possible_function_pointer_reloc(src_obj, src_indx,
+                                         reloc.get_r_offset(), r_type);
 }
 
 // Scan a relocation for a global symbol.
@@ -2714,7 +4077,8 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
        // Make a dynamic relocation if necessary.
        if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type)))
          {
-           if (gsym->may_need_copy_reloc())
+           if (!parameters->options().output_is_position_independent()
+               && gsym->may_need_copy_reloc())
              {
                target->copy_reloc(symtab, layout, object,
                                   data_shndx, output_section, gsym, reloc);
@@ -2765,6 +4129,7 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
 
     case elfcpp::R_X86_64_PC64:
     case elfcpp::R_X86_64_PC32:
+    case elfcpp::R_X86_64_PC32_BND:
     case elfcpp::R_X86_64_PC16:
     case elfcpp::R_X86_64_PC8:
       {
@@ -2774,7 +4139,8 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
        // Make a dynamic relocation if necessary.
        if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type)))
          {
-           if (gsym->may_need_copy_reloc())
+           if (parameters->options().output_is_executable()
+               && gsym->may_need_copy_reloc())
              {
                target->copy_reloc(symtab, layout, object,
                                   data_shndx, output_section, gsym, reloc);
@@ -2795,10 +4161,41 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
     case elfcpp::R_X86_64_GOT32:
     case elfcpp::R_X86_64_GOTPCREL64:
     case elfcpp::R_X86_64_GOTPCREL:
+    case elfcpp::R_X86_64_GOTPCRELX:
+    case elfcpp::R_X86_64_REX_GOTPCRELX:
     case elfcpp::R_X86_64_GOTPLT64:
       {
        // The symbol requires a GOT entry.
        Output_data_got<64, false>* got = target->got_section(symtab, layout);
+
+       // If, in Relocate::relocate, we will convert
+       //   mov foo@GOTPCREL(%rip), %reg
+       // to
+       //   lea foo(%rip), %reg
+       // or convert
+       //   (callq|jmpq) *foo@GOTPCRELX(%rip)
+       // to
+       //   (callq|jmpq) foo,
+       // then there is nothing to do here.
+       // We cannot make these optimizations in incremental linking mode,
+       // because we look at the opcode to decide whether or not to make
+       // the change, and during an incremental update the change may
+       // already have been applied.
+
+        Lazy_view<size> view(object, data_shndx);
+        size_t r_offset = reloc.get_r_offset();
+        if (!parameters->incremental()
+           && r_offset >= 2
+            && Target_x86_64<size>::can_convert_mov_to_lea(gsym, r_type,
+                                                           r_offset, &view))
+          break;
+
+       if (!parameters->incremental()
+           && r_offset >= 2
+           && Target_x86_64<size>::can_convert_callq_to_direct(gsym, r_type,
+                                                               r_offset,
+                                                               &view))
+          break;
+
        if (gsym->final_value_is_known())
          {
            // For a STT_GNU_IFUNC symbol we want the PLT address.
@@ -2860,15 +4257,11 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
                  }
              }
          }
-       // For GOTPLT64, we also need a PLT entry (but only if the
-       // symbol is not fully resolved).
-       if (r_type == elfcpp::R_X86_64_GOTPLT64
-           && !gsym->final_value_is_known())
-         target->make_plt_entry(symtab, layout, gsym);
       }
       break;
 
     case elfcpp::R_X86_64_PLT32:
+    case elfcpp::R_X86_64_PLT32_BND:
       // If the symbol is fully resolved, this is just a PC32 reloc.
       // Otherwise we need a PLT entry.
       if (gsym->final_value_is_known())
@@ -2919,7 +4312,12 @@ Target_x86_64<size>::Scan::global(Symbol_table* symtab,
     case elfcpp::R_X86_64_GOTTPOFF:         // Initial-exec
     case elfcpp::R_X86_64_TPOFF32:          // Local-exec
       {
-       const bool is_final = gsym->final_value_is_known();
+       // For the Initial-Exec model, we can treat undefined symbols as
+       // final when building an executable.
+       const bool is_final
+         = (gsym->final_value_is_known()
+            || (r_type == elfcpp::R_X86_64_GOTTPOFF
+                && gsym->is_undefined()
+                && parameters->options().output_is_executable()));
        const tls::Tls_optimization optimized_type
            = Target_x86_64<size>::optimize_tls_reloc(is_final, r_type);
        switch (r_type)
@@ -3048,15 +4446,16 @@ Target_x86_64<size>::gc_process_relocs(Symbol_table* symtab,
                                       size_t local_symbol_count,
                                       const unsigned char* plocal_symbols)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
 
   if (sh_type == elfcpp::SHT_REL)
     {
       return;
     }
 
-   gold::gc_process_relocs<size, false, Target_x86_64<size>, elfcpp::SHT_RELA,
-                          typename Target_x86_64<size>::Scan,
-                          typename Target_x86_64<size>::Relocatable_size_for_reloc>(
+   gold::gc_process_relocs<size, false, Target_x86_64<size>, Scan,
+                          Classify_reloc>(
     symtab,
     layout,
     this,
@@ -3086,6 +4485,9 @@ Target_x86_64<size>::scan_relocs(Symbol_table* symtab,
                                 size_t local_symbol_count,
                                 const unsigned char* plocal_symbols)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
+
   if (sh_type == elfcpp::SHT_REL)
     {
       gold_error(_("%s: unsupported REL reloc section"),
@@ -3093,8 +4495,7 @@ Target_x86_64<size>::scan_relocs(Symbol_table* symtab,
       return;
     }
 
-  gold::scan_relocs<size, false, Target_x86_64<size>, elfcpp::SHT_RELA,
-      typename Target_x86_64<size>::Scan>(
+  gold::scan_relocs<size, false, Target_x86_64<size>, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -3197,32 +4598,96 @@ Target_x86_64<size>::do_finalize_sections(
     }
 }
 
+// For x32, we need to handle PC-relative relocations using full 64-bit
+// arithmetic, so that we can detect relocation overflows properly.
+// This class overrides the pcrela32_check methods from the defaults in
+// Relocate_functions in reloc.h.
+
+template<int size>
+class X86_64_relocate_functions : public Relocate_functions<size, false>
+{
+ public:
+  typedef Relocate_functions<size, false> Base;
+
+  // Do a simple PC relative relocation with the addend in the
+  // relocation.
+  static inline typename Base::Reloc_status
+  pcrela32_check(unsigned char* view,
+                typename elfcpp::Elf_types<64>::Elf_Addr value,
+                typename elfcpp::Elf_types<64>::Elf_Swxword addend,
+                typename elfcpp::Elf_types<64>::Elf_Addr address)
+  {
+    typedef typename elfcpp::Swap<32, false>::Valtype Valtype;
+    Valtype* wv = reinterpret_cast<Valtype*>(view);
+    value = value + addend - address;
+    elfcpp::Swap<32, false>::writeval(wv, value);
+    return (Bits<32>::has_overflow(value)
+           ? Base::RELOC_OVERFLOW : Base::RELOC_OK);
+  }
+
+  // Do a simple PC relative relocation with a Symbol_value with the
+  // addend in the relocation.
+  static inline typename Base::Reloc_status
+  pcrela32_check(unsigned char* view,
+                const Sized_relobj_file<size, false>* object,
+                const Symbol_value<size>* psymval,
+                typename elfcpp::Elf_types<64>::Elf_Swxword addend,
+                typename elfcpp::Elf_types<64>::Elf_Addr address)
+  {
+    typedef typename elfcpp::Swap<32, false>::Valtype Valtype;
+    Valtype* wv = reinterpret_cast<Valtype*>(view);
+    typename elfcpp::Elf_types<64>::Elf_Addr value;
+    if (addend >= 0)
+      value = psymval->value(object, addend);
+    else
+      {
+       // For negative addends, get the symbol value without
+       // the addend, then add the addend using 64-bit arithmetic.
+       value = psymval->value(object, 0);
+       value += addend;
+      }
+    value -= address;
+    elfcpp::Swap<32, false>::writeval(wv, value);
+    return (Bits<32>::has_overflow(value)
+           ? Base::RELOC_OVERFLOW : Base::RELOC_OK);
+  }
+};
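+
+// For illustration: with value = 0, addend = 0 and address = 0x80001000,
+// the 64-bit result is -0x80001000, which Bits<32>::has_overflow rejects,
+// while a computation truncated to 32 bits would yield 0x7ffff000 and
+// silently pass.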
+
 // Perform a relocation.
 
 template<int size>
 inline bool
 Target_x86_64<size>::Relocate::relocate(
     const Relocate_info<size, false>* relinfo,
+    unsigned int,
     Target_x86_64<size>* target,
     Output_section*,
     size_t relnum,
-    const elfcpp::Rela<size, false>& rela,
-    unsigned int r_type,
+    const unsigned char* preloc,
     const Sized_symbol<size>* gsym,
     const Symbol_value<size>* psymval,
     unsigned char* view,
     typename elfcpp::Elf_types<size>::Elf_Addr address,
     section_size_type view_size)
 {
+  typedef X86_64_relocate_functions<size> Reloc_funcs;
+  const elfcpp::Rela<size, false> rela(preloc);
+  unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
+
   if (this->skip_call_tls_get_addr_)
     {
       if ((r_type != elfcpp::R_X86_64_PLT32
+          && r_type != elfcpp::R_X86_64_GOTPCREL
+          && r_type != elfcpp::R_X86_64_GOTPCRELX
+          && r_type != elfcpp::R_X86_64_PLT32_BND
+          && r_type != elfcpp::R_X86_64_PC32_BND
           && r_type != elfcpp::R_X86_64_PC32)
          || gsym == NULL
          || strcmp(gsym->name(), "__tls_get_addr") != 0)
        {
          gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
                                 _("missing expected TLS relocation"));
+         this->skip_call_tls_get_addr_ = false;
        }
       else
        {
@@ -3261,13 +4726,14 @@ Target_x86_64<size>::Relocate::relocate(
   // We need to subtract the size of the GOT section to get
   // the actual offset to use in the relocation.
   bool have_got_offset = false;
-  unsigned int got_offset = 0;
+  // Since the actual offset is always negative, we use signed int to
+  // support 64-bit GOT relocations.
+  int got_offset = 0;
   switch (r_type)
     {
     case elfcpp::R_X86_64_GOT32:
     case elfcpp::R_X86_64_GOT64:
     case elfcpp::R_X86_64_GOTPLT64:
-    case elfcpp::R_X86_64_GOTPCREL:
     case elfcpp::R_X86_64_GOTPCREL64:
       if (gsym != NULL)
        {
@@ -3288,6 +4754,8 @@ Target_x86_64<size>::Relocate::relocate(
       break;
     }
 
+  typename Reloc_funcs::Reloc_status rstatus = Reloc_funcs::RELOC_OK;
+
   switch (r_type)
     {
     case elfcpp::R_X86_64_NONE:
@@ -3296,53 +4764,48 @@ Target_x86_64<size>::Relocate::relocate(
       break;
 
     case elfcpp::R_X86_64_64:
-      Relocate_functions<size, false>::rela64(view, object, psymval, addend);
+      Reloc_funcs::rela64(view, object, psymval, addend);
       break;
 
     case elfcpp::R_X86_64_PC64:
-      Relocate_functions<size, false>::pcrela64(view, object, psymval, addend,
+      Reloc_funcs::pcrela64(view, object, psymval, addend,
                                              address);
       break;
 
     case elfcpp::R_X86_64_32:
-      // FIXME: we need to verify that value + addend fits into 32 bits:
-      //    uint64_t x = value + addend;
-      //    x == static_cast<uint64_t>(static_cast<uint32_t>(x))
-      // Likewise for other <=32-bit relocations (but see R_X86_64_32S).
-      Relocate_functions<size, false>::rela32(view, object, psymval, addend);
+      rstatus = Reloc_funcs::rela32_check(view, object, psymval, addend,
+                                         Reloc_funcs::CHECK_UNSIGNED);
       break;
 
     case elfcpp::R_X86_64_32S:
-      // FIXME: we need to verify that value + addend fits into 32 bits:
-      //    int64_t x = value + addend;   // note this quantity is signed!
-      //    x == static_cast<int64_t>(static_cast<int32_t>(x))
-      Relocate_functions<size, false>::rela32(view, object, psymval, addend);
+      rstatus = Reloc_funcs::rela32_check(view, object, psymval, addend,
+                                         Reloc_funcs::CHECK_SIGNED);
       break;
 
     case elfcpp::R_X86_64_PC32:
-      Relocate_functions<size, false>::pcrela32(view, object, psymval, addend,
-                                               address);
+    case elfcpp::R_X86_64_PC32_BND:
+      rstatus = Reloc_funcs::pcrela32_check(view, object, psymval, addend,
+                                           address);
       break;
 
     case elfcpp::R_X86_64_16:
-      Relocate_functions<size, false>::rela16(view, object, psymval, addend);
+      Reloc_funcs::rela16(view, object, psymval, addend);
       break;
 
     case elfcpp::R_X86_64_PC16:
-      Relocate_functions<size, false>::pcrela16(view, object, psymval, addend,
-                                               address);
+      Reloc_funcs::pcrela16(view, object, psymval, addend, address);
       break;
 
     case elfcpp::R_X86_64_8:
-      Relocate_functions<size, false>::rela8(view, object, psymval, addend);
+      Reloc_funcs::rela8(view, object, psymval, addend);
       break;
 
     case elfcpp::R_X86_64_PC8:
-      Relocate_functions<size, false>::pcrela8(view, object, psymval, addend,
-                                              address);
+      Reloc_funcs::pcrela8(view, object, psymval, addend, address);
       break;
 
     case elfcpp::R_X86_64_PLT32:
+    case elfcpp::R_X86_64_PLT32_BND:
       gold_assert(gsym == NULL
                  || gsym->has_plt_offset()
                  || gsym->final_value_is_known()
@@ -3352,8 +4815,8 @@ Target_x86_64<size>::Relocate::relocate(
       // Note: while this code looks the same as for R_X86_64_PC32, it
       // behaves differently because psymval was set to point to
       // the PLT entry, rather than the symbol, in Scan::global().
-      Relocate_functions<size, false>::pcrela32(view, object, psymval, addend,
-                                               address);
+      rstatus = Reloc_funcs::pcrela32_check(view, object, psymval, addend,
+                                           address);
       break;
 
     case elfcpp::R_X86_64_PLTOFF64:
@@ -3362,14 +4825,15 @@ Target_x86_64<size>::Relocate::relocate(
        gold_assert(gsym->has_plt_offset()
                    || gsym->final_value_is_known());
        typename elfcpp::Elf_types<size>::Elf_Addr got_address;
-       got_address = target->got_section(NULL, NULL)->address();
-       Relocate_functions<size, false>::rela64(view, object, psymval,
-                                               addend - got_address);
+       // This is the address of _GLOBAL_OFFSET_TABLE_.
+       got_address = target->got_plt_section()->address();
+       Reloc_funcs::rela64(view, object, psymval, addend - got_address);
       }
+      break;
 
     case elfcpp::R_X86_64_GOT32:
       gold_assert(have_got_offset);
-      Relocate_functions<size, false>::rela32(view, got_offset, addend);
+      Reloc_funcs::rela32(view, got_offset, addend);
       break;
 
     case elfcpp::R_X86_64_GOTPC32:
@@ -3377,16 +4841,16 @@ Target_x86_64<size>::Relocate::relocate(
        gold_assert(gsym);
        typename elfcpp::Elf_types<size>::Elf_Addr value;
        value = target->got_plt_section()->address();
-       Relocate_functions<size, false>::pcrela32(view, value, addend, address);
+       Reloc_funcs::pcrela32_check(view, value, addend, address);
       }
       break;
 
     case elfcpp::R_X86_64_GOT64:
-      // The ABI doc says "Like GOT64, but indicates a PLT entry is needed."
-      // Since we always add a PLT entry, this is equivalent.
     case elfcpp::R_X86_64_GOTPLT64:
+      // R_X86_64_GOTPLT64 is obsolete and treated the same as
+      // GOT64.
       gold_assert(have_got_offset);
-      Relocate_functions<size, false>::rela64(view, got_offset, addend);
+      Reloc_funcs::rela64(view, got_offset, addend);
       break;
 
     case elfcpp::R_X86_64_GOTPC64:
@@ -3394,25 +4858,98 @@ Target_x86_64<size>::Relocate::relocate(
        gold_assert(gsym);
        typename elfcpp::Elf_types<size>::Elf_Addr value;
        value = target->got_plt_section()->address();
-       Relocate_functions<size, false>::pcrela64(view, value, addend, address);
+       Reloc_funcs::pcrela64(view, value, addend, address);
       }
       break;
 
     case elfcpp::R_X86_64_GOTOFF64:
       {
-       typename elfcpp::Elf_types<size>::Elf_Addr value;
-       value = (psymval->value(object, 0)
-                - target->got_plt_section()->address());
-       Relocate_functions<size, false>::rela64(view, value, addend);
+       typename elfcpp::Elf_types<size>::Elf_Addr reladdr;
+       reladdr = target->got_plt_section()->address();
+       Reloc_funcs::pcrela64(view, object, psymval, addend, reladdr);
       }
       break;
 
     case elfcpp::R_X86_64_GOTPCREL:
+    case elfcpp::R_X86_64_GOTPCRELX:
+    case elfcpp::R_X86_64_REX_GOTPCRELX:
       {
-       gold_assert(have_got_offset);
-       typename elfcpp::Elf_types<size>::Elf_Addr value;
-       value = target->got_plt_section()->address() + got_offset;
-       Relocate_functions<size, false>::pcrela32(view, value, addend, address);
+      // Convert
+      //   mov foo@GOTPCREL(%rip), %reg
+      // to
+      //   lea foo(%rip), %reg
+      // if possible.
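+      // Only the opcode byte changes, e.g.
+      //   48 8b 05 xx xx xx xx   mov  foo@GOTPCREL(%rip), %rax
+      // becomes
+      //   48 8d 05 xx xx xx xx   lea  foo(%rip), %rax
+      // with the disp32 then resolved directly to foo.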
+      if (!parameters->incremental()
+         && ((gsym == NULL
+              && rela.get_r_offset() >= 2
+              && view[-2] == 0x8b
+              && !psymval->is_ifunc_symbol())
+             || (gsym != NULL
+                 && rela.get_r_offset() >= 2
+                 && Target_x86_64<size>::can_convert_mov_to_lea(gsym, r_type,
+                                                                0, &view))))
+       {
+         view[-2] = 0x8d;
+         Reloc_funcs::pcrela32(view, object, psymval, addend, address);
+       }
+      // Convert
+      //   callq *foo@GOTPCRELX(%rip)
+      // to
+      //   addr32 callq foo
+      // and
+      //   jmpq *foo@GOTPCRELX(%rip)
+      // to
+      //   jmpq foo
+      //   nop
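+      // For example,
+      //   ff 15 xx xx xx xx   callq *foo@GOTPCRELX(%rip)
+      // becomes
+      //   67 e8 xx xx xx xx   addr32 callq foo
+      // and
+      //   ff 25 xx xx xx xx   jmpq *foo@GOTPCRELX(%rip)
+      // becomes
+      //   e9 xx xx xx xx 90   jmpq foo; nop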
+      else if (!parameters->incremental()
+              && gsym != NULL
+              && rela.get_r_offset() >= 2
+              && Target_x86_64<size>::can_convert_callq_to_direct(gsym,
+                                                                  r_type,
+                                                                  0, &view))
+       {
+         if (view[-1] == 0x15)
+           {
+             // Convert callq *foo@GOTPCRELX(%rip) to addr32 callq.
+             // 0x67 is the addr32 prefix and 0xe8 is the opcode of a
+             // direct callq.
+             view[-2] = 0x67;
+             view[-1] = 0xe8;
+             // Convert GOTPCRELX to 32-bit pc relative reloc.
+             Reloc_funcs::pcrela32(view, object, psymval, addend, address);
+           }
+         else
+           {
+             // Convert jmpq *foo@GOTPCRELX(%rip) to
+             // jmpq foo
+             // nop
+             // The opcode of direct jmpq is 0xe9.
+             view[-2] = 0xe9;
+             // The opcode of nop is 0x90.
+             view[3] = 0x90;
+             // Convert GOTPCRELX to 32-bit pc relative reloc.  jmpq is rip
+             // relative and since the instruction following the jmpq is now
+             // the nop, offset the address by 1 byte.  The start of the
+             // relocation also moves ahead by 1 byte.
+             Reloc_funcs::pcrela32(&view[-1], object, psymval, addend,
+                                   address - 1);
+           }
+       }
+      else
+       {
+         if (gsym != NULL)
+           {
+             gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
+             got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
+                           - target->got_size());
+           }
+         else
+           {
+             unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
+             gold_assert(object->local_has_got_offset(r_sym,
+                                                      GOT_TYPE_STANDARD));
+             got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
+                           - target->got_size());
+           }
+         typename elfcpp::Elf_types<size>::Elf_Addr value;
+         value = target->got_plt_section()->address() + got_offset;
+         Reloc_funcs::pcrela32_check(view, value, addend, address);
+       }
       }
       break;
 
@@ -3421,7 +4958,7 @@ Target_x86_64<size>::Relocate::relocate(
        gold_assert(have_got_offset);
        typename elfcpp::Elf_types<size>::Elf_Addr value;
        value = target->got_plt_section()->address() + got_offset;
-       Relocate_functions<size, false>::pcrela64(view, value, addend, address);
+       Reloc_funcs::pcrela64(view, value, addend, address);
       }
       break;
 
@@ -3461,6 +4998,32 @@ Target_x86_64<size>::Relocate::relocate(
       break;
     }
 
+  if (rstatus == Reloc_funcs::RELOC_OVERFLOW)
+    {
+      if (gsym == NULL)
+        {
+         unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
+         gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
+                                _("relocation overflow: "
+                                  "reference to local symbol %u in %s"),
+                                r_sym, object->name().c_str());
+        }
+      else if (gsym->is_defined() && gsym->source() == Symbol::FROM_OBJECT)
+        {
+         gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
+                                _("relocation overflow: "
+                                  "reference to '%s' defined in %s"),
+                                gsym->name(),
+                                gsym->object()->name().c_str());
+        }
+      else
+        {
+         gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
+                                _("relocation overflow: reference to '%s'"),
+                                gsym->name());
+        }
+    }
+
   return true;
 }
 
@@ -3541,7 +5104,7 @@ Target_x86_64<size>::Relocate::relocate_tls(
          if (optimized_type == tls::TLSOPT_TO_IE)
            {
              value = target->got_plt_section()->address() + got_offset;
-             this->tls_gd_to_ie(relinfo, relnum, tls_segment, rela, r_type,
+             this->tls_gd_to_ie(relinfo, relnum, rela, r_type,
                                 value, view, address, view_size);
              break;
            }
@@ -3608,14 +5171,8 @@ Target_x86_64<size>::Relocate::relocate_tls(
            }
          if (optimized_type == tls::TLSOPT_TO_IE)
            {
-             if (tls_segment == NULL)
-               {
-                 gold_assert(parameters->errors()->error_count() > 0
-                             || issue_undefined_symbol_error(gsym));
-                 return;
-               }
              value = target->got_plt_section()->address() + got_offset;
-             this->tls_desc_gd_to_ie(relinfo, relnum, tls_segment,
+             this->tls_desc_gd_to_ie(relinfo, relnum,
                                      rela, r_type, value, view, address,
                                      view_size);
              break;
@@ -3707,7 +5264,17 @@ Target_x86_64<size>::Relocate::relocate_tls(
       break;
 
     case elfcpp::R_X86_64_GOTTPOFF:         // Initial-exec
-      if (optimized_type == tls::TLSOPT_TO_LE)
+      if (gsym != NULL
+         && gsym->is_undefined()
+         && parameters->options().output_is_executable())
+       {
+         Target_x86_64<size>::Relocate::tls_ie_to_le(relinfo, relnum,
+                                                     NULL, rela,
+                                                     r_type, value, view,
+                                                     view_size);
+         break;
+       }
+      else if (optimized_type == tls::TLSOPT_TO_LE)
        {
          if (tls_segment == NULL)
            {
@@ -3771,7 +5338,6 @@ inline void
 Target_x86_64<size>::Relocate::tls_gd_to_ie(
     const Relocate_info<size, false>* relinfo,
     size_t relnum,
-    Output_segment*,
     const elfcpp::Rela<size, false>& rela,
     unsigned int,
     typename elfcpp::Elf_types<size>::Elf_Addr value,
@@ -3781,16 +5347,23 @@ Target_x86_64<size>::Relocate::tls_gd_to_ie(
 {
   // For SIZE == 64:
   //   .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
-  //   .word 0x6666; rex64; call __tls_get_addr
+  //   .word 0x6666; rex64; call __tls_get_addr@PLT
+  //   ==> movq %fs:0,%rax; addq x@gottpoff(%rip),%rax
+  //   .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
+  //   .byte 0x66; rex64; call *__tls_get_addr@GOTPCREL(%rip)
   //   ==> movq %fs:0,%rax; addq x@gottpoff(%rip),%rax
   // For SIZE == 32:
   //   leaq foo@tlsgd(%rip),%rdi;
-  //   .word 0x6666; rex64; call __tls_get_addr
+  //   .word 0x6666; rex64; call __tls_get_addr@PLT
+  //   ==> movl %fs:0,%eax; addq x@gottpoff(%rip),%rax
+  //   leaq foo@tlsgd(%rip),%rdi;
+  //   .byte 0x66; rex64; call *__tls_get_addr@GOTPCREL(%rip)
   //   ==> movl %fs:0,%eax; addq x@gottpoff(%rip),%rax
 
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 12);
   tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0));
+                (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0
+                 || memcmp(view + 4, "\x66\x48\xff", 3) == 0));
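+  // "\x66\x66\x48\xe8" matches the tail of the direct
+  // call __tls_get_addr@PLT form; "\x66\x48\xff" matches the tail of the
+  // indirect call *__tls_get_addr@GOTPCREL(%rip) form described above.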
 
   if (size == 64)
     {
@@ -3837,16 +5410,23 @@ Target_x86_64<size>::Relocate::tls_gd_to_le(
 {
   // For SIZE == 64:
   //   .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
-  //   .word 0x6666; rex64; call __tls_get_addr
+  //   .word 0x6666; rex64; call __tls_get_addr@PLT
+  //   ==> movq %fs:0,%rax; leaq x@tpoff(%rax),%rax
+  //   .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
+  //   .byte 0x66; rex64; call *__tls_get_addr@GOTPCREL(%rip)
   //   ==> movq %fs:0,%rax; leaq x@tpoff(%rax),%rax
   // For SIZE == 32:
   //   leaq foo@tlsgd(%rip),%rdi;
-  //   .word 0x6666; rex64; call __tls_get_addr
+  //   .word 0x6666; rex64; call __tls_get_addr@PLT
+  //   ==> movl %fs:0,%eax; leaq x@tpoff(%rax),%rax
+  //   leaq foo@tlsgd(%rip),%rdi;
+  //   .byte 0x66; rex64; call *__tls_get_addr@GOTPCREL(%rip)
   //   ==> movl %fs:0,%eax; leaq x@tpoff(%rax),%rax
 
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 12);
   tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0));
+                (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0
+                 || memcmp(view + 4, "\x66\x48\xff", 3) == 0));
 
   if (size == 64)
     {
@@ -3883,7 +5463,6 @@ inline void
 Target_x86_64<size>::Relocate::tls_desc_gd_to_ie(
     const Relocate_info<size, false>* relinfo,
     size_t relnum,
-    Output_segment*,
     const elfcpp::Rela<size, false>& rela,
     unsigned int r_type,
     typename elfcpp::Elf_types<size>::Elf_Addr value,
@@ -3893,26 +5472,49 @@ Target_x86_64<size>::Relocate::tls_desc_gd_to_ie(
 {
   if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC)
     {
-      // leaq foo@tlsdesc(%rip), %rax
-      // ==> movq foo@gottpoff(%rip), %rax
+      // LP64: leaq foo@tlsdesc(%rip), %rax
+      //       ==> movq foo@gottpoff(%rip), %rax
+      // X32:  rex leal foo@tlsdesc(%rip), %eax
+      //       ==> rex movl foo@gottpoff(%rip), %eax
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
       tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                    view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x05);
+                    (((view[-3] & 0xfb) == 0x48
+                      || (size == 32 && (view[-3] & 0xfb) == 0x40))
+                     && view[-2] == 0x8d
+                     && (view[-1] & 0xc7) == 0x05));
       view[-2] = 0x8b;
       const elfcpp::Elf_Xword addend = rela.get_r_addend();
       Relocate_functions<size, false>::pcrela32(view, value, addend, address);
     }
   else
     {
-      // call *foo@tlscall(%rax)
-      // ==> nop; nop
+      // LP64: call *foo@tlscall(%rax)
+      //       ==> xchg %ax, %ax
+      // X32:  call *foo@tlscall(%eax)
+      //       ==> nopl (%rax)
       gold_assert(r_type == elfcpp::R_X86_64_TLSDESC_CALL);
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 2);
+      int prefix = 0;
+      if (size == 32 && view[0] == 0x67)
+       {
+         tls::check_range(relinfo, relnum, rela.get_r_offset(),
+                          view_size, 3);
+         prefix = 1;
+       }
       tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                    view[0] == 0xff && view[1] == 0x10);
-      view[0] = 0x66;
-      view[1] = 0x90;
+                    view[prefix] == 0xff && view[prefix + 1] == 0x10);
+      if (prefix)
+       {
+         view[0] = 0x0f;
+         view[1] = 0x1f;
+         view[2] = 0x00;
+       }
+      else
+       {
+         view[0] = 0x66;
+         view[1] = 0x90;
+       }
     }
 }
 
@@ -3932,27 +5534,51 @@ Target_x86_64<size>::Relocate::tls_desc_gd_to_le(
 {
   if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC)
     {
-      // leaq foo@tlsdesc(%rip), %rax
-      // ==> movq foo@tpoff, %rax
+      // LP64: leaq foo@tlsdesc(%rip), %rax
+      //       ==> movq foo@tpoff, %rax
+      // X32:  rex leal foo@tlsdesc(%rip), %eax
+      //       ==> rex movl foo@tpoff, %eax
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
       tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                    view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x05);
+                    (((view[-3] & 0xfb) == 0x48
+                      || (size == 32 && (view[-3] & 0xfb) == 0x40))
+                     && view[-2] == 0x8d
+                     && (view[-1] & 0xc7) == 0x05));
+      view[-3] = (view[-3] & 0x48) | ((view[-3] >> 2) & 1);
       view[-2] = 0xc7;
-      view[-1] = 0xc0;
+      view[-1] = 0xc0 | ((view[-1] >> 3) & 7);
       value -= tls_segment->memsz();
       Relocate_functions<size, false>::rela32(view, value, 0);
     }
   else
     {
-      // call *foo@tlscall(%rax)
-      // ==> nop; nop
+      // LP64: call *foo@tlscall(%rax)
+      //       ==> xchg %ax, %ax
+      // X32:  call *foo@tlscall(%eax)
+      //       ==> nopl (%rax)
       gold_assert(r_type == elfcpp::R_X86_64_TLSDESC_CALL);
       tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 2);
+      int prefix = 0;
+      if (size == 32 && view[0] == 0x67)
+       {
+         tls::check_range(relinfo, relnum, rela.get_r_offset(),
+                          view_size, 3);
+         prefix = 1;
+       }
       tls::check_tls(relinfo, relnum, rela.get_r_offset(),
-                    view[0] == 0xff && view[1] == 0x10);
-      view[0] = 0x66;
-      view[1] = 0x90;
+                    view[prefix] == 0xff && view[prefix + 1] == 0x10);
+      if (prefix)
+       {
+         view[0] = 0x0f;
+         view[1] = 0x1f;
+         view[2] = 0x00;
+       }
+      else
+       {
+         view[0] = 0x66;
+         view[1] = 0x90;
+       }
     }
 }
 
@@ -3975,6 +5601,13 @@ Target_x86_64<size>::Relocate::tls_ld_to_le(
   // For SIZE == 32:
   // ... leq foo@dtpoff(%rax),%reg
   // ==> nopl 0x0(%rax); movl %fs:0,%eax ... leaq x@tpoff(%rax),%rdx
+  // leaq foo@tlsld(%rip),%rdi; call *__tls_get_addr@GOTPCREL(%rip)
+  // For SIZE == 64:
+  // ... leaq foo@dtpoff(%rax),%reg
+  // ==> .word 0x6666; .word 0x6666; movq %fs:0,%rax ... leaq x@tpoff(%rax),%rdx
+  // For SIZE == 32:
+  // ... leaq foo@dtpoff(%rax),%reg
+  // ==> nopw 0x0(%rax); movl %fs:0,%eax ... leaq x@tpoff(%rax),%rdx
 
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 9);
@@ -3982,12 +5615,25 @@ Target_x86_64<size>::Relocate::tls_ld_to_le(
   tls::check_tls(relinfo, relnum, rela.get_r_offset(),
                 view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x3d);
 
-  tls::check_tls(relinfo, relnum, rela.get_r_offset(), view[4] == 0xe8);
+  tls::check_tls(relinfo, relnum, rela.get_r_offset(),
+                view[4] == 0xe8 || view[4] == 0xff);
 
-  if (size == 64)
-    memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
+  if (view[4] == 0xe8)
+    {
+      if (size == 64)
+       memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
+      else
+       memcpy(view - 3, "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0\0", 12);
+    }
   else
-    memcpy(view - 3, "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0\0", 12);
+    {
+      if (size == 64)
+       memcpy(view - 3, "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0",
+              13);
+      else
+       memcpy(view - 3, "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0\0",
+              13);
+    }
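+
+  // The indirect call *__tls_get_addr@GOTPCREL(%rip) is one byte longer
+  // than the direct call, hence the 13-byte replacement sequences above.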
 
   // The next reloc should be a PLT32 reloc against __tls_get_addr.
   // We can skip it.
@@ -4028,6 +5674,8 @@ Target_x86_64<size>::Relocate::tls_ie_to_le(
       // movq
       if (op1 == 0x4c)
        view[-3] = 0x49;
+      else if (size == 32 && op1 == 0x44)
+       view[-3] = 0x41;
       view[-2] = 0xc7;
       view[-1] = 0xc0 | reg;
     }
@@ -4036,6 +5684,8 @@ Target_x86_64<size>::Relocate::tls_ie_to_le(
       // Special handling for %rsp.
       if (op1 == 0x4c)
        view[-3] = 0x49;
+      else if (size == 32 && op1 == 0x44)
+       view[-3] = 0x41;
       view[-2] = 0x81;
       view[-1] = 0xc0 | reg;
     }
@@ -4044,11 +5694,14 @@ Target_x86_64<size>::Relocate::tls_ie_to_le(
       // addq
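+      // "addq x@gottpoff(%rip),%reg" becomes "leaq x@tpoff(%reg),%reg";
+      // the register is now both base and destination, so the prefix needs
+      // both REX.R and REX.B (0x4c -> 0x4d, or 0x44 -> 0x45 on x32).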
       if (op1 == 0x4c)
        view[-3] = 0x4d;
+      else if (size == 32 && op1 == 0x44)
+       view[-3] = 0x45;
       view[-2] = 0x8d;
       view[-1] = 0x80 | reg | (reg << 3);
     }
 
-  value -= tls_segment->memsz();
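+  // tls_segment may be NULL here (for instance if an error has already
+  // been reported and no TLS segment was laid out), so guard the
+  // subtraction of the TLS segment size.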
+  if (tls_segment != NULL)
+    value -= tls_segment->memsz();
   Relocate_functions<size, false>::rela32(view, value, 0);
 }
 
@@ -4068,11 +5721,13 @@ Target_x86_64<size>::relocate_section(
     section_size_type view_size,
     const Reloc_symbol_changes* reloc_symbol_changes)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_section<size, false, Target_x86_64<size>, elfcpp::SHT_RELA,
-                        typename Target_x86_64<size>::Relocate,
-                        gold::Default_comdat_behavior>(
+  gold::relocate_section<size, false, Target_x86_64<size>, Relocate,
+                        gold::Default_comdat_behavior, Classify_reloc>(
     relinfo,
     this,
     prelocs,
@@ -4113,82 +5768,50 @@ Target_x86_64<size>::apply_relocation(
     view_size);
 }
 
-// Return the size of a relocation while scanning during a relocatable
-// link.
+// Scan the relocs during a relocatable link.
 
 template<int size>
-unsigned int
-Target_x86_64<size>::Relocatable_size_for_reloc::get_size_for_reloc(
-    unsigned int r_type,
-    Relobj* object)
+void
+Target_x86_64<size>::scan_relocatable_relocs(
+    Symbol_table* symtab,
+    Layout* layout,
+    Sized_relobj_file<size, false>* object,
+    unsigned int data_shndx,
+    unsigned int sh_type,
+    const unsigned char* prelocs,
+    size_t reloc_count,
+    Output_section* output_section,
+    bool needs_special_offset_handling,
+    size_t local_symbol_count,
+    const unsigned char* plocal_symbols,
+    Relocatable_relocs* rr)
 {
-  switch (r_type)
-    {
-    case elfcpp::R_X86_64_NONE:
-    case elfcpp::R_X86_64_GNU_VTINHERIT:
-    case elfcpp::R_X86_64_GNU_VTENTRY:
-    case elfcpp::R_X86_64_TLSGD:            // Global-dynamic
-    case elfcpp::R_X86_64_GOTPC32_TLSDESC:  // Global-dynamic (from ~oliva url)
-    case elfcpp::R_X86_64_TLSDESC_CALL:
-    case elfcpp::R_X86_64_TLSLD:            // Local-dynamic
-    case elfcpp::R_X86_64_DTPOFF32:
-    case elfcpp::R_X86_64_DTPOFF64:
-    case elfcpp::R_X86_64_GOTTPOFF:         // Initial-exec
-    case elfcpp::R_X86_64_TPOFF32:          // Local-exec
-      return 0;
-
-    case elfcpp::R_X86_64_64:
-    case elfcpp::R_X86_64_PC64:
-    case elfcpp::R_X86_64_GOTOFF64:
-    case elfcpp::R_X86_64_GOTPC64:
-    case elfcpp::R_X86_64_PLTOFF64:
-    case elfcpp::R_X86_64_GOT64:
-    case elfcpp::R_X86_64_GOTPCREL64:
-    case elfcpp::R_X86_64_GOTPCREL:
-    case elfcpp::R_X86_64_GOTPLT64:
-      return 8;
-
-    case elfcpp::R_X86_64_32:
-    case elfcpp::R_X86_64_32S:
-    case elfcpp::R_X86_64_PC32:
-    case elfcpp::R_X86_64_PLT32:
-    case elfcpp::R_X86_64_GOTPC32:
-    case elfcpp::R_X86_64_GOT32:
-      return 4;
-
-    case elfcpp::R_X86_64_16:
-    case elfcpp::R_X86_64_PC16:
-      return 2;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
+  typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
+      Scan_relocatable_relocs;
 
-    case elfcpp::R_X86_64_8:
-    case elfcpp::R_X86_64_PC8:
-      return 1;
-
-    case elfcpp::R_X86_64_COPY:
-    case elfcpp::R_X86_64_GLOB_DAT:
-    case elfcpp::R_X86_64_JUMP_SLOT:
-    case elfcpp::R_X86_64_RELATIVE:
-    case elfcpp::R_X86_64_IRELATIVE:
-      // These are outstanding tls relocs, which are unexpected when linking
-    case elfcpp::R_X86_64_TPOFF64:
-    case elfcpp::R_X86_64_DTPMOD64:
-    case elfcpp::R_X86_64_TLSDESC:
-      object->error(_("unexpected reloc %u in object file"), r_type);
-      return 0;
+  gold_assert(sh_type == elfcpp::SHT_RELA);
 
-    case elfcpp::R_X86_64_SIZE32:
-    case elfcpp::R_X86_64_SIZE64:
-    default:
-      object->error(_("unsupported reloc %u against local symbol"), r_type);
-      return 0;
-    }
+  gold::scan_relocatable_relocs<size, false, Scan_relocatable_relocs>(
+    symtab,
+    layout,
+    object,
+    data_shndx,
+    prelocs,
+    reloc_count,
+    output_section,
+    needs_special_offset_handling,
+    local_symbol_count,
+    plocal_symbols,
+    rr);
 }
 
-// Scan the relocs during a relocatable link.
+// Scan the relocs for --emit-relocs.
 
 template<int size>
 void
-Target_x86_64<size>::scan_relocatable_relocs(
+Target_x86_64<size>::emit_relocs_scan(
     Symbol_table* symtab,
     Layout* layout,
     Sized_relobj_file<size, false>* object,
@@ -4199,16 +5822,17 @@ Target_x86_64<size>::scan_relocatable_relocs(
     Output_section* output_section,
     bool needs_special_offset_handling,
     size_t local_symbol_count,
-    const unsigned char* plocal_symbols,
+    const unsigned char* plocal_syms,
     Relocatable_relocs* rr)
 {
-  gold_assert(sh_type == elfcpp::SHT_RELA);
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
+  typedef gold::Default_emit_relocs_strategy<Classify_reloc>
+      Emit_relocs_strategy;
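+  // With --emit-relocs the input relocations are carried through to the
+  // output, so the generic Default_emit_relocs_strategy is used here
+  // rather than a target-specific scanner.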
 
-  typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
-    Relocatable_size_for_reloc> Scan_relocatable_relocs;
+  gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::scan_relocatable_relocs<size, false, elfcpp::SHT_RELA,
-      Scan_relocatable_relocs>(
+  gold::scan_relocatable_relocs<size, false, Emit_relocs_strategy>(
     symtab,
     layout,
     object,
@@ -4218,7 +5842,7 @@ Target_x86_64<size>::scan_relocatable_relocs(
     output_section,
     needs_special_offset_handling,
     local_symbol_count,
-    plocal_symbols,
+    plocal_syms,
     rr);
 }
 
@@ -4233,22 +5857,23 @@ Target_x86_64<size>::relocate_relocs(
     size_t reloc_count,
     Output_section* output_section,
     typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
-    const Relocatable_relocs* rr,
     unsigned char* view,
     typename elfcpp::Elf_types<size>::Elf_Addr view_address,
     section_size_type view_size,
     unsigned char* reloc_view,
     section_size_type reloc_view_size)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, false>
+      Classify_reloc;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_relocs<size, false, elfcpp::SHT_RELA>(
+  gold::relocate_relocs<size, false, Classify_reloc>(
     relinfo,
     prelocs,
     reloc_count,
     output_section,
     offset_in_output_section,
-    rr,
     view,
     view_address,
     view_size,
@@ -4383,35 +6008,60 @@ Target_x86_64<size>::do_ehframe_datarel_base() const
 // code.  We have to change the function so that it always ensures
 // that it has enough stack space to run some random function.
 
+static const unsigned char cmp_insn_32[] = { 0x64, 0x3b, 0x24, 0x25 };
+static const unsigned char lea_r10_insn_32[] = { 0x44, 0x8d, 0x94, 0x24 };
+static const unsigned char lea_r11_insn_32[] = { 0x44, 0x8d, 0x9c, 0x24 };
+
+static const unsigned char cmp_insn_64[] = { 0x64, 0x48, 0x3b, 0x24, 0x25 };
+static const unsigned char lea_r10_insn_64[] = { 0x4c, 0x8d, 0x94, 0x24 };
+static const unsigned char lea_r11_insn_64[] = { 0x4c, 0x8d, 0x9c, 0x24 };
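+// The arrays above are the raw encodings of the split-stack prologue
+// instructions matched below: "cmp %fs:NN,%rsp" (without REX.W and
+// against %esp for x32) and "lea NN(%rsp),%r10" / "lea NN(%rsp),%r11".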
+
 template<int size>
 void
 Target_x86_64<size>::do_calls_non_split(Relobj* object, unsigned int shndx,
                                        section_offset_type fnoffset,
                                        section_size_type fnsize,
+                                       const unsigned char*,
+                                       size_t,
                                        unsigned char* view,
                                        section_size_type view_size,
                                        std::string* from,
                                        std::string* to) const
 {
+  const char* const cmp_insn = reinterpret_cast<const char*>
+      (size == 32 ? cmp_insn_32 : cmp_insn_64);
+  const char* const lea_r10_insn = reinterpret_cast<const char*>
+      (size == 32 ? lea_r10_insn_32 : lea_r10_insn_64);
+  const char* const lea_r11_insn = reinterpret_cast<const char*>
+      (size == 32 ? lea_r11_insn_32 : lea_r11_insn_64);
+
+  const size_t cmp_insn_len =
+      (size == 32 ? sizeof(cmp_insn_32) : sizeof(cmp_insn_64));
+  const size_t lea_r10_insn_len =
+      (size == 32 ? sizeof(lea_r10_insn_32) : sizeof(lea_r10_insn_64));
+  const size_t lea_r11_insn_len =
+      (size == 32 ? sizeof(lea_r11_insn_32) : sizeof(lea_r11_insn_64));
+  const size_t nop_len = (size == 32 ? 7 : 8);
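+  // The complete cmp instruction is the opcode bytes above plus a 4-byte
+  // displacement: 8 bytes for x32, 9 for LP64.  One byte is rewritten to
+  // stc and the remainder is filled with a nop.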
+
   // The function starts with a comparison of the stack pointer and a
   // field in the TCB.  This is followed by a jump.
 
   // cmp %fs:NN,%rsp
-  if (this->match_view(view, view_size, fnoffset, "\x64\x48\x3b\x24\x25", 5)
-      && fnsize > 9)
+  if (this->match_view(view, view_size, fnoffset, cmp_insn, cmp_insn_len)
+      && fnsize > nop_len + 1)
     {
       // We will call __morestack if the carry flag is set after this
       // comparison.  We turn the comparison into an stc instruction
       // and some nops.
       view[fnoffset] = '\xf9';
-      this->set_view_to_nop(view, view_size, fnoffset + 1, 8);
+      this->set_view_to_nop(view, view_size, fnoffset + 1, nop_len);
     }
   // lea NN(%rsp),%r10
   // lea NN(%rsp),%r11
   else if ((this->match_view(view, view_size, fnoffset,
-                            "\x4c\x8d\x94\x24", 4)
+                            lea_r10_insn, lea_r10_insn_len)
            || this->match_view(view, view_size, fnoffset,
-                               "\x4c\x8d\x9c\x24", 4))
+                               lea_r11_insn, lea_r11_insn_len))
           && fnsize > 8)
     {
       // This is loading an offset from the stack pointer for a
@@ -4471,7 +6121,7 @@ class Output_data_plt_x86_64_nacl : public Output_data_plt_x86_64<size>
  public:
   Output_data_plt_x86_64_nacl(Layout* layout,
                              Output_data_got<64, false>* got,
-                             Output_data_space* got_plt,
+                             Output_data_got_plt_x86_64* got_plt,
                              Output_data_space* got_irelative)
     : Output_data_plt_x86_64<size>(layout, plt_entry_size,
                                   got, got_plt, got_irelative)
@@ -4479,7 +6129,7 @@ class Output_data_plt_x86_64_nacl : public Output_data_plt_x86_64<size>
 
   Output_data_plt_x86_64_nacl(Layout* layout,
                              Output_data_got<64, false>* got,
-                             Output_data_space* got_plt,
+                             Output_data_got_plt_x86_64* got_plt,
                              Output_data_space* got_irelative,
                              unsigned int plt_count)
     : Output_data_plt_x86_64<size>(layout, plt_entry_size,
@@ -4552,7 +6202,7 @@ class Target_x86_64_nacl : public Target_x86_64<size>
   virtual Output_data_plt_x86_64<size>*
   do_make_data_plt(Layout* layout,
                   Output_data_got<64, false>* got,
-                  Output_data_space* got_plt,
+                  Output_data_got_plt_x86_64* got_plt,
                   Output_data_space* got_irelative)
   {
     return new Output_data_plt_x86_64_nacl<size>(layout, got, got_plt,
@@ -4562,7 +6212,7 @@ class Target_x86_64_nacl : public Target_x86_64<size>
   virtual Output_data_plt_x86_64<size>*
   do_make_data_plt(Layout* layout,
                   Output_data_got<64, false>* got,
-                  Output_data_space* got_plt,
+                  Output_data_got_plt_x86_64* got_plt,
                   Output_data_space* got_irelative,
                   unsigned int plt_count)
   {
@@ -4602,7 +6252,9 @@ const Target::Target_info Target_x86_64_nacl<64>::x86_64_nacl_info =
   elfcpp::SHF_X86_64_LARGE,    // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_X86_64_UNWIND,   // unwind_section_type
 };
 
 template<>
@@ -4629,7 +6281,9 @@ const Target::Target_info Target_x86_64_nacl<32>::x86_64_nacl_info =
   elfcpp::SHF_X86_64_LARGE,    // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_X86_64_UNWIND,   // unwind_section_type
 };
 
 #define        NACLMASK        0xe0            // 32-byte alignment mask.