1 // x86_64.cc -- x86_64 target support for gold.
2
3 // Copyright 2006, 2007, Free Software Foundation, Inc.
4 // Written by Ian Lance Taylor <iant@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Library General Public License
10 // as published by the Free Software Foundation; either version 2, or
11 // (at your option) any later version.
12
13 // In addition to the permissions in the GNU Library General Public
14 // License, the Free Software Foundation gives you unlimited
15 // permission to link the compiled version of this file into
16 // combinations with other programs, and to distribute those
17 // combinations without any restriction coming from the use of this
18 // file. (The Library Public License restrictions do apply in other
19 // respects; for example, they cover modification of the file, and
20 // distribution when not linked into a combined executable.)
21
22 // This program is distributed in the hope that it will be useful, but
23 // WITHOUT ANY WARRANTY; without even the implied warranty of
24 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 // Library General Public License for more details.
26
27 // You should have received a copy of the GNU Library General Public
28 // License along with this program; if not, write to the Free Software
29 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
30 // 02110-1301, USA.
31
32 #include "gold.h"
33
34 #include <cstring>
35
36 #include "elfcpp.h"
37 #include "parameters.h"
38 #include "reloc.h"
39 #include "x86_64.h"
40 #include "object.h"
41 #include "symtab.h"
42 #include "layout.h"
43 #include "output.h"
44 #include "target.h"
45 #include "target-reloc.h"
46 #include "target-select.h"
47 #include "tls.h"
48
49 namespace
50 {
51
52 using namespace gold;
53
54 class Output_data_plt_x86_64;
55
56 // The x86_64 target class.
57 // See the ABI at
58 // http://www.x86-64.org/documentation/abi.pdf
59 // TLS info comes from
60 // http://people.redhat.com/drepper/tls.pdf
61 // http://www.lsd.ic.unicamp.br/~oliva/writeups/TLS/RFC-TLSDESC-x86.txt
62
63 class Target_x86_64 : public Sized_target<64, false>
64 {
65 public:
66 // In the x86_64 ABI (p 68), it says "The AMD64 ABI architectures
67 // uses only Elf64_Rela relocation entries with explicit addends."
68 typedef Output_data_reloc<elfcpp::SHT_RELA, true, 64, false> Reloc_section;
69
70 Target_x86_64()
71 : Sized_target<64, false>(&x86_64_info),
72 got_(NULL), plt_(NULL), got_plt_(NULL), rela_dyn_(NULL),
73 copy_relocs_(NULL), dynbss_(NULL)
74 { }
75
76 // Scan the relocations to look for symbol adjustments.
77 void
78 scan_relocs(const General_options& options,
79 Symbol_table* symtab,
80 Layout* layout,
81 Sized_relobj<64, false>* object,
82 unsigned int data_shndx,
83 unsigned int sh_type,
84 const unsigned char* prelocs,
85 size_t reloc_count,
86 size_t local_symbol_count,
87 const unsigned char* plocal_symbols,
88 Symbol** global_symbols);
89
90 // Finalize the sections.
91 void
92 do_finalize_sections(Layout*);
93
94 // Return the value to use for a dynamic symbol which requires special
95 // treatment.
96 uint64_t
97 do_dynsym_value(const Symbol*) const;
98
99 // Relocate a section.
100 void
101 relocate_section(const Relocate_info<64, false>*,
102 unsigned int sh_type,
103 const unsigned char* prelocs,
104 size_t reloc_count,
105 unsigned char* view,
106 elfcpp::Elf_types<64>::Elf_Addr view_address,
107 off_t view_size);
108
109 // Return a string used to fill a code section with nops.
110 std::string
111 do_code_fill(off_t length);
112
113 private:
114 // The class which scans relocations.
115 struct Scan
116 {
117 inline void
118 local(const General_options& options, Symbol_table* symtab,
119 Layout* layout, Target_x86_64* target,
120 Sized_relobj<64, false>* object,
121 unsigned int data_shndx,
122 const elfcpp::Rela<64, false>& reloc, unsigned int r_type,
123 const elfcpp::Sym<64, false>& lsym);
124
125 inline void
126 global(const General_options& options, Symbol_table* symtab,
127 Layout* layout, Target_x86_64* target,
128 Sized_relobj<64, false>* object,
129 unsigned int data_shndx,
130 const elfcpp::Rela<64, false>& reloc, unsigned int r_type,
131 Symbol* gsym);
132
133 static void
134 unsupported_reloc_local(Sized_relobj<64, false>*, unsigned int r_type);
135
136 static void
137 unsupported_reloc_global(Sized_relobj<64, false>*, unsigned int r_type,
138 Symbol*);
139 };
140
141 // The class which implements relocation.
142 class Relocate
143 {
144 public:
145 Relocate()
146 : skip_call_tls_get_addr_(false)
147 { }
148
149 ~Relocate()
150 {
151 if (this->skip_call_tls_get_addr_)
152 {
153 // FIXME: This needs to specify the location somehow.
154 gold_error(_("missing expected TLS relocation"));
155 }
156 }
157
158 // Do a relocation. Return false if the caller should not issue
159 // any warnings about this relocation.
160 inline bool
161 relocate(const Relocate_info<64, false>*, Target_x86_64*, size_t relnum,
162 const elfcpp::Rela<64, false>&,
163 unsigned int r_type, const Sized_symbol<64>*,
164 const Symbol_value<64>*,
165 unsigned char*, elfcpp::Elf_types<64>::Elf_Addr,
166 off_t);
167
168 private:
169 // Do a TLS relocation.
170 inline void
171 relocate_tls(const Relocate_info<64, false>*, size_t relnum,
172 const elfcpp::Rela<64, false>&,
173 unsigned int r_type, const Sized_symbol<64>*,
174 const Symbol_value<64>*,
175 unsigned char*, elfcpp::Elf_types<64>::Elf_Addr, off_t);
176
177 // Do a TLS Initial-Exec to Local-Exec transition.
178 static inline void
179 tls_ie_to_le(const Relocate_info<64, false>*, size_t relnum,
180 Output_segment* tls_segment,
181 const elfcpp::Rela<64, false>&, unsigned int r_type,
182 elfcpp::Elf_types<64>::Elf_Addr value,
183 unsigned char* view,
184 off_t view_size);
185
186 // Do a TLS General-Dynamic to Local-Exec transition.
187 inline void
188 tls_gd_to_le(const Relocate_info<64, false>*, size_t relnum,
189 Output_segment* tls_segment,
190 const elfcpp::Rela<64, false>&, unsigned int r_type,
191 elfcpp::Elf_types<64>::Elf_Addr value,
192 unsigned char* view,
193 off_t view_size);
194
195 // Do a TLS Local-Dynamic to Local-Exec transition.
196 inline void
197 tls_ld_to_le(const Relocate_info<64, false>*, size_t relnum,
198 Output_segment* tls_segment,
199 const elfcpp::Rela<64, false>&, unsigned int r_type,
200 elfcpp::Elf_types<64>::Elf_Addr value,
201 unsigned char* view,
202 off_t view_size);
203
204 // This is set if we should skip the next reloc, which should be a
205 // PLT32 reloc against __tls_get_addr.
206 bool skip_call_tls_get_addr_;
207 };
208
209 // Adjust TLS relocation type based on the options and whether this
210 // is a local symbol.
211 static tls::Tls_optimization
212 optimize_tls_reloc(bool is_final, int r_type);
213
214 // Get the GOT section, creating it if necessary.
215 Output_data_got<64, false>*
216 got_section(Symbol_table*, Layout*);
217
218 // Create a PLT entry for a global symbol.
219 void
220 make_plt_entry(Symbol_table*, Layout*, Symbol*);
221
222 // Get the PLT section.
223 Output_data_plt_x86_64*
224 plt_section() const
225 {
226 gold_assert(this->plt_ != NULL);
227 return this->plt_;
228 }
229
230 // Get the dynamic reloc section, creating it if necessary.
231 Reloc_section*
232 rela_dyn_section(Layout*);
233
234 // Copy a relocation against a global symbol.
235 void
236 copy_reloc(const General_options*, Symbol_table*, Layout*,
237 Sized_relobj<64, false>*, unsigned int,
238 Symbol*, const elfcpp::Rela<64, false>&);
239
240 // Information about this specific target which we pass to the
241 // general Target structure.
242 static const Target::Target_info x86_64_info;
243
244 // The GOT section.
245 Output_data_got<64, false>* got_;
246 // The PLT section.
247 Output_data_plt_x86_64* plt_;
248 // The GOT PLT section.
249 Output_data_space* got_plt_;
250 // The dynamic reloc section.
251 Reloc_section* rela_dyn_;
252 // Relocs saved to avoid a COPY reloc.
253 Copy_relocs<64, false>* copy_relocs_;
254 // Space for variables copied with a COPY reloc.
255 Output_data_space* dynbss_;
256 };
257
258 const Target::Target_info Target_x86_64::x86_64_info =
259 {
260 64, // size
261 false, // is_big_endian
262 elfcpp::EM_X86_64, // machine_code
263 false, // has_make_symbol
264 false, // has_resolve
265 true, // has_code_fill
266 "/lib/ld64.so.1", // program interpreter
267 0x400000, // default_text_segment_address
268 0x1000, // abi_pagesize
269 0x1000 // common_pagesize
270 };
271
272 // Get the GOT section, creating it if necessary.
273
274 Output_data_got<64, false>*
275 Target_x86_64::got_section(Symbol_table* symtab, Layout* layout)
276 {
277 if (this->got_ == NULL)
278 {
279 gold_assert(symtab != NULL && layout != NULL);
280
281 this->got_ = new Output_data_got<64, false>();
282
283 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
284 elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE,
285 this->got_);
286
287 // The old GNU linker creates a .got.plt section. We just
288 // create another set of data in the .got section. Note that we
289 // always create a PLT if we create a GOT, although the PLT
290 // might be empty.
291 // TODO(csilvers): do we really need an alignment of 8?
292 this->got_plt_ = new Output_data_space(8);
293 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
294 elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE,
295 this->got_plt_);
296
297 // The first three entries are reserved.
298 this->got_plt_->set_space_size(3 * 8);
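// (Per the psABI convention, entry 0 of .got.plt holds the address of
// the _DYNAMIC array, while entries 1 and 2 are filled in at run time
// by the dynamic linker with its link map pointer and the address of
// its lazy-resolution routine; the first PLT entry below uses entries
// 1 and 2.)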
299
300 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
301 symtab->define_in_output_data(this, "_GLOBAL_OFFSET_TABLE_", NULL,
302 this->got_plt_,
303 0, 0, elfcpp::STT_OBJECT,
304 elfcpp::STB_LOCAL,
305 elfcpp::STV_HIDDEN, 0,
306 false, false);
307 }
308
309 return this->got_;
310 }
311
312 // Get the dynamic reloc section, creating it if necessary.
313
314 Target_x86_64::Reloc_section*
315 Target_x86_64::rela_dyn_section(Layout* layout)
316 {
317 if (this->rela_dyn_ == NULL)
318 {
319 gold_assert(layout != NULL);
320 this->rela_dyn_ = new Reloc_section();
321 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
322 elfcpp::SHF_ALLOC, this->rela_dyn_);
323 }
324 return this->rela_dyn_;
325 }
326
327 // A class to handle the PLT data.
328
329 class Output_data_plt_x86_64 : public Output_section_data
330 {
331 public:
332 typedef Output_data_reloc<elfcpp::SHT_RELA, true, 64, false> Reloc_section;
333
334 Output_data_plt_x86_64(Layout*, Output_data_space*);
335
336 // Add an entry to the PLT.
337 void
338 add_entry(Symbol* gsym);
339
340 // Return the .rela.plt section data.
341 const Reloc_section*
342 rel_plt() const
343 { return this->rel_; }
344
345 protected:
346 void
347 do_adjust_output_section(Output_section* os);
348
349 private:
350 // The size of an entry in the PLT.
351 static const int plt_entry_size = 16;
352
353 // The first entry in the PLT.
354 // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same
355 // procedure linkage table for both programs and shared objects."
356 static unsigned char first_plt_entry[plt_entry_size];
357
358 // Other entries in the PLT for an executable.
359 static unsigned char plt_entry[plt_entry_size];
360
361 // Set the final size.
362 void
363 do_set_address(uint64_t, off_t)
364 { this->set_data_size((this->count_ + 1) * plt_entry_size); }
365
366 // Write out the PLT data.
367 void
368 do_write(Output_file*);
369
370 // The reloc section.
371 Reloc_section* rel_;
372 // The .got.plt section.
373 Output_data_space* got_plt_;
374 // The number of PLT entries.
375 unsigned int count_;
376 };
377
378 // Create the PLT section. The ordinary .got section is an argument,
379 // since we need to refer to the start. We also create our own .got
380 // section just for PLT entries.
381
382 Output_data_plt_x86_64::Output_data_plt_x86_64(Layout* layout,
383 Output_data_space* got_plt)
384 // TODO(csilvers): do we really need an alignment of 8?
385 : Output_section_data(8), got_plt_(got_plt), count_(0)
386 {
387 this->rel_ = new Reloc_section();
388 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
389 elfcpp::SHF_ALLOC, this->rel_);
390 }
391
392 void
393 Output_data_plt_x86_64::do_adjust_output_section(Output_section* os)
394 {
395 // UnixWare sets the entsize of .plt to 4, and so does the old GNU
396 // linker, and so do we.
397 os->set_entsize(4);
398 }
399
400 // Add an entry to the PLT.
401
402 void
403 Output_data_plt_x86_64::add_entry(Symbol* gsym)
404 {
405 gold_assert(!gsym->has_plt_offset());
406
407 // Note that when setting the PLT offset we skip the initial
408 // reserved PLT entry.
409 gsym->set_plt_offset((this->count_ + 1) * plt_entry_size);
410
411 ++this->count_;
412
413 off_t got_offset = this->got_plt_->data_size();
414
415 // Every PLT entry needs a GOT entry which points back to the PLT
416 // entry (this will be changed by the dynamic linker, normally
417 // lazily when the function is called).
418 this->got_plt_->set_space_size(got_offset + 8);
419
420 // Every PLT entry needs a reloc.
421 gsym->set_needs_dynsym_entry();
422 this->rel_->add_global(gsym, elfcpp::R_X86_64_JUMP_SLOT, this->got_plt_,
423 got_offset, 0);
424
425 // Note that we don't need to save the symbol. The contents of the
426 // PLT are independent of which symbols are used. The symbols only
427 // appear in the relocations.
428 }
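// For example, the first symbol added here gets plt_offset 16 (the
// slot just after the reserved initial PLT entry) and a .got.plt slot
// at offset 24 (just past the three reserved entries), along with a
// JUMP_SLOT reloc against that slot.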
429
430 // The first entry in the PLT for an executable.
431
432 unsigned char Output_data_plt_x86_64::first_plt_entry[plt_entry_size] =
433 {
434 // From AMD64 ABI Draft 0.98, page 76
435 0xff, 0x35, // pushq contents of memory address
436 0, 0, 0, 0, // replaced with address of .got + 8
437 0xff, 0x25, // jmp indirect
438 0, 0, 0, 0, // replaced with address of .got + 16
439 0x90, 0x90, 0x90, 0x90 // noop (x4)
440 };
441
442 // Subsequent entries in the PLT for an executable.
443
444 unsigned char Output_data_plt_x86_64::plt_entry[plt_entry_size] =
445 {
446 // From AMD64 ABI Draft 0.98, page 76
447 0xff, 0x25, // jmpq indirect
448 0, 0, 0, 0, // replaced with address of symbol in .got
449 0x68, // pushq immediate
450 0, 0, 0, 0, // replaced with offset into relocation table
451 0xe9, // jmpq relative
452 0, 0, 0, 0 // replaced with offset to start of .plt
453 };
454
455 // Write out the PLT. This uses the hand-coded instructions above,
456 // and adjusts them as needed. This is specified by the AMD64 ABI.
457
458 void
459 Output_data_plt_x86_64::do_write(Output_file* of)
460 {
461 const off_t offset = this->offset();
462 const off_t oview_size = this->data_size();
463 unsigned char* const oview = of->get_output_view(offset, oview_size);
464
465 const off_t got_file_offset = this->got_plt_->offset();
466 const off_t got_size = this->got_plt_->data_size();
467 unsigned char* const got_view = of->get_output_view(got_file_offset,
468 got_size);
469
470 unsigned char* pov = oview;
471
472 elfcpp::Elf_types<64>::Elf_Addr plt_address = this->address();
473 elfcpp::Elf_types<64>::Elf_Addr got_address = this->got_plt_->address();
474
475 memcpy(pov, first_plt_entry, plt_entry_size);
476 if (!parameters->output_is_shared())
477 {
478 // We do a jmp relative to the PC at the end of this instruction.
479 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2, got_address + 8
480 - (plt_address + 6));
481 elfcpp::Swap<32, false>::writeval(pov + 8, got_address + 16
482 - (plt_address + 12));
483 }
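// For example, with a hypothetical plt_address of 0x400420 and
// got_address of 0x600ff8, the pushq field gets 0x600ff8 + 8
// - (0x400420 + 6) = 0x200bda: both displacements are relative to the
// end of their 6-byte instruction, so the pushq refers to .got.plt + 8
// and the indirect jmp to .got.plt + 16, the two dynamic-linker
// entries reserved above.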
484 pov += plt_entry_size;
485
486 unsigned char* got_pov = got_view;
487
488 memset(got_pov, 0, 24);
489 got_pov += 24;
490
491 unsigned int plt_offset = plt_entry_size;
492 unsigned int got_offset = 24;
493 const unsigned int count = this->count_;
494 for (unsigned int plt_index = 0;
495 plt_index < count;
496 ++plt_index,
497 pov += plt_entry_size,
498 got_pov += 8,
499 plt_offset += plt_entry_size,
500 got_offset += 8)
501 {
502 // Set and adjust the PLT entry itself.
503 memcpy(pov, plt_entry, plt_entry_size);
504 if (parameters->output_is_shared())
505 // FIXME(csilvers): what's the right thing to write here?
506 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2, got_offset);
507 else
508 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
509 (got_address + got_offset
510 - (plt_address + plt_offset
511 + 6)));
512
513 elfcpp::Swap_unaligned<32, false>::writeval(pov + 7, plt_index);
514 elfcpp::Swap<32, false>::writeval(pov + 12,
515 - (plt_offset + plt_entry_size));
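// For example, the first real entry (plt_index 0, plt_offset 16,
// got_offset 24) gets a jmpq through .got.plt + 24, a pushq of
// relocation index 0, and a final rel32 of -(16 + 16) = -32, which
// lands back on the first (reserved) PLT entry at offset 0.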
516
517 // Set the entry in the GOT.
518 elfcpp::Swap<64, false>::writeval(got_pov, plt_address + plt_offset + 6);
519 }
520
521 gold_assert(pov - oview == oview_size);
522 gold_assert(got_pov - got_view == got_size);
523
524 of->write_output_view(offset, oview_size, oview);
525 of->write_output_view(got_file_offset, got_size, got_view);
526 }
527
528 // Create a PLT entry for a global symbol.
529
530 void
531 Target_x86_64::make_plt_entry(Symbol_table* symtab, Layout* layout,
532 Symbol* gsym)
533 {
534 if (gsym->has_plt_offset())
535 return;
536
537 if (this->plt_ == NULL)
538 {
539 // Create the GOT sections first.
540 this->got_section(symtab, layout);
541
542 this->plt_ = new Output_data_plt_x86_64(layout, this->got_plt_);
543 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
544 (elfcpp::SHF_ALLOC
545 | elfcpp::SHF_EXECINSTR),
546 this->plt_);
547 }
548
549 this->plt_->add_entry(gsym);
550 }
551
552 // Handle a relocation against a non-function symbol defined in a
553 // dynamic object. The traditional way to handle this is to generate
554 // a COPY relocation to copy the variable at runtime from the shared
555 // object into the executable's data segment. However, this is
556 // undesirable in general, as if the size of the object changes in the
557 // dynamic object, the executable will no longer work correctly. If
558 // this relocation is in a writable section, then we can create a
559 // dynamic reloc and the dynamic linker will resolve it to the correct
560 // address at runtime. However, we do not want do that if the
561 // relocation is in a read-only section, as it would prevent the
562 // readonly segment from being shared. And if we have to eventually
563 // generate a COPY reloc, then any dynamic relocations will be
564 // useless. So this means that if this is a writable section, we need
565 // to save the relocation until we see whether we have to create a
566 // COPY relocation for this symbol for any other relocation.
567
568 void
569 Target_x86_64::copy_reloc(const General_options* options,
570 Symbol_table* symtab,
571 Layout* layout,
572 Sized_relobj<64, false>* object,
573 unsigned int data_shndx, Symbol* gsym,
574 const elfcpp::Rela<64, false>& rela)
575 {
576 Sized_symbol<64>* ssym;
577 ssym = symtab->get_sized_symbol SELECT_SIZE_NAME(64) (gsym
578 SELECT_SIZE(64));
579
580 if (!Copy_relocs<64, false>::need_copy_reloc(options, object,
581 data_shndx, ssym))
582 {
583 // So far we do not need a COPY reloc. Save this relocation.
584 // If it turns out that we never need a COPY reloc for this
585 // symbol, then we will emit the relocation.
586 if (this->copy_relocs_ == NULL)
587 this->copy_relocs_ = new Copy_relocs<64, false>();
588 this->copy_relocs_->save(ssym, object, data_shndx, rela);
589 }
590 else
591 {
592 // Allocate space for this symbol in the .bss section.
593
594 elfcpp::Elf_types<64>::Elf_WXword symsize = ssym->symsize();
595
596 // There is no defined way to determine the required alignment
597 // of the symbol. We pick the alignment based on the size. We
598 // set an arbitrary maximum of 512.
599 unsigned int align;
600 for (align = 1; align < 512; align <<= 1)
601 if ((symsize & align) != 0)
602 break;
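// For example, a symbol of size 24 (binary 11000) stops the loop at
// align == 8, its lowest set bit; an odd-sized symbol gets an
// alignment of 1.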
603
604 if (this->dynbss_ == NULL)
605 {
606 this->dynbss_ = new Output_data_space(align);
607 layout->add_output_section_data(".bss",
608 elfcpp::SHT_NOBITS,
609 (elfcpp::SHF_ALLOC
610 | elfcpp::SHF_WRITE),
611 this->dynbss_);
612 }
613
614 Output_data_space* dynbss = this->dynbss_;
615
616 if (align > dynbss->addralign())
617 dynbss->set_space_alignment(align);
618
619 off_t dynbss_size = dynbss->data_size();
620 dynbss_size = align_address(dynbss_size, align);
621 off_t offset = dynbss_size;
622 dynbss->set_space_size(dynbss_size + symsize);
623
624 // Define the symbol in the .dynbss section.
625 symtab->define_in_output_data(this, ssym->name(), ssym->version(),
626 dynbss, offset, symsize, ssym->type(),
627 ssym->binding(), ssym->visibility(),
628 ssym->nonvis(), false, false);
629
630 // Add the COPY reloc.
631 ssym->set_needs_dynsym_entry();
632 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
633 rela_dyn->add_global(ssym, elfcpp::R_X86_64_COPY, dynbss, offset, 0);
634 }
635 }
636
637
638 // Optimize the TLS relocation type based on what we know about the
639 // symbol. IS_FINAL is true if the final address of this symbol is
640 // known at link time.
641
642 tls::Tls_optimization
643 Target_x86_64::optimize_tls_reloc(bool is_final, int r_type)
644 {
645 // If we are generating a shared library, then we can't do anything
646 // in the linker.
647 if (parameters->output_is_shared())
648 return tls::TLSOPT_NONE;
649
650 switch (r_type)
651 {
652 case elfcpp::R_X86_64_TLSGD:
653 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
654 case elfcpp::R_X86_64_TLSDESC_CALL:
655 // These are General-Dynamic which permits fully general TLS
656 // access. Since we know that we are generating an executable,
657 // we can convert this to Initial-Exec. If we also know that
658 // this is a local symbol, we can further switch to Local-Exec.
659 if (is_final)
660 return tls::TLSOPT_TO_LE;
661 return tls::TLSOPT_TO_IE;
662
663 case elfcpp::R_X86_64_TLSLD:
664 // This is Local-Dynamic, which refers to a local symbol in the
665 // dynamic TLS block. Since we know that we are generating an
666 // executable, we can switch to Local-Exec.
667 return tls::TLSOPT_TO_LE;
668
669 case elfcpp::R_X86_64_DTPOFF32:
670 case elfcpp::R_X86_64_DTPOFF64:
671 // Another Local-Dynamic reloc.
672 return tls::TLSOPT_TO_LE;
673
674 case elfcpp::R_X86_64_GOTTPOFF:
675 // These are Initial-Exec relocs which get the thread offset
676 // from the GOT. If we know that we are linking against the
677 // local symbol, we can switch to Local-Exec, which links the
678 // thread offset into the instruction.
679 if (is_final)
680 return tls::TLSOPT_TO_LE;
681 return tls::TLSOPT_NONE;
682
683 case elfcpp::R_X86_64_TPOFF32:
684 // When we already have Local-Exec, there is nothing further we
685 // can do.
686 return tls::TLSOPT_NONE;
687
688 default:
689 gold_unreachable();
690 }
691 }
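// For example, when producing an executable, a General-Dynamic
// sequence against a symbol defined in the executable itself (so its
// final value is known at link time) is relaxed all the way to
// Local-Exec, while the same reloc against a preemptible symbol is
// only relaxed to Initial-Exec.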
692
693 // Report an unsupported relocation against a local symbol.
694
695 void
696 Target_x86_64::Scan::unsupported_reloc_local(Sized_relobj<64, false>* object,
697 unsigned int r_type)
698 {
699 gold_error(_("%s: unsupported reloc %u against local symbol"),
700 object->name().c_str(), r_type);
701 }
702
703 // Scan a relocation for a local symbol.
704
705 inline void
706 Target_x86_64::Scan::local(const General_options&,
707 Symbol_table* symtab,
708 Layout* layout,
709 Target_x86_64* target,
710 Sized_relobj<64, false>* object,
711 unsigned int data_shndx,
712 const elfcpp::Rela<64, false>& reloc,
713 unsigned int r_type,
714 const elfcpp::Sym<64, false>&)
715 {
716 switch (r_type)
717 {
718 case elfcpp::R_X86_64_NONE:
719 case elfcpp::R_386_GNU_VTINHERIT:
720 case elfcpp::R_386_GNU_VTENTRY:
721 break;
722
723 case elfcpp::R_X86_64_64:
724 case elfcpp::R_X86_64_32:
725 case elfcpp::R_X86_64_32S:
726 case elfcpp::R_X86_64_16:
727 case elfcpp::R_X86_64_8:
728 // FIXME: If we are generating a shared object we need to copy
729 // this relocation into the object.
730 gold_assert(!parameters->output_is_shared());
731 break;
732
733 case elfcpp::R_X86_64_PC64:
734 case elfcpp::R_X86_64_PC32:
735 case elfcpp::R_X86_64_PC16:
736 case elfcpp::R_X86_64_PC8:
737 break;
738
739 case elfcpp::R_X86_64_PLT32:
740 // Since we know this is a local symbol, we can handle this as a
741 // PC32 reloc.
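// For example, a call to a file-local function through foo@PLT simply
// becomes a direct PC-relative call; no PLT entry is needed.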
742 break;
743
744 case elfcpp::R_X86_64_GOTPC32: // TODO(csilvers): correct?
745 case elfcpp::R_X86_64_GOTOFF64:
746 case elfcpp::R_X86_64_GOTPC64: // TODO(csilvers): correct?
747 case elfcpp::R_X86_64_PLTOFF64: // TODO(csilvers): correct?
748 // We need a GOT section.
749 target->got_section(symtab, layout);
750 break;
751
752 case elfcpp::R_X86_64_GOT64:
753 case elfcpp::R_X86_64_GOT32:
754 case elfcpp::R_X86_64_GOTPCREL64:
755 case elfcpp::R_X86_64_GOTPCREL:
756 {
757 // The symbol requires a GOT entry.
758 Output_data_got<64, false>* got = target->got_section(symtab, layout);
759 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
760 if (got->add_local(object, r_sym))
761 {
762 // If we are generating a shared object, we need to add a
763 // dynamic RELATIVE relocation for this symbol.
764 if (parameters->output_is_shared())
765 {
766 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
767 rela_dyn->add_local(object, 0, elfcpp::R_X86_64_RELATIVE,
768 data_shndx, reloc.get_r_offset(), 0);
769 }
770 }
771 }
772 break;
773
774 case elfcpp::R_X86_64_COPY:
775 case elfcpp::R_X86_64_GLOB_DAT:
776 case elfcpp::R_X86_64_JUMP_SLOT:
777 case elfcpp::R_X86_64_RELATIVE:
778 // These are outstanding tls relocs, which are unexpected when linking
779 case elfcpp::R_X86_64_TPOFF64:
780 case elfcpp::R_X86_64_DTPMOD64:
781 case elfcpp::R_X86_64_TLSDESC:
782 gold_error(_("%s: unexpected reloc %u in object file"),
783 object->name().c_str(), r_type);
784 break;
785
786 // These are initial tls relocs, which are expected when linking
787 case elfcpp::R_X86_64_TLSGD:
788 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
789 case elfcpp::R_X86_64_TLSDESC_CALL:
790 case elfcpp::R_X86_64_TLSLD:
791 case elfcpp::R_X86_64_GOTTPOFF:
792 case elfcpp::R_X86_64_TPOFF32:
793 case elfcpp::R_X86_64_DTPOFF32:
794 case elfcpp::R_X86_64_DTPOFF64:
795 {
796 bool output_is_shared = parameters->output_is_shared();
797 const tls::Tls_optimization optimized_type
798 = Target_x86_64::optimize_tls_reloc(!output_is_shared, r_type);
799 switch (r_type)
800 {
801 case elfcpp::R_X86_64_TPOFF32: // Local-exec
802 // FIXME: If generating a shared object, we need to copy
803 // this relocation into the object.
804 gold_assert(!output_is_shared);
805 break;
806
807 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
808 // FIXME: If not relaxing to LE, we need to generate a
809 // TPOFF64 reloc.
810 if (optimized_type != tls::TLSOPT_TO_LE)
811 unsupported_reloc_local(object, r_type);
812 break;
813
814 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
815 case elfcpp::R_X86_64_DTPOFF32:
816 case elfcpp::R_X86_64_DTPOFF64:
817 // FIXME: If not relaxing to LE, we need to generate a
818 // DTPMOD64 reloc.
819 if (optimized_type != tls::TLSOPT_TO_LE)
820 unsupported_reloc_local(object, r_type);
821 break;
822
823
824 case elfcpp::R_X86_64_TLSGD: // General-dynamic
825 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
826 case elfcpp::R_X86_64_TLSDESC_CALL:
827 // FIXME: If not relaxing to LE, we need to generate
828 // DTPMOD64 and DTPOFF64 relocs.
829 if (optimized_type != tls::TLSOPT_TO_LE)
830 unsupported_reloc_local(object, r_type);
831 break;
832
833 default:
834 gold_unreachable();
835 }
836 }
837 break;
838
839 case elfcpp::R_X86_64_GOTPLT64:
840 case elfcpp::R_X86_64_SIZE32: // TODO(csilvers): correct?
841 case elfcpp::R_X86_64_SIZE64: // TODO(csilvers): correct?
842 default:
843 gold_error(_("%s: unsupported reloc %u against local symbol"),
844 object->name().c_str(), r_type);
845 break;
846 }
847 }
848
849
850 // Report an unsupported relocation against a global symbol.
851
852 void
853 Target_x86_64::Scan::unsupported_reloc_global(Sized_relobj<64, false>* object,
854 unsigned int r_type,
855 Symbol* gsym)
856 {
857 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
858 object->name().c_str(), r_type, gsym->name());
859 }
860
861 // Scan a relocation for a global symbol.
862
863 inline void
864 Target_x86_64::Scan::global(const General_options& options,
865 Symbol_table* symtab,
866 Layout* layout,
867 Target_x86_64* target,
868 Sized_relobj<64, false>* object,
869 unsigned int data_shndx,
870 const elfcpp::Rela<64, false>& reloc,
871 unsigned int r_type,
872 Symbol* gsym)
873 {
874 switch (r_type)
875 {
876 case elfcpp::R_X86_64_NONE:
877 case elfcpp::R_386_GNU_VTINHERIT:
878 case elfcpp::R_386_GNU_VTENTRY:
879 break;
880
881 case elfcpp::R_X86_64_64:
882 case elfcpp::R_X86_64_PC64:
883 case elfcpp::R_X86_64_32:
884 case elfcpp::R_X86_64_32S:
885 case elfcpp::R_X86_64_PC32:
886 case elfcpp::R_X86_64_16:
887 case elfcpp::R_X86_64_PC16:
888 case elfcpp::R_X86_64_8:
889 case elfcpp::R_X86_64_PC8:
890 // FIXME: If we are generating a shared object we may need to
891 // copy this relocation into the object. If this symbol is
892 // defined in a shared object, we may need to copy this
893 // relocation in order to avoid a COPY relocation.
894 gold_assert(!parameters->output_is_shared());
895
896 if (gsym->is_from_dynobj())
897 {
898 // This symbol is defined in a dynamic object. If it is a
899 // function, we make a PLT entry. Otherwise we need to
900 // either generate a COPY reloc or copy this reloc.
901 if (gsym->type() == elfcpp::STT_FUNC)
902 {
903 target->make_plt_entry(symtab, layout, gsym);
904
905 // If this is not a PC relative reference, then we may
906 // be taking the address of the function. In that case
907 // we need to set the entry in the dynamic symbol table
908 // to the address of the PLT entry.
909 if (r_type != elfcpp::R_X86_64_PC64
910 && r_type != elfcpp::R_X86_64_PC32
911 && r_type != elfcpp::R_X86_64_PC16
912 && r_type != elfcpp::R_X86_64_PC8)
913 gsym->set_needs_dynsym_value();
914 }
915 else
916 target->copy_reloc(&options, symtab, layout, object, data_shndx,
917 gsym, reloc);
918 }
919
920 break;
921
922 case elfcpp::R_X86_64_GOT64:
923 case elfcpp::R_X86_64_GOT32:
924 case elfcpp::R_X86_64_GOTPCREL64:
925 case elfcpp::R_X86_64_GOTPCREL:
926 case elfcpp::R_X86_64_GOTPLT64:
927 {
928 // The symbol requires a GOT entry.
929 Output_data_got<64, false>* got = target->got_section(symtab, layout);
930 if (got->add_global(gsym))
931 {
932 // If this symbol is not fully resolved, we need to add a
933 // dynamic relocation for it.
934 if (!gsym->final_value_is_known())
935 {
936 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
937 rela_dyn->add_global(gsym, elfcpp::R_X86_64_GLOB_DAT, got,
938 gsym->got_offset(), 0);
939 }
940 }
941 }
942 break;
943
944 case elfcpp::R_X86_64_PLT32:
945 // If the symbol is fully resolved, this is just a PC32 reloc.
946 // Otherwise we need a PLT entry.
947 if (gsym->final_value_is_known())
948 break;
949 target->make_plt_entry(symtab, layout, gsym);
950 break;
951
952 case elfcpp::R_X86_64_GOTPC32: // TODO(csilvers): correct?
953 case elfcpp::R_X86_64_GOTOFF64:
954 case elfcpp::R_X86_64_GOTPC64: // TODO(csilvers): correct?
955 case elfcpp::R_X86_64_PLTOFF64: // TODO(csilvers): correct?
956 // We need a GOT section.
957 target->got_section(symtab, layout);
958 break;
959
960 case elfcpp::R_X86_64_COPY:
961 case elfcpp::R_X86_64_GLOB_DAT:
962 case elfcpp::R_X86_64_JUMP_SLOT:
963 case elfcpp::R_X86_64_RELATIVE:
964 // These are outstanding tls relocs, which are unexpected when linking
965 case elfcpp::R_X86_64_TPOFF64:
966 case elfcpp::R_X86_64_DTPMOD64:
967 case elfcpp::R_X86_64_TLSDESC:
968 gold_error(_("%s: unexpected reloc %u in object file"),
969 object->name().c_str(), r_type);
970 break;
971
972 // These are initial tls relocs, which are expected for global()
973 case elfcpp::R_X86_64_TLSGD:
974 case elfcpp::R_X86_64_TLSLD:
975 case elfcpp::R_X86_64_GOTTPOFF:
976 case elfcpp::R_X86_64_TPOFF32:
977 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
978 case elfcpp::R_X86_64_TLSDESC_CALL:
979 case elfcpp::R_X86_64_DTPOFF32:
980 case elfcpp::R_X86_64_DTPOFF64:
981 {
982 const bool is_final = gsym->final_value_is_known();
983 const tls::Tls_optimization optimized_type
984 = Target_x86_64::optimize_tls_reloc(is_final, r_type);
985 switch (r_type)
986 {
987 case elfcpp::R_X86_64_TPOFF32: // Local-exec
988 // FIXME: If generating a shared object, we need to copy
989 // this relocation into the object.
990 gold_assert(is_final);
991 break;
992
993 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
994 // FIXME: If not relaxing to LE, we need to generate a
995 // TPOFF64 reloc.
996 if (optimized_type != tls::TLSOPT_TO_LE)
997 unsupported_reloc_global(object, r_type, gsym);
998 break;
999
1000 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1001 case elfcpp::R_X86_64_DTPOFF32:
1002 case elfcpp::R_X86_64_DTPOFF64:
1003 // FIXME: If not relaxing to LE, we need to generate a
1004 // DTPMOD64 reloc.
1005 if (optimized_type != tls::TLSOPT_TO_LE)
1006 unsupported_reloc_global(object, r_type, gsym);
1007 break;
1008
1009
1010 case elfcpp::R_X86_64_TLSGD: // General-dynamic
1011 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
1012 case elfcpp::R_X86_64_TLSDESC_CALL:
1013 // FIXME: If not relaxing to LE, we need to generate
1014 // DTPMOD64 and DTPOFF64, or TLSDESC, relocs.
1015 if (optimized_type != tls::TLSOPT_TO_LE)
1016 unsupported_reloc_global(object, r_type, gsym);
1017 break;
1018
1019 default:
1020 gold_unreachable();
1021 }
1022 }
1023 break;
1024 case elfcpp::R_X86_64_SIZE32: // TODO(csilvers): correct?
1025 case elfcpp::R_X86_64_SIZE64: // TODO(csilvers): correct?
1026 default:
1027 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
1028 object->name().c_str(), r_type, gsym->name());
1029 break;
1030 }
1031 }
1032
1033 // Scan relocations for a section.
1034
1035 void
1036 Target_x86_64::scan_relocs(const General_options& options,
1037 Symbol_table* symtab,
1038 Layout* layout,
1039 Sized_relobj<64, false>* object,
1040 unsigned int data_shndx,
1041 unsigned int sh_type,
1042 const unsigned char* prelocs,
1043 size_t reloc_count,
1044 size_t local_symbol_count,
1045 const unsigned char* plocal_symbols,
1046 Symbol** global_symbols)
1047 {
1048 if (sh_type == elfcpp::SHT_REL)
1049 {
1050 gold_error(_("%s: unsupported REL reloc section"),
1051 object->name().c_str());
1052 return;
1053 }
1054
1055 gold::scan_relocs<64, false, Target_x86_64, elfcpp::SHT_RELA,
1056 Target_x86_64::Scan>(
1057 options,
1058 symtab,
1059 layout,
1060 this,
1061 object,
1062 data_shndx,
1063 prelocs,
1064 reloc_count,
1065 local_symbol_count,
1066 plocal_symbols,
1067 global_symbols);
1068 }
1069
1070 // Finalize the sections.
1071
1072 void
1073 Target_x86_64::do_finalize_sections(Layout* layout)
1074 {
1075 // Fill in some more dynamic tags.
1076 Output_data_dynamic* const odyn = layout->dynamic_data();
1077 if (odyn != NULL)
1078 {
1079 if (this->got_plt_ != NULL)
1080 odyn->add_section_address(elfcpp::DT_PLTGOT, this->got_plt_);
1081
1082 if (this->plt_ != NULL)
1083 {
1084 const Output_data* od = this->plt_->rel_plt();
1085 odyn->add_section_size(elfcpp::DT_PLTRELSZ, od);
1086 odyn->add_section_address(elfcpp::DT_JMPREL, od);
1087 odyn->add_constant(elfcpp::DT_PLTREL, elfcpp::DT_RELA);
1088 }
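// (The output's .dynamic section therefore carries the usual PLT
// tags: DT_PLTGOT pointing at the .got.plt data, DT_JMPREL and
// DT_PLTRELSZ describing .rela.plt, and DT_PLTREL set to RELA, as
// `readelf -d` would show.)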
1089
1090 if (this->rela_dyn_ != NULL)
1091 {
1092 const Output_data* od = this->rela_dyn_;
1093 odyn->add_section_address(elfcpp::DT_RELA, od);
1094 odyn->add_section_size(elfcpp::DT_RELASZ, od);
1095 odyn->add_constant(elfcpp::DT_RELAENT,
1096 elfcpp::Elf_sizes<64>::rela_size);
1097 }
1098
1099 if (!parameters->output_is_shared())
1100 {
1101 // The value of the DT_DEBUG tag is filled in by the dynamic
1102 // linker at run time, and used by the debugger.
1103 odyn->add_constant(elfcpp::DT_DEBUG, 0);
1104 }
1105 }
1106
1107 // Emit any relocs we saved in an attempt to avoid generating COPY
1108 // relocs.
1109 if (this->copy_relocs_ == NULL)
1110 return;
1111 if (this->copy_relocs_->any_to_emit())
1112 {
1113 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
1114 this->copy_relocs_->emit(rela_dyn);
1115 }
1116 delete this->copy_relocs_;
1117 this->copy_relocs_ = NULL;
1118 }
1119
1120 // Perform a relocation.
1121
1122 inline bool
1123 Target_x86_64::Relocate::relocate(const Relocate_info<64, false>* relinfo,
1124 Target_x86_64* target,
1125 size_t relnum,
1126 const elfcpp::Rela<64, false>& rela,
1127 unsigned int r_type,
1128 const Sized_symbol<64>* gsym,
1129 const Symbol_value<64>* psymval,
1130 unsigned char* view,
1131 elfcpp::Elf_types<64>::Elf_Addr address,
1132 off_t view_size)
1133 {
1134 if (this->skip_call_tls_get_addr_)
1135 {
1136 if (r_type != elfcpp::R_X86_64_PLT32
1137 || gsym == NULL
1138 || strcmp(gsym->name(), "__tls_get_addr") != 0)
1139 {
1140 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1141 _("missing expected TLS relocation"));
1142 }
1143 else
1144 {
1145 this->skip_call_tls_get_addr_ = false;
1146 return false;
1147 }
1148 }
1149
1150 // Pick the value to use for symbols defined in shared objects.
1151 Symbol_value<64> symval;
1152 if (gsym != NULL && gsym->is_from_dynobj() && gsym->has_plt_offset())
1153 {
1154 symval.set_output_value(target->plt_section()->address()
1155 + gsym->plt_offset());
1156 psymval = &symval;
1157 }
1158
1159 const Sized_relobj<64, false>* object = relinfo->object;
1160 const elfcpp::Elf_Xword addend = rela.get_r_addend();
1161
1162 // Get the GOT offset if needed.
1163 bool have_got_offset = false;
1164 unsigned int got_offset = 0;
1165 switch (r_type)
1166 {
1167 case elfcpp::R_X86_64_GOT32:
1168 case elfcpp::R_X86_64_GOT64:
1169 case elfcpp::R_X86_64_GOTPLT64:
1170 case elfcpp::R_X86_64_GOTPCREL:
1171 case elfcpp::R_X86_64_GOTPCREL64:
1172 if (gsym != NULL)
1173 {
1174 gold_assert(gsym->has_got_offset());
1175 got_offset = gsym->got_offset();
1176 }
1177 else
1178 {
1179 unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info());
1180 got_offset = object->local_got_offset(r_sym);
1181 }
1182 have_got_offset = true;
1183 break;
1184
1185 default:
1186 break;
1187 }
1188
1189 switch (r_type)
1190 {
1191 case elfcpp::R_X86_64_NONE:
1192 case elfcpp::R_386_GNU_VTINHERIT:
1193 case elfcpp::R_386_GNU_VTENTRY:
1194 break;
1195
1196 case elfcpp::R_X86_64_64:
1197 Relocate_functions<64, false>::rela64(view, object, psymval, addend);
1198 break;
1199
1200 case elfcpp::R_X86_64_PC64:
1201 Relocate_functions<64, false>::pcrela64(view, object, psymval, addend,
1202 address);
1203 break;
1204
1205 case elfcpp::R_X86_64_32:
1206 // FIXME: we need to verify that value + addend fits into 32 bits:
1207 // uint64_t x = value + addend;
1208 // x == static_cast<uint64_t>(static_cast<uint32_t>(x))
1209 // Likewise for other <=32-bit relocations (but see R_X86_64_32S).
1210 Relocate_functions<64, false>::rela32(view, object, psymval, addend);
1211 break;
1212
1213 case elfcpp::R_X86_64_32S:
1214 // FIXME: we need to verify that value + addend fits into 32 bits:
1215 // int64_t x = value + addend; // note this quantity is signed!
1216 // x == static_cast<int64_t>(static_cast<int32_t>(x))
1217 Relocate_functions<64, false>::rela32(view, object, psymval, addend);
1218 break;
1219
1220 case elfcpp::R_X86_64_PC32:
1221 Relocate_functions<64, false>::pcrela32(view, object, psymval, addend,
1222 address);
1223 break;
1224
1225 case elfcpp::R_X86_64_16:
1226 Relocate_functions<64, false>::rela16(view, object, psymval, addend);
1227 break;
1228
1229 case elfcpp::R_X86_64_PC16:
1230 Relocate_functions<64, false>::pcrela16(view, object, psymval, addend,
1231 address);
1232 break;
1233
1234 case elfcpp::R_X86_64_8:
1235 Relocate_functions<64, false>::rela8(view, object, psymval, addend);
1236 break;
1237
1238 case elfcpp::R_X86_64_PC8:
1239 Relocate_functions<64, false>::pcrela8(view, object, psymval, addend,
1240 address);
1241 break;
1242
1243 case elfcpp::R_X86_64_PLT32:
1244 gold_assert(gsym == NULL
1245 || gsym->has_plt_offset()
1246 || gsym->final_value_is_known());
1247 Relocate_functions<64, false>::pcrela32(view, object, psymval, addend,
1248 address);
1249 break;
1250
1251 case elfcpp::R_X86_64_GOT32:
1252 gold_assert(have_got_offset);
1253 Relocate_functions<64, false>::rela32(view, got_offset, addend);
1254 break;
1255
1256 case elfcpp::R_X86_64_GOTPC32:
1257 {
1258 gold_assert(gsym);
1259 elfcpp::Elf_types<64>::Elf_Addr value;
1260 value = target->got_section(NULL, NULL)->address();
1261 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
1262 }
1263 break;
1264
1265 case elfcpp::R_X86_64_GOT64:
1266 // The ABI doc says "Like GOT64, but indicates a PLT entry is needed."
1267 // Since we always add a PLT entry, this is equivalent.
1268 case elfcpp::R_X86_64_GOTPLT64: // TODO(csilvers): correct?
1269 gold_assert(have_got_offset);
1270 Relocate_functions<64, false>::rela64(view, got_offset, addend);
1271 break;
1272
1273 case elfcpp::R_X86_64_GOTPC64:
1274 {
1275 gold_assert(gsym);
1276 elfcpp::Elf_types<64>::Elf_Addr value;
1277 value = target->got_section(NULL, NULL)->address();
1278 Relocate_functions<64, false>::pcrela64(view, value, addend, address);
1279 }
1280 break;
1281
1282 case elfcpp::R_X86_64_GOTOFF64:
1283 {
1284 elfcpp::Elf_types<64>::Elf_Addr value;
1285 value = (psymval->value(object, 0)
1286 - target->got_section(NULL, NULL)->address());
1287 Relocate_functions<64, false>::rela64(view, value, addend);
1288 }
1289 break;
1290
1291 case elfcpp::R_X86_64_GOTPCREL:
1292 {
1293 gold_assert(have_got_offset);
1294 elfcpp::Elf_types<64>::Elf_Addr value;
1295 value = target->got_section(NULL, NULL)->address() + got_offset;
1296 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
1297 }
1298 break;
1299
1300 case elfcpp::R_X86_64_GOTPCREL64:
1301 {
1302 gold_assert(have_got_offset);
1303 elfcpp::Elf_types<64>::Elf_Addr value;
1304 value = target->got_section(NULL, NULL)->address() + got_offset;
1305 Relocate_functions<64, false>::pcrela64(view, value, addend, address);
1306 }
1307 break;
1308
1309 case elfcpp::R_X86_64_COPY:
1310 case elfcpp::R_X86_64_GLOB_DAT:
1311 case elfcpp::R_X86_64_JUMP_SLOT:
1312 case elfcpp::R_X86_64_RELATIVE:
1313 // These are outstanding tls relocs, which are unexpected when linking
1314 case elfcpp::R_X86_64_TPOFF64:
1315 case elfcpp::R_X86_64_DTPMOD64:
1316 case elfcpp::R_X86_64_TLSDESC:
1317 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1318 _("unexpected reloc %u in object file"),
1319 r_type);
1320 break;
1321
1322 // These are initial tls relocs, which are expected when linking
1323 case elfcpp::R_X86_64_TLSGD:
1324 case elfcpp::R_X86_64_TLSLD:
1325 case elfcpp::R_X86_64_GOTTPOFF:
1326 case elfcpp::R_X86_64_TPOFF32:
1327 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
1328 case elfcpp::R_X86_64_TLSDESC_CALL:
1329 case elfcpp::R_X86_64_DTPOFF32:
1330 case elfcpp::R_X86_64_DTPOFF64:
1331 this->relocate_tls(relinfo, relnum, rela, r_type, gsym, psymval, view,
1332 address, view_size);
1333 break;
1334
1335 case elfcpp::R_X86_64_SIZE32: // TODO(csilvers): correct?
1336 case elfcpp::R_X86_64_SIZE64: // TODO(csilvers): correct?
1337 case elfcpp::R_X86_64_PLTOFF64: // TODO(csilvers): implement me!
1338 default:
1339 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1340 _("unsupported reloc %u"),
1341 r_type);
1342 break;
1343 }
1344
1345 return true;
1346 }
1347
1348 // Perform a TLS relocation.
1349
1350 inline void
1351 Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo,
1352 size_t relnum,
1353 const elfcpp::Rela<64, false>& rela,
1354 unsigned int r_type,
1355 const Sized_symbol<64>* gsym,
1356 const Symbol_value<64>* psymval,
1357 unsigned char* view,
1358 elfcpp::Elf_types<64>::Elf_Addr,
1359 off_t view_size)
1360 {
1361 Output_segment* tls_segment = relinfo->layout->tls_segment();
1362 if (tls_segment == NULL)
1363 {
1364 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1365 _("TLS reloc but no TLS segment"));
1366 return;
1367 }
1368
1369 elfcpp::Elf_types<64>::Elf_Addr value = psymval->value(relinfo->object, 0);
1370
1371 const bool is_final = (gsym == NULL
1372 ? !parameters->output_is_shared()
1373 : gsym->final_value_is_known());
1374 const tls::Tls_optimization optimized_type
1375 = Target_x86_64::optimize_tls_reloc(is_final, r_type);
1376 switch (r_type)
1377 {
1378 case elfcpp::R_X86_64_TPOFF32: // Local-exec reloc
1379 value = value - (tls_segment->vaddr() + tls_segment->memsz());
1380 Relocate_functions<64, false>::rel32(view, value);
1381 break;
1382
1383 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec reloc
1384 if (optimized_type == tls::TLSOPT_TO_LE)
1385 {
1386 Target_x86_64::Relocate::tls_ie_to_le(relinfo, relnum, tls_segment,
1387 rela, r_type, value, view,
1388 view_size);
1389 break;
1390 }
1391 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1392 _("unsupported reloc type %u"),
1393 r_type);
1394 break;
1395
1396 case elfcpp::R_X86_64_TLSGD:
1397 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
1398 case elfcpp::R_X86_64_TLSDESC_CALL:
1399 if (optimized_type == tls::TLSOPT_TO_LE)
1400 {
1401 this->tls_gd_to_le(relinfo, relnum, tls_segment,
1402 rela, r_type, value, view,
1403 view_size);
1404 break;
1405 }
1406 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1407 _("unsupported reloc %u"), r_type);
1408 break;
1409
1410 case elfcpp::R_X86_64_TLSLD:
1411 if (optimized_type == tls::TLSOPT_TO_LE)
1412 {
1413 this->tls_ld_to_le(relinfo, relnum, tls_segment, rela, r_type,
1414 value, view, view_size);
1415 break;
1416 }
1417 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1418 _("unsupported reloc %u"), r_type);
1419 break;
1420
1421 case elfcpp::R_X86_64_DTPOFF32:
1422 if (optimized_type == tls::TLSOPT_TO_LE)
1423 value = value - (tls_segment->vaddr() + tls_segment->memsz());
1424 else
1425 value = value - tls_segment->vaddr();
1426 Relocate_functions<64, false>::rel32(view, value);
1427 break;
1428
1429 case elfcpp::R_X86_64_DTPOFF64:
1430 if (optimized_type == tls::TLSOPT_TO_LE)
1431 value = value - (tls_segment->vaddr() + tls_segment->memsz());
1432 else
1433 value = value - tls_segment->vaddr();
1434 Relocate_functions<64, false>::rel64(view, value);
1435 break;
1436 }
1437 }
1438
1439 // Do a relocation in which we convert a TLS Initial-Exec to a
1440 // Local-Exec.
1441
1442 inline void
1443 Target_x86_64::Relocate::tls_ie_to_le(const Relocate_info<64, false>* relinfo,
1444 size_t relnum,
1445 Output_segment* tls_segment,
1446 const elfcpp::Rela<64, false>& rela,
1447 unsigned int,
1448 elfcpp::Elf_types<64>::Elf_Addr value,
1449 unsigned char* view,
1450 off_t view_size)
1451 {
1452 // We need to examine the opcodes to figure out which instruction we
1453 // are looking at.
1454
1455 // movq foo@gottpoff(%rip),%reg ==> movq $YY,%reg
1456 // addq foo@gottpoff(%rip),%reg ==> addq $YY,%reg
1457
1458 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
1459 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
1460
1461 unsigned char op1 = view[-3];
1462 unsigned char op2 = view[-2];
1463 unsigned char op3 = view[-1];
1464 unsigned char reg = op3 >> 3;
1465
1466 if (op2 == 0x8b)
1467 {
1468 // movq
1469 if (op1 == 0x4c)
1470 view[-3] = 0x49;
1471 view[-2] = 0xc7;
1472 view[-1] = 0xc0 | reg;
1473 }
1474 else if (reg == 4)
1475 {
1476 // Special handling for %rsp.
1477 if (op1 == 0x4c)
1478 view[-3] = 0x49;
1479 view[-2] = 0x81;
1480 view[-1] = 0xc0 | reg;
1481 }
1482 else
1483 {
1484 // addq
1485 if (op1 == 0x4c)
1486 view[-3] = 0x4d;
1487 view[-2] = 0x8d;
1488 view[-1] = 0x80 | reg | (reg << 3);
1489 }
1490
1491 value = value - (tls_segment->vaddr() + tls_segment->memsz());
1492 Relocate_functions<64, false>::rela32(view, value, 0);
1493 }
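// For example, `movq x@gottpoff(%rip),%r8` (4c 8b 05 <rel32>) becomes
// `movq $imm32,%r8` (49 c7 c0 <imm32>), and the %rax form
// (48 8b 05 <rel32>) becomes 48 c7 c0 <imm32>, where the immediate
// written above is x's negative offset from the end of the TLS
// segment.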
1494
1495 // Do a relocation in which we convert a TLS General-Dynamic to a
1496 // Local-Exec.
1497
1498 inline void
1499 Target_x86_64::Relocate::tls_gd_to_le(const Relocate_info<64, false>* relinfo,
1500 size_t relnum,
1501 Output_segment* tls_segment,
1502 const elfcpp::Rela<64, false>& rela,
1503 unsigned int,
1504 elfcpp::Elf_types<64>::Elf_Addr value,
1505 unsigned char* view,
1506 off_t view_size)
1507 {
1508 // .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
1509 // .word 0x6666; rex64; call __tls_get_addr
1510 // ==> movq %fs:0,%rax; leaq x@tpoff(%rax),%rax
1511
1512 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -4);
1513 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 12);
1514
1515 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
1516 (memcmp(view - 4, "\x66\x48\x8d\x3d", 4) == 0));
1517 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
1518 (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0));
1519
1520 memcpy(view - 4, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0\0", 16);
1521
1522 value = value - (tls_segment->vaddr() + tls_segment->memsz());
1523 Relocate_functions<64, false>::rela32(view + 8, value, 0);
1524
1525 // The next reloc should be a PLT32 reloc against __tls_get_addr.
1526 // We can skip it.
1527 this->skip_call_tls_get_addr_ = true;
1528 }
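// (The 16 bytes written above decode as `movq %fs:0,%rax`
// (64 48 8b 04 25 00 00 00 00) followed by `leaq 0(%rax),%rax`
// (48 8d 80 00 00 00 00); the rela32 at view + 8 patches the leaq
// displacement with the symbol's negative offset from the end of the
// TLS segment.)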
1529
1530 inline void
1531 Target_x86_64::Relocate::tls_ld_to_le(const Relocate_info<64, false>* relinfo,
1532 size_t relnum,
1533 Output_segment*,
1534 const elfcpp::Rela<64, false>& rela,
1535 unsigned int,
1536 elfcpp::Elf_types<64>::Elf_Addr,
1537 unsigned char* view,
1538 off_t view_size)
1539 {
1540 // leaq foo@tlsld(%rip),%rdi; call __tls_get_addr@plt;
1541 // ... leq foo@dtpoff(%rax),%reg
1542 // ==> .word 0x6666; .byte 0x66; movq %fs:0,%rax ... leaq x@tpoff(%rax),%rdx
1543
1544 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
1545 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 9);
1546
1547 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
1548 view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x3d);
1549
1550 tls::check_tls(relinfo, relnum, rela.get_r_offset(), view[4] == 0xe8);
1551
1552 memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
1553
1554 // The next reloc should be a PLT32 reloc against __tls_get_addr.
1555 // We can skip it.
1556 this->skip_call_tls_get_addr_ = true;
1557 }
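// (Here the replacement is three 0x66 operand-size prefixes, used
// only as padding so the instruction stream keeps its length,
// followed by `movq %fs:0,%rax`; it overwrites both the leaq and the
// call. The later foo@dtpoff references are handled by the
// DTPOFF32/DTPOFF64 cases above, which turn them into offsets from
// the end of the TLS segment.)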
1558
1559 // Relocate section data.
1560
1561 void
1562 Target_x86_64::relocate_section(const Relocate_info<64, false>* relinfo,
1563 unsigned int sh_type,
1564 const unsigned char* prelocs,
1565 size_t reloc_count,
1566 unsigned char* view,
1567 elfcpp::Elf_types<64>::Elf_Addr address,
1568 off_t view_size)
1569 {
1570 gold_assert(sh_type == elfcpp::SHT_RELA);
1571
1572 gold::relocate_section<64, false, Target_x86_64, elfcpp::SHT_RELA,
1573 Target_x86_64::Relocate>(
1574 relinfo,
1575 this,
1576 prelocs,
1577 reloc_count,
1578 view,
1579 address,
1580 view_size);
1581 }
1582
1583 // Return the value to use for a dynamic symbol which requires special
1584 // treatment. This is how we support equality comparisons of function
1585 // pointers across shared library boundaries, as described in the
1586 // processor specific ABI supplement.
1587
1588 uint64_t
1589 Target_x86_64::do_dynsym_value(const Symbol* gsym) const
1590 {
1591 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
1592 return this->plt_section()->address() + gsym->plt_offset();
1593 }
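// For example, if the executable takes the address of a function
// defined in a shared object, the address it uses is that of the
// function's PLT entry; publishing the same PLT address as the
// symbol's dynsym value means that pointer comparisons made inside
// the shared object agree with those made in the executable.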
1594
1595 // Return a string used to fill a code section with nops to take up
1596 // the specified length.
1597
1598 std::string
1599 Target_x86_64::do_code_fill(off_t length)
1600 {
1601 if (length >= 16)
1602 {
1603 // Build a jmpq instruction to skip over the bytes.
1604 unsigned char jmp[5];
1605 jmp[0] = 0xe9;
1606 elfcpp::Swap_unaligned<32, false>::writeval(jmp + 1, length - 5);
1607 return (std::string(reinterpret_cast<char*>(&jmp[0]), 5)
1608 + std::string(length - 5, '\0'));
1609 }
1610
1611 // Nop sequences of various lengths.
1612 const char nop1[1] = { 0x90 }; // nop
1613 const char nop2[2] = { 0x66, 0x90 }; // xchg %ax %ax
1614 const char nop3[3] = { 0x8d, 0x76, 0x00 }; // leal 0(%esi),%esi
1615 const char nop4[4] = { 0x8d, 0x74, 0x26, 0x00}; // leal 0(%esi,1),%esi
1616 const char nop5[5] = { 0x90, 0x8d, 0x74, 0x26, // nop
1617 0x00 }; // leal 0(%esi,1),%esi
1618 const char nop6[6] = { 0x8d, 0xb6, 0x00, 0x00, // leal 0L(%esi),%esi
1619 0x00, 0x00 };
1620 const char nop7[7] = { 0x8d, 0xb4, 0x26, 0x00, // leal 0L(%esi,1),%esi
1621 0x00, 0x00, 0x00 };
1622 const char nop8[8] = { 0x90, 0x8d, 0xb4, 0x26, // nop
1623 0x00, 0x00, 0x00, 0x00 }; // leal 0L(%esi,1),%esi
1624 const char nop9[9] = { 0x89, 0xf6, 0x8d, 0xbc, // movl %esi,%esi
1625 0x27, 0x00, 0x00, 0x00, // leal 0L(%edi,1),%edi
1626 0x00 };
1627 const char nop10[10] = { 0x8d, 0x76, 0x00, 0x8d, // leal 0(%esi),%esi
1628 0xbc, 0x27, 0x00, 0x00, // leal 0L(%edi,1),%edi
1629 0x00, 0x00 };
1630 const char nop11[11] = { 0x8d, 0x74, 0x26, 0x00, // leal 0(%esi,1),%esi
1631 0x8d, 0xbc, 0x27, 0x00, // leal 0L(%edi,1),%edi
1632 0x00, 0x00, 0x00 };
1633 const char nop12[12] = { 0x8d, 0xb6, 0x00, 0x00, // leal 0L(%esi),%esi
1634 0x00, 0x00, 0x8d, 0xbf, // leal 0L(%edi),%edi
1635 0x00, 0x00, 0x00, 0x00 };
1636 const char nop13[13] = { 0x8d, 0xb6, 0x00, 0x00, // leal 0L(%esi),%esi
1637 0x00, 0x00, 0x8d, 0xbc, // leal 0L(%edi,1),%edi
1638 0x27, 0x00, 0x00, 0x00,
1639 0x00 };
1640 const char nop14[14] = { 0x8d, 0xb4, 0x26, 0x00, // leal 0L(%esi,1),%esi
1641 0x00, 0x00, 0x00, 0x8d, // leal 0L(%edi,1),%edi
1642 0xbc, 0x27, 0x00, 0x00,
1643 0x00, 0x00 };
1644 const char nop15[15] = { 0xeb, 0x0d, 0x90, 0x90, // jmp .+15
1645 0x90, 0x90, 0x90, 0x90, // nop,nop,nop,...
1646 0x90, 0x90, 0x90, 0x90,
1647 0x90, 0x90, 0x90 };
1648
1649 const char* nops[16] = {
1650 NULL,
1651 nop1, nop2, nop3, nop4, nop5, nop6, nop7,
1652 nop8, nop9, nop10, nop11, nop12, nop13, nop14, nop15
1653 };
1654
1655 return std::string(nops[length], length);
1656 }
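// For example, do_code_fill(3) returns the three-byte
// `leal 0(%esi),%esi` sequence, while a request for 64 bytes yields a
// five-byte jmp over 59 bytes of zero padding.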
1657
1658 // The selector for x86_64 object files.
1659
1660 class Target_selector_x86_64 : public Target_selector
1661 {
1662 public:
1663 Target_selector_x86_64()
1664 : Target_selector(elfcpp::EM_X86_64, 64, false), target_(NULL)
1665 { }
1666
1667 Target*
1668 recognize(int machine, int osabi, int abiversion);
1669
1670 private:
1671 Target_x86_64* target_;
1672 };
1673
1674 // Recognize an x86_64 object file when we already know that the machine
1675 // number is EM_X86_64.
1676
1677 Target*
1678 Target_selector_x86_64::recognize(int, int, int)
1679 {
1680 if (this->target_ == NULL)
1681 this->target_ = new Target_x86_64();
1682 return this->target_;
1683 }
1684
1685 Target_selector_x86_64 target_selector_x86_64;
1686
1687 } // End anonymous namespace.