Further improve warnings for relocations referring to discarded sections.
1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2018 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.cc, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static bool
114 is_mrs_tpidr_el0(const Insntype insn)
115 { return (insn & 0xFFFFFFE0) == 0xd53bd040; }
116
117 static unsigned int
118 aarch64_rm(const Insntype insn)
119 { return aarch64_bits(insn, 16, 5); }
120
121 static unsigned int
122 aarch64_rn(const Insntype insn)
123 { return aarch64_bits(insn, 5, 5); }
124
125 static unsigned int
126 aarch64_rd(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt(const Insntype insn)
131 { return aarch64_bits(insn, 0, 5); }
132
133 static unsigned int
134 aarch64_rt2(const Insntype insn)
135 { return aarch64_bits(insn, 10, 5); }
136
137 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
138 static Insntype
139 aarch64_adr_encode_imm(Insntype adr, int imm21)
140 {
141 gold_assert(is_adr(adr));
142 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
143 const int mask19 = (1 << 19) - 1;
144 const int mask2 = 3;
145 adr &= ~((mask19 << 5) | (mask2 << 29));
146 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
147 return adr;
148 }
149
150 // Retrieve the 33-bit signed imm value encoded in an adrp. It is the 21-bit
151 // signed imm encoded in the insn, multiplied by 4k (the page size) and
152 // sign-extended to 64 bits, resulting in [-4G, 4G) with the 12 lsbs being 0.
153 static int64_t
154 aarch64_adrp_decode_imm(const Insntype adrp)
155 {
156 const int mask19 = (1 << 19) - 1;
157 const int mask2 = 3;
158 gold_assert(is_adrp(adrp));
159 // 21-bit imm encoded in adrp.
160 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
161 // Retrieve msb of 21-bit-signed imm for sign extension.
162 uint64_t msbt = (imm >> 20) & 1;
163 // Real value is imm multiplied by 4k. Value now has 33-bit information.
164 int64_t value = imm << 12;
165 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
166 // with value.
167 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
168 }
169
170 static bool
171 aarch64_b(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x14000000; }
173
174 static bool
175 aarch64_bl(const Insntype insn)
176 { return (insn & 0xFC000000) == 0x94000000; }
177
178 static bool
179 aarch64_blr(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
181
182 static bool
183 aarch64_br(const Insntype insn)
184 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
185
186 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
187 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
188 static bool
189 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
190
191 static bool
192 aarch64_ldst(Insntype insn)
193 { return (insn & 0x0a000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_ex(Insntype insn)
197 { return (insn & 0x3f000000) == 0x08000000; }
198
199 static bool
200 aarch64_ldst_pcrel(Insntype insn)
201 { return (insn & 0x3b000000) == 0x18000000; }
202
203 static bool
204 aarch64_ldst_nap(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28000000; }
206
207 static bool
208 aarch64_ldstp_pi(Insntype insn)
209 { return (insn & 0x3b800000) == 0x28800000; }
210
211 static bool
212 aarch64_ldstp_o(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29000000; }
214
215 static bool
216 aarch64_ldstp_pre(Insntype insn)
217 { return (insn & 0x3b800000) == 0x29800000; }
218
219 static bool
220 aarch64_ldst_ui(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000000; }
222
223 static bool
224 aarch64_ldst_piimm(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000400; }
226
227 static bool
228 aarch64_ldst_u(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000800; }
230
231 static bool
232 aarch64_ldst_preimm(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38000c00; }
234
235 static bool
236 aarch64_ldst_ro(Insntype insn)
237 { return (insn & 0x3b200c00) == 0x38200800; }
238
239 static bool
240 aarch64_ldst_uimm(Insntype insn)
241 { return (insn & 0x3b000000) == 0x39000000; }
242
243 static bool
244 aarch64_ldst_simd_m(Insntype insn)
245 { return (insn & 0xbfbf0000) == 0x0c000000; }
246
247 static bool
248 aarch64_ldst_simd_m_pi(Insntype insn)
249 { return (insn & 0xbfa00000) == 0x0c800000; }
250
251 static bool
252 aarch64_ldst_simd_s(Insntype insn)
253 { return (insn & 0xbf9f0000) == 0x0d000000; }
254
255 static bool
256 aarch64_ldst_simd_s_pi(Insntype insn)
257 { return (insn & 0xbf800000) == 0x0d800000; }
258
259 // Classify an INSN as a load/store if it is indeed one. Return true if INSN is
260 // a LD/ST instruction, otherwise return false. For scalar LD/ST instructions,
261 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
262 // instructions, PAIR is TRUE and RT and RT2 are returned.
263 static bool
264 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
265 bool *pair, bool *load)
266 {
267 uint32_t opcode;
268 unsigned int r;
269 uint32_t opc = 0;
270 uint32_t v = 0;
271 uint32_t opc_v = 0;
272
273 /* Bail out quickly if INSN doesn't fall into the load-store
274 encoding space. */
275 if (!aarch64_ldst (insn))
276 return false;
277
278 *pair = false;
279 *load = false;
280 if (aarch64_ldst_ex (insn))
281 {
282 *rt = aarch64_rt (insn);
283 *rt2 = *rt;
284 if (aarch64_bit (insn, 21) == 1)
285 {
286 *pair = true;
287 *rt2 = aarch64_rt2 (insn);
288 }
289 *load = aarch64_ld (insn);
290 return true;
291 }
292 else if (aarch64_ldst_nap (insn)
293 || aarch64_ldstp_pi (insn)
294 || aarch64_ldstp_o (insn)
295 || aarch64_ldstp_pre (insn))
296 {
297 *pair = true;
298 *rt = aarch64_rt (insn);
299 *rt2 = aarch64_rt2 (insn);
300 *load = aarch64_ld (insn);
301 return true;
302 }
303 else if (aarch64_ldst_pcrel (insn)
304 || aarch64_ldst_ui (insn)
305 || aarch64_ldst_piimm (insn)
306 || aarch64_ldst_u (insn)
307 || aarch64_ldst_preimm (insn)
308 || aarch64_ldst_ro (insn)
309 || aarch64_ldst_uimm (insn))
310 {
311 *rt = aarch64_rt (insn);
312 *rt2 = *rt;
313 if (aarch64_ldst_pcrel (insn))
314 *load = true;
315 opc = aarch64_bits (insn, 22, 2);
316 v = aarch64_bit (insn, 26);
317 opc_v = opc | (v << 2);
318 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
319 || opc_v == 5 || opc_v == 7);
320 return true;
321 }
322 else if (aarch64_ldst_simd_m (insn)
323 || aarch64_ldst_simd_m_pi (insn))
324 {
325 *rt = aarch64_rt (insn);
326 *load = aarch64_bit (insn, 22);
327 opcode = (insn >> 12) & 0xf;
328 switch (opcode)
329 {
330 case 0:
331 case 2:
332 *rt2 = *rt + 3;
333 break;
334
335 case 4:
336 case 6:
337 *rt2 = *rt + 2;
338 break;
339
340 case 7:
341 *rt2 = *rt;
342 break;
343
344 case 8:
345 case 10:
346 *rt2 = *rt + 1;
347 break;
348
349 default:
350 return false;
351 }
352 return true;
353 }
354 else if (aarch64_ldst_simd_s (insn)
355 || aarch64_ldst_simd_s_pi (insn))
356 {
357 *rt = aarch64_rt (insn);
358 r = (insn >> 21) & 1;
359 *load = aarch64_bit (insn, 22);
360 opcode = (insn >> 13) & 0x7;
361 switch (opcode)
362 {
363 case 0:
364 case 2:
365 case 4:
366 *rt2 = *rt + r;
367 break;
368
369 case 1:
370 case 3:
371 case 5:
372 *rt2 = *rt + (r == 0 ? 2 : 3);
373 break;
374
375 case 6:
376 *rt2 = *rt + r;
377 break;
378
379 case 7:
380 *rt2 = *rt + (r == 0 ? 2 : 3);
381 break;
382
383 default:
384 return false;
385 }
386 return true;
387 }
388 return false;
389 } // End of "aarch64_mem_op_p".
390
391 // Return true if INSN is a mac (multiply-accumulate) insn.
392 static bool
393 aarch64_mac(Insntype insn)
394 { return (insn & 0xff000000) == 0x9b000000; }
395
396 // Return true if INSN is a multiply-accumulate insn.
397 // (This is similar to the implementation in elfnn-aarch64.c.)
398 static bool
399 aarch64_mlxl(Insntype insn)
400 {
401 uint32_t op31 = aarch64_op31(insn);
402 if (aarch64_mac(insn)
403 && (op31 == 0 || op31 == 1 || op31 == 5)
404 /* Exclude MUL instructions which are encoded as a multiply-accumulate
405 with RA = XZR. */
406 && aarch64_ra(insn) != AARCH64_ZR)
407 {
408 return true;
409 }
410 return false;
411 }
412 }; // End of "AArch64_insn_utilities".
413
414
415 // Insn length in bytes.
416
417 template<bool big_endian>
418 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
419
420
421 // Zero register encoding - 31.
422
423 template<bool big_endian>
424 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
425
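// The immhi:immlo split handled by aarch64_adr_encode_imm and
// aarch64_adrp_decode_imm above can be exercised in isolation. The following
// is a minimal standalone sketch (not part of gold) that packs a signed
// 21-bit page count into an ADRP word the same way; the 0x90000010 base
// (adrp ip0) and the helper name are illustrative only.

static uint32_t
example_adrp_pack_imm21(int32_t imm21)
{
  uint32_t insn = 0x90000010;                                    // adrp ip0, <page>
  insn |= (static_cast<uint32_t>(imm21) & 0x3) << 29;            // immlo
  insn |= ((static_cast<uint32_t>(imm21) >> 2) & 0x7ffff) << 5;  // immhi
  return insn;
}

// Usage: AArch64_insn_utilities<false>::aarch64_adrp_decode_imm(
// example_adrp_pack_imm21(-3)) yields -3 * 4096, i.e. three pages below the
// current page.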
426
427 // Output_data_got_aarch64 class.
428
429 template<int size, bool big_endian>
430 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
431 {
432 public:
433 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
434 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
435 : Output_data_got<size, big_endian>(),
436 symbol_table_(symtab), layout_(layout)
437 { }
438
439 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
440 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
441 // applied in a static link.
442 void
443 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
444 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
445
446
447 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
448 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
449 // relocation that needs to be applied in a static link.
450 void
451 add_static_reloc(unsigned int got_offset, unsigned int r_type,
452 Sized_relobj_file<size, big_endian>* relobj,
453 unsigned int index)
454 {
455 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
456 index));
457 }
458
459
460 protected:
461 // Write out the GOT table.
462 void
463 do_write(Output_file* of) {
464 // The first entry in the GOT is the address of the .dynamic section.
465 gold_assert(this->data_size() >= size / 8);
466 Output_section* dynamic = this->layout_->dynamic_section();
467 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
468 this->replace_constant(0, dynamic_addr);
469 Output_data_got<size, big_endian>::do_write(of);
470
471 // Handle static relocs.
472 if (this->static_relocs_.empty())
473 return;
474
475 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
476
477 gold_assert(parameters->doing_static_link());
478 const off_t offset = this->offset();
479 const section_size_type oview_size =
480 convert_to_section_size_type(this->data_size());
481 unsigned char* const oview = of->get_output_view(offset, oview_size);
482
483 Output_segment* tls_segment = this->layout_->tls_segment();
484 gold_assert(tls_segment != NULL);
485
486 AArch64_address aligned_tcb_address =
487 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
488 tls_segment->maximum_alignment());
489
490 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
491 {
492 Static_reloc& reloc(this->static_relocs_[i]);
493 AArch64_address value;
494
495 if (!reloc.symbol_is_global())
496 {
497 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
498 const Symbol_value<size>* psymval =
499 reloc.relobj()->local_symbol(reloc.index());
500
501 // We are doing static linking. Issue an error and skip this
502 // relocation if the symbol is undefined or in a discarded section.
503 bool is_ordinary;
504 unsigned int shndx = psymval->input_shndx(&is_ordinary);
505 if ((shndx == elfcpp::SHN_UNDEF)
506 || (is_ordinary
507 && shndx != elfcpp::SHN_UNDEF
508 && !object->is_section_included(shndx)
509 && !this->symbol_table_->is_section_folded(object, shndx)))
510 {
511 gold_error(_("undefined or discarded local symbol %u from "
512 " object %s in GOT"),
513 reloc.index(), reloc.relobj()->name().c_str());
514 continue;
515 }
516 value = psymval->value(object, 0);
517 }
518 else
519 {
520 const Symbol* gsym = reloc.symbol();
521 gold_assert(gsym != NULL);
522 if (gsym->is_forwarder())
523 gsym = this->symbol_table_->resolve_forwards(gsym);
524
525 // We are doing static linking. Issue an error and skip this
526 // relocation if the symbol is undefined or in a discarded section,
527 // unless it is a weakly undefined symbol.
528 if ((gsym->is_defined_in_discarded_section()
529 || gsym->is_undefined())
530 && !gsym->is_weak_undefined())
531 {
532 gold_error(_("undefined or discarded symbol %s in GOT"),
533 gsym->name());
534 continue;
535 }
536
537 if (!gsym->is_weak_undefined())
538 {
539 const Sized_symbol<size>* sym =
540 static_cast<const Sized_symbol<size>*>(gsym);
541 value = sym->value();
542 }
543 else
544 value = 0;
545 }
546
547 unsigned got_offset = reloc.got_offset();
548 gold_assert(got_offset < oview_size);
549
550 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
551 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
552 Valtype x;
553 switch (reloc.r_type())
554 {
555 case elfcpp::R_AARCH64_TLS_DTPREL64:
556 x = value;
557 break;
558 case elfcpp::R_AARCH64_TLS_TPREL64:
559 x = value + aligned_tcb_address;
560 break;
561 default:
562 gold_unreachable();
563 }
564 elfcpp::Swap<size, big_endian>::writeval(wv, x);
565 }
566
567 of->write_output_view(offset, oview_size, oview);
568 }
569
570 private:
571 // Symbol table of the output object.
572 Symbol_table* symbol_table_;
573 // A pointer to the Layout class, so that we can find the .dynamic
574 // section when we write out the GOT section.
575 Layout* layout_;
576
577 // This class represents dynamic relocations that need to be applied by
578 // gold because we are using TLS relocations in a static link.
579 class Static_reloc
580 {
581 public:
582 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
583 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
584 { this->u_.global.symbol = gsym; }
585
586 Static_reloc(unsigned int got_offset, unsigned int r_type,
587 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
588 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
589 {
590 this->u_.local.relobj = relobj;
591 this->u_.local.index = index;
592 }
593
594 // Return the GOT offset.
595 unsigned int
596 got_offset() const
597 { return this->got_offset_; }
598
599 // Relocation type.
600 unsigned int
601 r_type() const
602 { return this->r_type_; }
603
604 // Whether the symbol is global or not.
605 bool
606 symbol_is_global() const
607 { return this->symbol_is_global_; }
608
609 // For a relocation against a global symbol, the global symbol.
610 Symbol*
611 symbol() const
612 {
613 gold_assert(this->symbol_is_global_);
614 return this->u_.global.symbol;
615 }
616
617 // For a relocation against a local symbol, the defining object.
618 Sized_relobj_file<size, big_endian>*
619 relobj() const
620 {
621 gold_assert(!this->symbol_is_global_);
622 return this->u_.local.relobj;
623 }
624
625 // For a relocation against a local symbol, the local symbol index.
626 unsigned int
627 index() const
628 {
629 gold_assert(!this->symbol_is_global_);
630 return this->u_.local.index;
631 }
632
633 private:
634 // GOT offset of the entry to which this relocation is applied.
635 unsigned int got_offset_;
636 // Type of relocation.
637 unsigned int r_type_;
638 // Whether this relocation is against a global symbol.
639 bool symbol_is_global_;
640 // A global or local symbol.
641 union
642 {
643 struct
644 {
645 // For a global symbol, the symbol itself.
646 Symbol* symbol;
647 } global;
648 struct
649 {
650 // For a local symbol, the object defining the symbol.
651 Sized_relobj_file<size, big_endian>* relobj;
652 // For a local symbol, the symbol index.
653 unsigned int index;
654 } local;
655 } u_;
656 }; // End of inner class Static_reloc
657
658 std::vector<Static_reloc> static_relocs_;
659 }; // End of Output_data_got_aarch64
660
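// Illustrative sketch (not gold code) of the static TPREL computation in
// do_write above: under the AArch64 TLS layout the thread pointer points at
// the TCB, so a symbol's TP-relative value is its offset within the TLS
// segment plus the TCB size rounded up to the segment's alignment. The
// parameters are examples; in this file the TCB size comes from
// Target_aarch64::TCB_SIZE.

static uint64_t
example_static_tprel(uint64_t value_in_tls_segment,
                     uint64_t tcb_size,
                     uint64_t tls_alignment)
{
  // Same rounding as align_address(TCB_SIZE, maximum_alignment()).
  uint64_t aligned_tcb = (tcb_size + tls_alignment - 1) & ~(tls_alignment - 1);
  return value_in_tls_segment + aligned_tcb;
}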
661
662 template<int size, bool big_endian>
663 class AArch64_input_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_output_section;
668
669
670 template<int size, bool big_endian>
671 class AArch64_relobj;
672
673
674 // Stub type enum constants.
675
676 enum
677 {
678 ST_NONE = 0,
679
680 // Using adrp/add pair, 4 insns (including alignment) without mem access,
681 // the fastest stub. This has a limited jump distance, which is tested by
682 // aarch64_valid_for_adrp_p.
683 ST_ADRP_BRANCH = 1,
684
685 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
686 // unlimited in jump distance.
687 ST_LONG_BRANCH_ABS = 2,
688
689 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
690 // mem access, the slowest one. Only used in position-independent output.
691 ST_LONG_BRANCH_PCREL = 3,
692
693 // Stub for erratum 843419 handling.
694 ST_E_843419 = 4,
695
696 // Stub for erratum 835769 handling.
697 ST_E_835769 = 5,
698
699 // Number of total stub types.
700 ST_NUMBER = 6
701 };
702
703
704 // Struct that wraps insns for a particular stub. All stub templates are
705 // created/initialized as constants by Stub_template_repertoire.
706
707 template<bool big_endian>
708 struct Stub_template
709 {
710 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
711 const int insn_num;
712 };
713
714
715 // Simple singleton class that creates/initializes/stores all types of stub
716 // templates.
717
718 template<bool big_endian>
719 class Stub_template_repertoire
720 {
721 public:
722 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
723
724 // Single static method to get stub template for a given stub type.
725 static const Stub_template<big_endian>*
726 get_stub_template(int type)
727 {
728 static Stub_template_repertoire<big_endian> singleton;
729 return singleton.stub_templates_[type];
730 }
731
732 private:
733 // Constructor - creates/initializes all stub templates.
734 Stub_template_repertoire();
735 ~Stub_template_repertoire()
736 { }
737
738 // Disallowing copy ctor and copy assignment operator.
739 Stub_template_repertoire(Stub_template_repertoire&);
740 Stub_template_repertoire& operator=(Stub_template_repertoire&);
741
742 // Data that stores all insn templates.
743 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
744 }; // End of "class Stub_template_repertoire".
745
746
747 // Constructor - creates/initializes all stub templates.
748
749 template<bool big_endian>
750 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
751 {
752 // Insn array definitions.
753 const static Insntype ST_NONE_INSNS[] = {};
754
755 const static Insntype ST_ADRP_BRANCH_INSNS[] =
756 {
757 0x90000010, /* adrp ip0, X */
758 /* ADR_PREL_PG_HI21(X) */
759 0x91000210, /* add ip0, ip0, :lo12:X */
760 /* ADD_ABS_LO12_NC(X) */
761 0xd61f0200, /* br ip0 */
762 0x00000000, /* alignment padding */
763 };
764
765 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
766 {
767 0x58000050, /* ldr ip0, 0x8 */
768 0xd61f0200, /* br ip0 */
769 0x00000000, /* address field */
770 0x00000000, /* address field */
771 };
772
773 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
774 {
775 0x58000090, /* ldr ip0, 0x10 */
776 0x10000011, /* adr ip1, #0 */
777 0x8b110210, /* add ip0, ip0, ip1 */
778 0xd61f0200, /* br ip0 */
779 0x00000000, /* address field */
780 0x00000000, /* address field */
781 0x00000000, /* alignment padding */
782 0x00000000, /* alignment padding */
783 };
784
785 const static Insntype ST_E_843419_INSNS[] =
786 {
787 0x00000000, /* Placeholder for erratum insn. */
788 0x14000000, /* b <label> */
789 };
790
791 // ST_E_835769 has the same stub template as ST_E_843419
792 // but we reproduce the array here so that the sizeof
793 // expressions in install_insn_template will work.
794 const static Insntype ST_E_835769_INSNS[] =
795 {
796 0x00000000, /* Placeholder for erratum insn. */
797 0x14000000, /* b <label> */
798 };
799
800 #define install_insn_template(T) \
801 const static Stub_template<big_endian> template_##T = { \
802 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
803 this->stub_templates_[T] = &template_##T
804
805 install_insn_template(ST_NONE);
806 install_insn_template(ST_ADRP_BRANCH);
807 install_insn_template(ST_LONG_BRANCH_ABS);
808 install_insn_template(ST_LONG_BRANCH_PCREL);
809 install_insn_template(ST_E_843419);
810 install_insn_template(ST_E_835769);
811
812 #undef install_insn_template
813 }
814
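// Usage sketch of the repertoire above (not gold code): callers fetch a
// template through the singleton accessor and read its insns/insn_num. The
// little-endian instantiation below is chosen arbitrarily.

static int
example_adrp_branch_stub_bytes()
{
  const Stub_template<false>* t =
    Stub_template_repertoire<false>::get_stub_template(ST_ADRP_BRANCH);
  // 4 insns of 4 bytes each, the last one being alignment padding.
  return t->insn_num * 4;
}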
815
816 // Base class for stubs.
817
818 template<int size, bool big_endian>
819 class Stub_base
820 {
821 public:
822 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
823 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
824
825 static const AArch64_address invalid_address =
826 static_cast<AArch64_address>(-1);
827
828 static const section_offset_type invalid_offset =
829 static_cast<section_offset_type>(-1);
830
831 Stub_base(int type)
832 : destination_address_(invalid_address),
833 offset_(invalid_offset),
834 type_(type)
835 {}
836
837 ~Stub_base()
838 {}
839
840 // Get stub type.
841 int
842 type() const
843 { return this->type_; }
844
845 // Get stub template that provides stub insn information.
846 const Stub_template<big_endian>*
847 stub_template() const
848 {
849 return Stub_template_repertoire<big_endian>::
850 get_stub_template(this->type());
851 }
852
853 // Get destination address.
854 AArch64_address
855 destination_address() const
856 {
857 gold_assert(this->destination_address_ != this->invalid_address);
858 return this->destination_address_;
859 }
860
861 // Set destination address.
862 void
863 set_destination_address(AArch64_address address)
864 {
865 gold_assert(address != this->invalid_address);
866 this->destination_address_ = address;
867 }
868
869 // Reset the destination address.
870 void
871 reset_destination_address()
872 { this->destination_address_ = this->invalid_address; }
873
874 // Get offset of code stub. For Reloc_stub, it is the offset from the
875 // beginning of its containing stub table; for Erratum_stub, it is the offset
876 // from the end of reloc_stubs.
877 section_offset_type
878 offset() const
879 {
880 gold_assert(this->offset_ != this->invalid_offset);
881 return this->offset_;
882 }
883
884 // Set stub offset.
885 void
886 set_offset(section_offset_type offset)
887 { this->offset_ = offset; }
888
889 // Return the stub insn.
890 const Insntype*
891 insns() const
892 { return this->stub_template()->insns; }
893
894 // Return num of stub insns.
895 unsigned int
896 insn_num() const
897 { return this->stub_template()->insn_num; }
898
899 // Get size of the stub.
900 int
901 stub_size() const
902 {
903 return this->insn_num() *
904 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
905 }
906
907 // Write stub to output file.
908 void
909 write(unsigned char* view, section_size_type view_size)
910 { this->do_write(view, view_size); }
911
912 protected:
913 // Abstract method to be implemented by sub-classes.
914 virtual void
915 do_write(unsigned char*, section_size_type) = 0;
916
917 private:
918 // The last insn of a stub is a jump to destination insn. This field records
919 // the destination address.
920 AArch64_address destination_address_;
921 // The stub offset. Note this has different interpretations between a
922 // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
923 // beginning of the containing stub_table, whereas for Erratum_stub, this is
924 // the offset from the end of reloc_stubs.
925 section_offset_type offset_;
926 // Stub type.
927 const int type_;
928 }; // End of "Stub_base".
929
930
931 // Erratum stub class. An erratum stub differs from a reloc stub in that for
932 // each erratum occurrence, we generate an erratum stub. We never share erratum
933 // stubs, whereas for reloc stubs, different branch insns share a single reloc
934 // stub as long as the branch targets are the same. (More to the point, reloc
935 // stubs can be shared because they're used to reach a specific target, whereas
936 // erratum stubs branch back to the original control flow.)
937
938 template<int size, bool big_endian>
939 class Erratum_stub : public Stub_base<size, big_endian>
940 {
941 public:
942 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
943 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
944 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
945 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
946
947 static const int STUB_ADDR_ALIGN;
948
949 static const Insntype invalid_insn = static_cast<Insntype>(-1);
950
951 Erratum_stub(The_aarch64_relobj* relobj, int type,
952 unsigned shndx, unsigned int sh_offset)
953 : Stub_base<size, big_endian>(type), relobj_(relobj),
954 shndx_(shndx), sh_offset_(sh_offset),
955 erratum_insn_(invalid_insn),
956 erratum_address_(this->invalid_address)
957 {}
958
959 ~Erratum_stub() {}
960
961 // Return the object that contains the erratum.
962 The_aarch64_relobj*
963 relobj()
964 { return this->relobj_; }
965
966 // Get section index of the erratum.
967 unsigned int
968 shndx() const
969 { return this->shndx_; }
970
971 // Get section offset of the erratum.
972 unsigned int
973 sh_offset() const
974 { return this->sh_offset_; }
975
976 // Get the erratum insn. This is the insn located at erratum_insn_address.
977 Insntype
978 erratum_insn() const
979 {
980 gold_assert(this->erratum_insn_ != this->invalid_insn);
981 return this->erratum_insn_;
982 }
983
984 // Set the insn that the erratum happens to.
985 void
986 set_erratum_insn(Insntype insn)
987 { this->erratum_insn_ = insn; }
988
989 // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
990 // relocation spot; in that case, the erratum_insn_ recorded at the scanning
991 // phase is no longer the one we want to write out to the stub, so update
992 // erratum_insn_ with the relocated version. Also note that in this case xn
993 // must not be "PC", so it is safe to move the erratum insn from its original
994 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
995 // insn, which cannot be a relocation spot (an assertion guards this).
996 void
997 update_erratum_insn(Insntype insn)
998 {
999 gold_assert(this->erratum_insn_ != this->invalid_insn);
1000 switch (this->type())
1001 {
1002 case ST_E_843419:
1003 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
1004 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
1005 gold_assert(Insn_utilities::aarch64_rd(insn) ==
1006 Insn_utilities::aarch64_rd(this->erratum_insn()));
1007 gold_assert(Insn_utilities::aarch64_rn(insn) ==
1008 Insn_utilities::aarch64_rn(this->erratum_insn()));
1009 // Update plain ld/st insn with relocated insn.
1010 this->erratum_insn_ = insn;
1011 break;
1012 case ST_E_835769:
1013 gold_assert(insn == this->erratum_insn());
1014 break;
1015 default:
1016 gold_unreachable();
1017 }
1018 }
1019
1020
1021 // Return the address where the erratum fix must be applied.
1022 AArch64_address
1023 erratum_address() const
1024 {
1025 gold_assert(this->erratum_address_ != this->invalid_address);
1026 return this->erratum_address_;
1027 }
1028
1029 // Set the address where the erratum fix must be applied.
1030 void
1031 set_erratum_address(AArch64_address addr)
1032 { this->erratum_address_ = addr; }
1033
1034 // Later relaxation passes may alter the recorded erratum and destination
1035 // addresses. Given an up-to-date output section address of shndx_ in
1036 // relobj_, we can derive the erratum address and destination address.
1037 void
1038 update_erratum_address(AArch64_address output_section_addr)
1039 {
1040 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1041 AArch64_address updated_addr = output_section_addr + this->sh_offset_;
1042 this->set_erratum_address(updated_addr);
1043 this->set_destination_address(updated_addr + BPI);
1044 }
1045
1046 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1047 // sh_offset). We do not include 'type' in the calculation, because there is
1048 // at most one stub type at (obj, shndx, sh_offset).
1049 bool
1050 operator<(const Erratum_stub<size, big_endian>& k) const
1051 {
1052 if (this == &k)
1053 return false;
1054 // We group stubs by relobj.
1055 if (this->relobj_ != k.relobj_)
1056 return this->relobj_ < k.relobj_;
1057 // Then by section index.
1058 if (this->shndx_ != k.shndx_)
1059 return this->shndx_ < k.shndx_;
1060 // Lastly by section offset.
1061 return this->sh_offset_ < k.sh_offset_;
1062 }
1063
1064 void
1065 invalidate_erratum_stub()
1066 {
1067 gold_assert(this->erratum_insn_ != invalid_insn);
1068 this->erratum_insn_ = invalid_insn;
1069 }
1070
1071 bool
1072 is_invalidated_erratum_stub()
1073 { return this->erratum_insn_ == invalid_insn; }
1074
1075 protected:
1076 virtual void
1077 do_write(unsigned char*, section_size_type);
1078
1079 private:
1080 // The object that needs to be fixed.
1081 The_aarch64_relobj* relobj_;
1082 // The shndx in the object that needs to be fixed.
1083 const unsigned int shndx_;
1084 // The section offset in the object that needs to be fixed.
1085 const unsigned int sh_offset_;
1086 // The insn to be fixed.
1087 Insntype erratum_insn_;
1088 // The address of the above insn.
1089 AArch64_address erratum_address_;
1090 }; // End of "Erratum_stub".
1091
1092
1093 // Erratum subclass to wrap additional info needed by 843419. In fixing this
1094 // erratum, we may choose to replace 'adrp' with 'adr'; in that case, we need
1095 // adrp's code position (two or three insns before the erratum insn itself).
1096
1097 template<int size, bool big_endian>
1098 class E843419_stub : public Erratum_stub<size, big_endian>
1099 {
1100 public:
1101 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1102
1103 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1104 unsigned int shndx, unsigned int sh_offset,
1105 unsigned int adrp_sh_offset)
1106 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1107 adrp_sh_offset_(adrp_sh_offset)
1108 {}
1109
1110 unsigned int
1111 adrp_sh_offset() const
1112 { return this->adrp_sh_offset_; }
1113
1114 private:
1115 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because we
1116 // can obtain it from its parent.)
1117 const unsigned int adrp_sh_offset_;
1118 };
1119
1120
1121 template<int size, bool big_endian>
1122 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1123
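// A small standalone sketch (not gold code) of the address bookkeeping in
// Erratum_stub::update_erratum_address above: once the output address of the
// section containing the erratum is known, the erratum address is that
// address plus the recorded section offset, and the stub must branch back to
// the insn immediately after it.

struct Example_erratum_addresses
{
  uint64_t erratum;       // where the problematic insn sits in the output
  uint64_t destination;   // where the erratum stub jumps back to
};

static Example_erratum_addresses
example_update_erratum_address(uint64_t output_section_addr,
                               unsigned int sh_offset)
{
  Example_erratum_addresses a;
  a.erratum = output_section_addr + sh_offset;
  a.destination = a.erratum + 4;  // BYTES_PER_INSN
  return a;
}
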
1124 // Comparator used in set definition.
1125 template<int size, bool big_endian>
1126 struct Erratum_stub_less
1127 {
1128 bool
1129 operator()(const Erratum_stub<size, big_endian>* s1,
1130 const Erratum_stub<size, big_endian>* s2) const
1131 { return *s1 < *s2; }
1132 };
1133
1134 // Erratum_stub implementation for writing stub to output file.
1135
1136 template<int size, bool big_endian>
1137 void
1138 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1139 {
1140 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1141 const Insntype* insns = this->insns();
1142 uint32_t num_insns = this->insn_num();
1143 Insntype* ip = reinterpret_cast<Insntype*>(view);
1144 // For the currently implemented errata 843419 and 835769, the first insn in the
1145 // stub is always a copy of the problematic insn (in 843419, the mem access
1146 // insn, in 835769, the mac insn), followed by a jump-back.
1147 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1148 for (uint32_t i = 1; i < num_insns; ++i)
1149 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1150 }
1151
1152
1153 // Reloc stub class.
1154
1155 template<int size, bool big_endian>
1156 class Reloc_stub : public Stub_base<size, big_endian>
1157 {
1158 public:
1159 typedef Reloc_stub<size, big_endian> This;
1160 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1161
1162 // Branch range. This is used to calculate the section group size, as well as
1163 // determine whether a stub is needed.
1164 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1165 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1166
1167 // Constant used to determine if an offset fits in the adrp instruction
1168 // encoding.
1169 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1170 static const int MIN_ADRP_IMM = -(1 << 20);
1171
1172 static const int BYTES_PER_INSN = 4;
1173 static const int STUB_ADDR_ALIGN;
1174
1175 // Determine whether the offset fits in the jump/branch instruction.
1176 static bool
1177 aarch64_valid_branch_offset_p(int64_t offset)
1178 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1179
1180 // Determine whether the offset fits in the adrp immediate field.
1181 static bool
1182 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1183 {
1184 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1185 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1186 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1187 }
1188
1189 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1190 // needed.
1191 static int
1192 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1193 AArch64_address target);
1194
1195 Reloc_stub(int type)
1196 : Stub_base<size, big_endian>(type)
1197 { }
1198
1199 ~Reloc_stub()
1200 { }
1201
1202 // The key class used to index the stub instance in the stub table's stub map.
1203 class Key
1204 {
1205 public:
1206 Key(int type, const Symbol* symbol, const Relobj* relobj,
1207 unsigned int r_sym, int32_t addend)
1208 : type_(type), addend_(addend)
1209 {
1210 if (symbol != NULL)
1211 {
1212 this->r_sym_ = Reloc_stub::invalid_index;
1213 this->u_.symbol = symbol;
1214 }
1215 else
1216 {
1217 gold_assert(relobj != NULL && r_sym != invalid_index);
1218 this->r_sym_ = r_sym;
1219 this->u_.relobj = relobj;
1220 }
1221 }
1222
1223 ~Key()
1224 { }
1225
1226 // Return stub type.
1227 int
1228 type() const
1229 { return this->type_; }
1230
1231 // Return the local symbol index or invalid_index.
1232 unsigned int
1233 r_sym() const
1234 { return this->r_sym_; }
1235
1236 // Return the symbol if there is one.
1237 const Symbol*
1238 symbol() const
1239 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1240
1241 // Return the relobj if there is one.
1242 const Relobj*
1243 relobj() const
1244 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1245
1246 // Whether this equals to another key k.
1247 bool
1248 eq(const Key& k) const
1249 {
1250 return ((this->type_ == k.type_)
1251 && (this->r_sym_ == k.r_sym_)
1252 && ((this->r_sym_ != Reloc_stub::invalid_index)
1253 ? (this->u_.relobj == k.u_.relobj)
1254 : (this->u_.symbol == k.u_.symbol))
1255 && (this->addend_ == k.addend_));
1256 }
1257
1258 // Return a hash value.
1259 size_t
1260 hash_value() const
1261 {
1262 size_t name_hash_value = gold::string_hash<char>(
1263 (this->r_sym_ != Reloc_stub::invalid_index)
1264 ? this->u_.relobj->name().c_str()
1265 : this->u_.symbol->name());
1266 // We only have 4 reloc stub types.
1267 size_t stub_type_hash_value = 0x03 & this->type_;
1268 return (name_hash_value
1269 ^ stub_type_hash_value
1270 ^ ((this->r_sym_ & 0x3fff) << 2)
1271 ^ ((this->addend_ & 0xffff) << 16));
1272 }
1273
1274 // Functors for STL associative containers.
1275 struct hash
1276 {
1277 size_t
1278 operator()(const Key& k) const
1279 { return k.hash_value(); }
1280 };
1281
1282 struct equal_to
1283 {
1284 bool
1285 operator()(const Key& k1, const Key& k2) const
1286 { return k1.eq(k2); }
1287 };
1288
1289 private:
1290 // Stub type.
1291 const int type_;
1292 // If this is a local symbol, this is the index in the defining object.
1293 // Otherwise, it is invalid_index for a global symbol.
1294 unsigned int r_sym_;
1295 // If r_sym_ is an invalid index, this points to a global symbol.
1296 // Otherwise, it points to a relobj. We used the unsized and target
1297 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
1298 // Arm_relobj, in order to avoid making the stub class a template
1299 // as most of the stub machinery is endianness-neutral. However, it
1300 // may require a bit of casting done by users of this class.
1301 union
1302 {
1303 const Symbol* symbol;
1304 const Relobj* relobj;
1305 } u_;
1306 // Addend associated with a reloc.
1307 int32_t addend_;
1308 }; // End of inner class Reloc_stub::Key
1309
1310 protected:
1311 // This may be overridden in the child class.
1312 virtual void
1313 do_write(unsigned char*, section_size_type);
1314
1315 private:
1316 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1317 }; // End of Reloc_stub
1318
1319 template<int size, bool big_endian>
1320 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1321
1322 // Write data to output file.
1323
1324 template<int size, bool big_endian>
1325 void
1326 Reloc_stub<size, big_endian>::
1327 do_write(unsigned char* view, section_size_type)
1328 {
1329 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1330 const uint32_t* insns = this->insns();
1331 uint32_t num_insns = this->insn_num();
1332 Insntype* ip = reinterpret_cast<Insntype*>(view);
1333 for (uint32_t i = 0; i < num_insns; ++i)
1334 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1335 }
1336
1337
1338 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1339 // needed.
1340
1341 template<int size, bool big_endian>
1342 inline int
1343 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1344 unsigned int r_type, AArch64_address location, AArch64_address dest)
1345 {
1346 int64_t branch_offset = 0;
1347 switch(r_type)
1348 {
1349 case elfcpp::R_AARCH64_CALL26:
1350 case elfcpp::R_AARCH64_JUMP26:
1351 branch_offset = dest - location;
1352 break;
1353 default:
1354 gold_unreachable();
1355 }
1356
1357 if (aarch64_valid_branch_offset_p(branch_offset))
1358 return ST_NONE;
1359
1360 if (aarch64_valid_for_adrp_p(location, dest))
1361 return ST_ADRP_BRANCH;
1362
1363 // Always use PC-relative addressing in case of -shared or -pie.
1364 if (parameters->options().output_is_position_independent())
1365 return ST_LONG_BRANCH_PCREL;
1366
1367 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
1368 // But it is only applicable to non-shared, non-pie output.
1369 return ST_LONG_BRANCH_ABS;
1370 }
1371
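// A standalone sketch (not gold code) of the range test the chooser above
// starts from: B/BL carry a signed 26-bit word offset, so any branch within
// roughly +/-128 MiB needs no stub. The constants mirror
// MAX_BRANCH_OFFSET/MIN_BRANCH_OFFSET.

static bool
example_branch_in_range(int64_t branch_offset)
{
  const int64_t max_off = ((static_cast<int64_t>(1) << 25) - 1) << 2;  //  0x7fffffc
  const int64_t min_off = -((static_cast<int64_t>(1) << 25) << 2);     // -0x8000000
  return branch_offset >= min_off && branch_offset <= max_off;
}

// For instance, a 200 MiB forward branch fails this test; the chooser then
// tries ST_ADRP_BRANCH (adrp reaches about +/-4 GiB), and only falls back to
// ST_LONG_BRANCH_PCREL or ST_LONG_BRANCH_ABS for farther targets.
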
1372 // A class to hold stubs for the AArch64 target. This contains 2 different
1373 // types of stubs - reloc stubs and erratum stubs.
1374
1375 template<int size, bool big_endian>
1376 class Stub_table : public Output_data
1377 {
1378 public:
1379 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1380 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1381 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1382 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1383 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1384 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1385 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1386 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1387 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1388 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1389 typedef Stub_table<size, big_endian> The_stub_table;
1390 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1391 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1392 Reloc_stub_map;
1393 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1394 typedef Relocate_info<size, big_endian> The_relocate_info;
1395
1396 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1397 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1398
1399 Stub_table(The_aarch64_input_section* owner)
1400 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1401 erratum_stubs_size_(0), prev_data_size_(0)
1402 { }
1403
1404 ~Stub_table()
1405 { }
1406
1407 The_aarch64_input_section*
1408 owner() const
1409 { return owner_; }
1410
1411 // Whether this stub table is empty.
1412 bool
1413 empty() const
1414 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1415
1416 // Return the current data size.
1417 off_t
1418 current_data_size() const
1419 { return this->current_data_size_for_child(); }
1420
1421 // Add a STUB using KEY. The caller is responsible for avoiding addition
1422 // if a STUB with the same key has already been added.
1423 void
1424 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1425
1426 // Add an erratum stub into the erratum stub set. The set is ordered by
1427 // (relobj, shndx, sh_offset).
1428 void
1429 add_erratum_stub(The_erratum_stub* stub);
1430
1431 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1432 The_erratum_stub*
1433 find_erratum_stub(The_aarch64_relobj* a64relobj,
1434 unsigned int shndx, unsigned int sh_offset);
1435
1436 // Find all the erratum stubs for a given input section. The return value is a pair
1437 // of iterators [begin, end).
1438 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1439 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1440 unsigned int shndx);
1441
1442 // Compute the erratum stub address.
1443 AArch64_address
1444 erratum_stub_address(The_erratum_stub* stub) const
1445 {
1446 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1447 The_erratum_stub::STUB_ADDR_ALIGN);
1448 r += stub->offset();
1449 return r;
1450 }
1451
1452 // Finalize stubs. No-op here, just for completeness.
1453 void
1454 finalize_stubs()
1455 { }
1456
1457 // Look up a relocation stub using KEY. Return NULL if there is none.
1458 The_reloc_stub*
1459 find_reloc_stub(The_reloc_stub_key& key)
1460 {
1461 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1462 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1463 }
1464
1465 // Relocate reloc stubs in this stub table. This does not relocate erratum stubs.
1466 void
1467 relocate_reloc_stubs(const The_relocate_info*,
1468 The_target_aarch64*,
1469 Output_section*,
1470 unsigned char*,
1471 AArch64_address,
1472 section_size_type);
1473
1474 // Relocate an erratum stub.
1475 void
1476 relocate_erratum_stub(The_erratum_stub*, unsigned char*);
1477
1478 // Update data size at the end of a relaxation pass. Return true if data size
1479 // is different from that of the previous relaxation pass.
1480 bool
1481 update_data_size_changed_p()
1482 {
1483 // The addralign does not change here.
1484 off_t s = align_address(this->reloc_stubs_size_,
1485 The_erratum_stub::STUB_ADDR_ALIGN)
1486 + this->erratum_stubs_size_;
1487 bool changed = (s != this->prev_data_size_);
1488 this->prev_data_size_ = s;
1489 return changed;
1490 }
1491
1492 protected:
1493 // Write out section contents.
1494 void
1495 do_write(Output_file*);
1496
1497 // Return the required alignment.
1498 uint64_t
1499 do_addralign() const
1500 {
1501 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1502 The_erratum_stub::STUB_ADDR_ALIGN);
1503 }
1504
1505 // Reset address and file offset.
1506 void
1507 do_reset_address_and_file_offset()
1508 { this->set_current_data_size_for_child(this->prev_data_size_); }
1509
1510 // Set final data size.
1511 void
1512 set_final_data_size()
1513 { this->set_data_size(this->current_data_size()); }
1514
1515 private:
1516 // Relocate one reloc stub.
1517 void
1518 relocate_reloc_stub(The_reloc_stub*,
1519 const The_relocate_info*,
1520 The_target_aarch64*,
1521 Output_section*,
1522 unsigned char*,
1523 AArch64_address,
1524 section_size_type);
1525
1526 private:
1527 // Owner of this stub table.
1528 The_aarch64_input_section* owner_;
1529 // The relocation stubs.
1530 Reloc_stub_map reloc_stubs_;
1531 // The erratum stubs.
1532 Erratum_stub_set erratum_stubs_;
1533 // Size of reloc stubs.
1534 off_t reloc_stubs_size_;
1535 // Size of erratum stubs.
1536 off_t erratum_stubs_size_;
1537 // Data size of this stub table in the previous relaxation pass.
1538 off_t prev_data_size_;
1539 }; // End of Stub_table
1540
1541
1542 // Add an erratum stub into the erratum stub set. The set is ordered by
1543 // (relobj, shndx, sh_offset).
1544
1545 template<int size, bool big_endian>
1546 void
1547 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1548 {
1549 std::pair<Erratum_stub_set_iter, bool> ret =
1550 this->erratum_stubs_.insert(stub);
1551 gold_assert(ret.second);
1552 this->erratum_stubs_size_ = align_address(
1553 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1554 stub->set_offset(this->erratum_stubs_size_);
1555 this->erratum_stubs_size_ += stub->stub_size();
1556 }
1557
1558
1559 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1560
1561 template<int size, bool big_endian>
1562 Erratum_stub<size, big_endian>*
1563 Stub_table<size, big_endian>::find_erratum_stub(
1564 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1565 {
1566 // A dummy object used as key to search in the set.
1567 The_erratum_stub key(a64relobj, ST_NONE,
1568 shndx, sh_offset);
1569 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1570 if (i != this->erratum_stubs_.end())
1571 {
1572 The_erratum_stub* stub(*i);
1573 gold_assert(stub->erratum_insn() != 0);
1574 return stub;
1575 }
1576 return NULL;
1577 }
1578
1579
1580 // Find all the errata for a given input section. The return value is a pair of
1581 // iterators [begin, end).
1582
1583 template<int size, bool big_endian>
1584 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1585 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1586 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1587 The_aarch64_relobj* a64relobj, unsigned int shndx)
1588 {
1589 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1590 Erratum_stub_set_iter start, end;
1591 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1592 start = this->erratum_stubs_.lower_bound(&low_key);
1593 if (start == this->erratum_stubs_.end())
1594 return Result_pair(this->erratum_stubs_.end(),
1595 this->erratum_stubs_.end());
1596 end = start;
1597 while (end != this->erratum_stubs_.end() &&
1598 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1599 ++end;
1600 return Result_pair(start, end);
1601 }
1602
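// The lookup above depends only on the (relobj, shndx, sh_offset) ordering
// from Erratum_stub::operator<: a dummy key with sh_offset 0 sorts before
// every real stub of that section, so lower_bound lands on the section's
// first stub. A minimal sketch of the same idiom with a plain std::set
// (keys are (shndx, sh_offset) pairs; names are illustrative, not gold code):

static std::set<std::pair<unsigned int, unsigned int> >::const_iterator
example_first_stub_of_section(
    const std::set<std::pair<unsigned int, unsigned int> >& stubs,
    unsigned int shndx)
{
  return stubs.lower_bound(std::make_pair(shndx, 0u));
}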
1603
1604 // Add a STUB using KEY. The caller is responsible for avoiding addition
1605 // if a STUB with the same key has already been added.
1606
1607 template<int size, bool big_endian>
1608 void
1609 Stub_table<size, big_endian>::add_reloc_stub(
1610 The_reloc_stub* stub, const The_reloc_stub_key& key)
1611 {
1612 gold_assert(stub->type() == key.type());
1613 this->reloc_stubs_[key] = stub;
1614
1615 // Assign stub offset early. We can do this because we never remove
1616 // reloc stubs and they are in the beginning of the stub table.
1617 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1618 The_reloc_stub::STUB_ADDR_ALIGN);
1619 stub->set_offset(this->reloc_stubs_size_);
1620 this->reloc_stubs_size_ += stub->stub_size();
1621 }
1622
1623
1624 // Relocate an erratum stub.
1625
1626 template<int size, bool big_endian>
1627 void
1628 Stub_table<size, big_endian>::
1629 relocate_erratum_stub(The_erratum_stub* estub,
1630 unsigned char* view)
1631 {
1632 // Just for convenience.
1633 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1634
1635 gold_assert(!estub->is_invalidated_erratum_stub());
1636 AArch64_address stub_address = this->erratum_stub_address(estub);
1637 // The address of "b" in the stub that is to be "relocated".
1638 AArch64_address stub_b_insn_address;
1639 // Branch offset that is to be filled in "b" insn.
1640 int b_offset = 0;
1641 switch (estub->type())
1642 {
1643 case ST_E_843419:
1644 case ST_E_835769:
1645 // The 1st insn of the erratum could be a relocation spot,
1646 // in this case we need to fix it with
1647 // "(*i)->erratum_insn()".
1648 elfcpp::Swap<32, big_endian>::writeval(
1649 view + (stub_address - this->address()),
1650 estub->erratum_insn());
1651 // For the erratum, the 2nd insn is a b-insn to be patched
1652 // (relocated).
1653 stub_b_insn_address = stub_address + 1 * BPI;
1654 b_offset = estub->destination_address() - stub_b_insn_address;
1655 AArch64_relocate_functions<size, big_endian>::construct_b(
1656 view + (stub_b_insn_address - this->address()),
1657 ((unsigned int)(b_offset)) & 0xfffffff);
1658 break;
1659 default:
1660 gold_unreachable();
1661 break;
1662 }
1663 estub->invalidate_erratum_stub();
1664 }
1665
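// A standalone sketch (not gold code) of what patching the stub's trailing
// "b" amounts to: the byte offset from the b insn back to the destination is
// divided by 4 and placed in the low 26 bits of the 0x14000000 opcode (the
// same encoding aarch64_b() tests for). construct_b in
// AArch64_relocate_functions does the real patching; this helper is
// illustrative only.

static uint32_t
example_encode_b(int64_t byte_offset)
{
  return 0x14000000u
         | (static_cast<uint32_t>(byte_offset >> 2) & 0x03ffffff);
}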
1666
1667 // Relocate only reloc stubs in this stub table. This does not relocate erratum
1668 // stubs.
1669
1670 template<int size, bool big_endian>
1671 void
1672 Stub_table<size, big_endian>::
1673 relocate_reloc_stubs(const The_relocate_info* relinfo,
1674 The_target_aarch64* target_aarch64,
1675 Output_section* output_section,
1676 unsigned char* view,
1677 AArch64_address address,
1678 section_size_type view_size)
1679 {
1680 // "view_size" is the total size of the stub_table.
1681 gold_assert(address == this->address() &&
1682 view_size == static_cast<section_size_type>(this->data_size()));
1683 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1684 p != this->reloc_stubs_.end(); ++p)
1685 relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
1686 view, address, view_size);
1687 }
1688
1689
1690 // Relocate one reloc stub. This is a helper for
1691 // Stub_table::relocate_reloc_stubs().
1692
1693 template<int size, bool big_endian>
1694 void
1695 Stub_table<size, big_endian>::
1696 relocate_reloc_stub(The_reloc_stub* stub,
1697 const The_relocate_info* relinfo,
1698 The_target_aarch64* target_aarch64,
1699 Output_section* output_section,
1700 unsigned char* view,
1701 AArch64_address address,
1702 section_size_type view_size)
1703 {
1704 // "offset" is the offset from the beginning of the stub_table.
1705 section_size_type offset = stub->offset();
1706 section_size_type stub_size = stub->stub_size();
1707 // "view_size" is the total size of the stub_table.
1708 gold_assert(offset + stub_size <= view_size);
1709
1710 target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
1711 view + offset, address + offset, view_size);
1712 }
1713
1714
1715 // Write out the stubs to file.
1716
1717 template<int size, bool big_endian>
1718 void
1719 Stub_table<size, big_endian>::do_write(Output_file* of)
1720 {
1721 off_t offset = this->offset();
1722 const section_size_type oview_size =
1723 convert_to_section_size_type(this->data_size());
1724 unsigned char* const oview = of->get_output_view(offset, oview_size);
1725
1726 // Write relocation stubs.
1727 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1728 p != this->reloc_stubs_.end(); ++p)
1729 {
1730 The_reloc_stub* stub = p->second;
1731 AArch64_address address = this->address() + stub->offset();
1732 gold_assert(address ==
1733 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1734 stub->write(oview + stub->offset(), stub->stub_size());
1735 }
1736
1737 // Write erratum stubs.
1738 unsigned int erratum_stub_start_offset =
1739 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1740 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1741 p != this->erratum_stubs_.end(); ++p)
1742 {
1743 The_erratum_stub* stub(*p);
1744 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1745 stub->stub_size());
1746 }
1747
1748 of->write_output_view(this->offset(), oview_size, oview);
1749 }
1750
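// Layout sketch (not gold code) of the view written above: reloc stubs
// occupy [0, reloc_stubs_size_), and erratum stubs begin at the next
// STUB_ADDR_ALIGN boundary, each at the offset assigned when it was added.
// The helper below computes where a given erratum stub lands in the view;
// the names and the align parameter are illustrative.

static section_size_type
example_erratum_stub_view_offset(section_size_type reloc_stubs_size,
                                 section_size_type erratum_stub_offset,
                                 section_size_type align)
{
  section_size_type start = (reloc_stubs_size + align - 1) & ~(align - 1);
  return start + erratum_stub_offset;
}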
1751
1752 // AArch64_relobj class.
1753
1754 template<int size, bool big_endian>
1755 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1756 {
1757 public:
1758 typedef AArch64_relobj<size, big_endian> This;
1759 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1760 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1761 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1762 typedef Stub_table<size, big_endian> The_stub_table;
1763 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1764 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1765 typedef std::vector<The_stub_table*> Stub_table_list;
1766 static const AArch64_address invalid_address =
1767 static_cast<AArch64_address>(-1);
1768
1769 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1770 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1771 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1772 stub_tables_()
1773 { }
1774
1775 ~AArch64_relobj()
1776 { }
1777
1778 // Return the stub table of the SHNDX-th section if there is one.
1779 The_stub_table*
1780 stub_table(unsigned int shndx) const
1781 {
1782 gold_assert(shndx < this->stub_tables_.size());
1783 return this->stub_tables_[shndx];
1784 }
1785
1786 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1787 void
1788 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1789 {
1790 gold_assert(shndx < this->stub_tables_.size());
1791 this->stub_tables_[shndx] = stub_table;
1792 }
1793
1794 // Entry point for errata scanning.
1795 void
1796 scan_errata(unsigned int shndx,
1797 const elfcpp::Shdr<size, big_endian>&,
1798 Output_section*, const Symbol_table*,
1799 The_target_aarch64*);
1800
1801 // Scan all relocation sections for stub generation.
1802 void
1803 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1804 const Layout*);
1805
1806 // Whether a section is a scannable text section.
1807 bool
1808 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1809 const Output_section*, const Symbol_table*);
1810
1811 // Convert regular input section with index SHNDX to a relaxed section.
1812 void
1813 convert_input_section_to_relaxed_section(unsigned shndx)
1814 {
1815 // The stubs have relocations and we need to process them after writing
1816 // out the stubs, so relocation processing must now follow section writes.
1817 this->set_section_offset(shndx, -1ULL);
1818 this->set_relocs_must_follow_section_writes();
1819 }
1820
1821 // Structure for mapping symbol position.
1822 struct Mapping_symbol_position
1823 {
1824 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1825 shndx_(shndx), offset_(offset)
1826 {}
1827
1828 // "<" comparator used in the ordered map container.
1829 bool
1830 operator<(const Mapping_symbol_position& p) const
1831 {
1832 return (this->shndx_ < p.shndx_
1833 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1834 }
1835
1836 // Section index.
1837 unsigned int shndx_;
1838
1839 // Section offset.
1840 AArch64_address offset_;
1841 };
1842
1843 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1844
1845 protected:
1846 // Post constructor setup.
1847 void
1848 do_setup()
1849 {
1850 // Call parent's setup method.
1851 Sized_relobj_file<size, big_endian>::do_setup();
1852
1853 // Initialize look-up tables.
1854 this->stub_tables_.resize(this->shnum());
1855 }
1856
1857 virtual void
1858 do_relocate_sections(
1859 const Symbol_table* symtab, const Layout* layout,
1860 const unsigned char* pshdrs, Output_file* of,
1861 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1862
1863 // Count local symbols and (optionally) record mapping info.
1864 virtual void
1865 do_count_local_symbols(Stringpool_template<char>*,
1866 Stringpool_template<char>*);
1867
1868 private:
1869 // Fix all errata in the object, and for each erratum, relocate the
1870 // corresponding erratum stub.
1871 void
1872 fix_errata_and_relocate_erratum_stubs(
1873 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1874
1875 // Try to fix erratum 843419 in an optimized way. Return true if the patch
1876 // is applied.
1877 bool
1878 try_fix_erratum_843419_optimized(
1879 The_erratum_stub*, AArch64_address,
1880 typename Sized_relobj_file<size, big_endian>::View_size&);
1881
1882 // Whether a section needs to be scanned for relocation stubs.
1883 bool
1884 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1885 const Relobj::Output_sections&,
1886 const Symbol_table*, const unsigned char*);
1887
1888 // List of stub tables.
1889 Stub_table_list stub_tables_;
1890
1891 // Mapping symbol information sorted by (section index, section_offset).
1892 Mapping_symbol_info mapping_symbol_info_;
1893 }; // End of AArch64_relobj
1894
1895
1896 // Override to record mapping symbol information.
1897 template<int size, bool big_endian>
1898 void
1899 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1900 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1901 {
1902 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1903
1904 // Only erratum-fixing work needs mapping symbols, so skip this
1905 // time-consuming processing if we are not fixing errata.
1906 if (!parameters->options().fix_cortex_a53_843419()
1907 && !parameters->options().fix_cortex_a53_835769())
1908 return;
1909
1910 const unsigned int loccount = this->local_symbol_count();
1911 if (loccount == 0)
1912 return;
1913
1914 // Read the symbol table section header.
1915 const unsigned int symtab_shndx = this->symtab_shndx();
1916 elfcpp::Shdr<size, big_endian>
1917 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1918 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1919
1920 // Read the local symbols.
1921 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1922 gold_assert(loccount == symtabshdr.get_sh_info());
1923 off_t locsize = loccount * sym_size;
1924 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1925 locsize, true, true);
1926
1927 // For mapping symbol processing, we need to read the symbol names.
1928 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1929 if (strtab_shndx >= this->shnum())
1930 {
1931 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1932 return;
1933 }
1934
1935 elfcpp::Shdr<size, big_endian>
1936 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1937 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1938 {
1939 this->error(_("symbol table name section has wrong type: %u"),
1940 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1941 return;
1942 }
1943
1944 const char* pnames =
1945 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1946 strtabshdr.get_sh_size(),
1947 false, false));
1948
1949 // Skip the first dummy symbol.
1950 psyms += sym_size;
1951 typename Sized_relobj_file<size, big_endian>::Local_values*
1952 plocal_values = this->local_values();
1953 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1954 {
1955 elfcpp::Sym<size, big_endian> sym(psyms);
1956 Symbol_value<size>& lv((*plocal_values)[i]);
1957 AArch64_address input_value = lv.input_value();
1958
1959 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1960 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1961 // symbols.
1962 // Mapping symbols could be one of the following 4 forms -
1963 // a) $x
1964 // b) $x.<any...>
1965 // c) $d
1966 // d) $d.<any...>
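// Per AAELF64, a '$x' mapping symbol marks the start of a run of A64 code
// and a '$d' marks the start of a run of literal data within the section;
// only the code spans are scanned for errata later on.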
1967 const char* sym_name = pnames + sym.get_st_name();
1968 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1969 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1970 {
1971 bool is_ordinary;
1972 unsigned int input_shndx =
1973 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1974 gold_assert(is_ordinary);
1975
1976 Mapping_symbol_position msp(input_shndx, input_value);
1977 // Insert mapping_symbol_info into map whose ordering is defined by
1978 // (shndx, offset_within_section).
1979 this->mapping_symbol_info_[msp] = sym_name[1];
1980 }
1981 }
1982 }
1983
1984
1985 // Fix all errata in the object, and for each erratum relocate the
1986 // corresponding erratum stub (by calling Stub_table::relocate_erratum_stub).
1987
1988 template<int size, bool big_endian>
1989 void
1990 AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs(
1991 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1992 {
1993 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1994 unsigned int shnum = this->shnum();
1995 const Relobj::Output_sections& out_sections(this->output_sections());
1996 for (unsigned int i = 1; i < shnum; ++i)
1997 {
1998 The_stub_table* stub_table = this->stub_table(i);
1999 if (!stub_table)
2000 continue;
2001 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
2002 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
2003 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
2004 typename Sized_relobj_file<size, big_endian>::View_size&
2005 pview((*pviews)[i]);
2006 AArch64_address view_offset = 0;
2007 if (pview.is_input_output_view)
2008 {
2009 // In this case, write_sections has not added the output offset to
2010 // the view's address, so we must do so. Currently this only happens
2011 // for a relaxed section.
2012 unsigned int index = this->adjust_shndx(i);
2013 const Output_relaxed_input_section* poris =
2014 out_sections[index]->find_relaxed_input_section(this, index);
2015 gold_assert(poris != NULL);
2016 view_offset = poris->address() - pview.address;
2017 }
2018
2019 while (p != end)
2020 {
2021 The_erratum_stub* stub = *p;
2022
2023 // Double check data before fix.
2024 gold_assert(pview.address + view_offset + stub->sh_offset()
2025 == stub->erratum_address());
2026
2027 // Update previously recorded erratum insn with relocated
2028 // version.
2029 Insntype* ip =
2030 reinterpret_cast<Insntype*>(
2031 pview.view + view_offset + stub->sh_offset());
2032 Insntype insn_to_fix = ip[0];
2033 stub->update_erratum_insn(insn_to_fix);
2034
2035 // First try to see if erratum is 843419 and if it can be fixed
2036 // without using branch-to-stub.
2037 if (!try_fix_erratum_843419_optimized(stub, view_offset, pview))
2038 {
2039 // Replace the erratum insn with a branch-to-stub.
2040 AArch64_address stub_address =
2041 stub_table->erratum_stub_address(stub);
2042 unsigned int b_offset = stub_address - stub->erratum_address();
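// The unconditional branch (B) instruction encodes a signed 26-bit word
// offset, i.e. a +/-128MB byte range; the 0xfffffff mask below keeps the
// low 28 bits of the byte offset that construct_b encodes.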
2043 AArch64_relocate_functions<size, big_endian>::construct_b(
2044 pview.view + view_offset + stub->sh_offset(),
2045 b_offset & 0xfffffff);
2046 }
2047
2048 // The erratum fix is done (or skipped); continue to relocate the
2049 // erratum stub. Note that when the erratum fix is skipped (either
2050 // because we proactively changed the code sequence or because the code
2051 // sequence was changed by relaxation, etc.), we can still safely
2052 // relocate the erratum stub, ignoring the fact that it will never be
2053 // executed.
2054 stub_table->relocate_erratum_stub(
2055 stub,
2056 pview.view + (stub_table->address() - pview.address));
2057
2058 // Next erratum stub.
2059 ++p;
2060 }
2061 }
2062 }
2063
2064
2065 // This is an optimization for erratum 843419. The erratum sequence must begin
2066 // with 'adrp'; when the final value computed by the adrp also fits in an adr,
2067 // we can simply replace the 'adrp' with an 'adr', saving two jumps per
2068 // occurrence. (Note, however, that in this case we do not delete the erratum
2069 // stub (it is too late to do so); it is merely emitted without ever being called.)
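// (Roughly, erratum 843419 affects Cortex-A53 cores: an adrp in one of the
// last two instruction slots of a 4KB page, followed by a particular
// load/store sequence, may compute a wrong address, hence the adrp rewrite
// here or the branch to a veneer stub otherwise.)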
2070
2071 template<int size, bool big_endian>
2072 bool
2073 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
2074 The_erratum_stub* stub, AArch64_address view_offset,
2075 typename Sized_relobj_file<size, big_endian>::View_size& pview)
2076 {
2077 if (stub->type() != ST_E_843419)
2078 return false;
2079
2080 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2081 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2082 E843419_stub<size, big_endian>* e843419_stub =
2083 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2084 AArch64_address pc =
2085 pview.address + view_offset + e843419_stub->adrp_sh_offset();
2086 unsigned int adrp_offset = e843419_stub->adrp_sh_offset();
2087 Insntype* adrp_view =
2088 reinterpret_cast<Insntype*>(pview.view + view_offset + adrp_offset);
2089 Insntype adrp_insn = adrp_view[0];
2090
2091 // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come
2092 // from IE -> LE relaxation etc. As a side effect of that TLS relaxation the
2093 // ADRP has been turned into an MRS, so there is no erratum risk anymore.
2094 // Therefore, we return true to avoid inserting an unnecessary branch-to-stub.
2095 if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn))
2096 return true;
2097
2098 // If the instruction at adrp_sh_offset is not ADRP and the instruction before
2099 // it is "mrs R, tpidr_el0", it may come from LD -> LE relaxation etc. As in
2100 // the case above, there is no erratum risk any more and we can safely return
2101 // true.
2102 if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset)
2103 {
2104 Insntype* prev_view =
2105 reinterpret_cast<Insntype*>(
2106 pview.view + view_offset + adrp_offset - 4);
2107 Insntype prev_insn = prev_view[0];
2108
2109 if (Insn_utilities::is_mrs_tpidr_el0(prev_insn))
2110 return true;
2111 }
2112
2113 // If we reach here, the first instruction must be ADRP.
2114 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2115 // Get adrp 33-bit signed imm value.
2116 int64_t adrp_imm = Insn_utilities::
2117 aarch64_adrp_decode_imm(adrp_insn);
2118 // adrp - final value transferred to target register is calculated as:
2119 // PC[11:0] = Zeros(12)
2120 // adrp_dest_value = PC + adrp_imm;
2121 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2122 // adr - final value transferred to target register is calculated as:
2123 // PC + adr_imm
2124 // So we have:
2125 // PC + adr_imm = adrp_dest_value
2126 // ==>
2127 // adr_imm = adrp_dest_value - PC
2128 int64_t adr_imm = adrp_dest_value - pc;
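// For example (hypothetical values): with pc = 0x2a34 and adrp_imm = 0x11000,
// adrp_dest_value = (0x2a34 & ~0xfff) + 0x11000 = 0x13000 and
// adr_imm = 0x13000 - 0x2a34 = 0x105cc, which is within +/-2^20 and can
// therefore be encoded directly in an adr.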
2129 // Check if imm fits in adr (21-bit signed).
2130 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2131 {
2132 // Convert 'adrp' into 'adr'.
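// ADR and ADRP share the same encoding except for bit 31 (0 for ADR, 1 for
// ADRP), so clearing bit 31 yields an ADR whose immediate we then re-encode.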
2133 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2134 adr_insn = Insn_utilities::
2135 aarch64_adr_encode_imm(adr_insn, adr_imm);
2136 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2137 return true;
2138 }
2139 return false;
2140 }
2141
2142
2143 // Relocate sections.
2144
2145 template<int size, bool big_endian>
2146 void
2147 AArch64_relobj<size, big_endian>::do_relocate_sections(
2148 const Symbol_table* symtab, const Layout* layout,
2149 const unsigned char* pshdrs, Output_file* of,
2150 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2151 {
2152 // Relocate the section data.
2153 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2154 1, this->shnum() - 1);
2155
2156 // We do not generate stubs if doing a relocatable link.
2157 if (parameters->options().relocatable())
2158 return;
2159
2160 // This part only relocates erratum stubs that belong to input sections of this
2161 // object file.
2162 if (parameters->options().fix_cortex_a53_843419()
2163 || parameters->options().fix_cortex_a53_835769())
2164 this->fix_errata_and_relocate_erratum_stubs(pviews);
2165
2166 Relocate_info<size, big_endian> relinfo;
2167 relinfo.symtab = symtab;
2168 relinfo.layout = layout;
2169 relinfo.object = this;
2170
2171 // This part relocates all reloc stubs that are contained in stub_tables of
2172 // this object file.
2173 unsigned int shnum = this->shnum();
2174 The_target_aarch64* target = The_target_aarch64::current_target();
2175
2176 for (unsigned int i = 1; i < shnum; ++i)
2177 {
2178 The_aarch64_input_section* aarch64_input_section =
2179 target->find_aarch64_input_section(this, i);
2180 if (aarch64_input_section != NULL
2181 && aarch64_input_section->is_stub_table_owner()
2182 && !aarch64_input_section->stub_table()->empty())
2183 {
2184 Output_section* os = this->output_section(i);
2185 gold_assert(os != NULL);
2186
2187 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2188 relinfo.reloc_shdr = NULL;
2189 relinfo.data_shndx = i;
2190 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2191
2192 typename Sized_relobj_file<size, big_endian>::View_size&
2193 view_struct = (*pviews)[i];
2194 gold_assert(view_struct.view != NULL);
2195
2196 The_stub_table* stub_table = aarch64_input_section->stub_table();
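// The view covers the whole relaxed input section, i.e. the original
// section contents followed by its stub table, so the stub table's data
// starts at this offset within the view.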
2197 off_t offset = stub_table->address() - view_struct.address;
2198 unsigned char* view = view_struct.view + offset;
2199 AArch64_address address = stub_table->address();
2200 section_size_type view_size = stub_table->data_size();
2201 stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
2202 view_size);
2203 }
2204 }
2205 }
2206
2207
2208 // Determine if an input section is scannable for stub processing. SHDR is
2209 // the header of the section and SHNDX is the section index. OS is the output
2210 // section for the input section and SYMTAB is the global symbol table used to
2211 // look up ICF information.
2212
2213 template<int size, bool big_endian>
2214 bool
2215 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2216 const elfcpp::Shdr<size, big_endian>& text_shdr,
2217 unsigned int text_shndx,
2218 const Output_section* os,
2219 const Symbol_table* symtab)
2220 {
2221 // Skip any empty sections, unallocated sections or sections whose
2222 // type is not SHT_PROGBITS.
2223 if (text_shdr.get_sh_size() == 0
2224 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2225 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2226 return false;
2227
2228 // Skip any discarded or ICF'ed sections.
2229 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2230 return false;
2231
2232 // Skip exception frame.
2233 if (strcmp(os->name(), ".eh_frame") == 0)
2234 return false;
2235
2236 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2237 os->find_relaxed_input_section(this, text_shndx) != NULL);
2238
2239 return true;
2240 }
2241
2242
2243 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2244 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2245
2246 template<int size, bool big_endian>
2247 bool
2248 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2249 const elfcpp::Shdr<size, big_endian>& shdr,
2250 const Relobj::Output_sections& out_sections,
2251 const Symbol_table* symtab,
2252 const unsigned char* pshdrs)
2253 {
2254 unsigned int sh_type = shdr.get_sh_type();
2255 if (sh_type != elfcpp::SHT_RELA)
2256 return false;
2257
2258 // Ignore empty section.
2259 off_t sh_size = shdr.get_sh_size();
2260 if (sh_size == 0)
2261 return false;
2262
2263 // Ignore reloc section with unexpected symbol table. The
2264 // error will be reported in the final link.
2265 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2266 return false;
2267
2268 gold_assert(sh_type == elfcpp::SHT_RELA);
2269 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2270
2271 // Ignore reloc section with unexpected entsize or uneven size.
2272 // The error will be reported in the final link.
2273 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2274 return false;
2275
2276 // Ignore reloc section with bad info. This error will be
2277 // reported in the final link.
2278 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2279 if (text_shndx >= this->shnum())
2280 return false;
2281
2282 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2283 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2284 text_shndx * shdr_size);
2285 return this->text_section_is_scannable(text_shdr, text_shndx,
2286 out_sections[text_shndx], symtab);
2287 }
2288
2289
2290 // Scan section SHNDX for erratum 843419 and 835769.
2291
2292 template<int size, bool big_endian>
2293 void
2294 AArch64_relobj<size, big_endian>::scan_errata(
2295 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2296 Output_section* os, const Symbol_table* symtab,
2297 The_target_aarch64* target)
2298 {
2299 if (shdr.get_sh_size() == 0
2300 || (shdr.get_sh_flags() &
2301 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2302 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2303 return;
2304
2305 if (!os || symtab->is_section_folded(this, shndx)) return;
2306
2307 AArch64_address output_offset = this->get_output_section_offset(shndx);
2308 AArch64_address output_address;
2309 if (output_offset != invalid_address)
2310 output_address = os->address() + output_offset;
2311 else
2312 {
2313 const Output_relaxed_input_section* poris =
2314 os->find_relaxed_input_section(this, shndx);
2315 if (!poris) return;
2316 output_address = poris->address();
2317 }
2318
2319 // Update the addresses in previously generated erratum stubs. Unlike
2320 // relocation-stub scanning, erratum scanning does not revisit erratum
2321 // instances that were already found, so if section addresses have changed
2322 // due to other relaxations we must refresh the recorded addresses here.
2323 The_stub_table* stub_table = this->stub_table(shndx);
2324 if (stub_table)
2325 {
2326 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
2327 ipair(stub_table->find_erratum_stubs_for_input_section(this, shndx));
2328 for (Erratum_stub_set_iter p = ipair.first; p != ipair.second; ++p)
2329 (*p)->update_erratum_address(output_address);
2330 }
2331
2332 section_size_type input_view_size = 0;
2333 const unsigned char* input_view =
2334 this->section_contents(shndx, &input_view_size, false);
2335
2336 Mapping_symbol_position section_start(shndx, 0);
2337 // Find the first mapping symbol record within section shndx.
2338 typename Mapping_symbol_info::const_iterator p =
2339 this->mapping_symbol_info_.lower_bound(section_start);
2340 while (p != this->mapping_symbol_info_.end() &&
2341 p->first.shndx_ == shndx)
2342 {
2343 typename Mapping_symbol_info::const_iterator prev = p;
2344 ++p;
2345 if (prev->second == 'x')
2346 {
2347 section_size_type span_start =
2348 convert_to_section_size_type(prev->first.offset_);
2349 section_size_type span_end;
2350 if (p != this->mapping_symbol_info_.end()
2351 && p->first.shndx_ == shndx)
2352 span_end = convert_to_section_size_type(p->first.offset_);
2353 else
2354 span_end = convert_to_section_size_type(shdr.get_sh_size());
2355
2356 // We do not share the scanning code between the two errata. For 843419,
2357 // only the last few insns of each page are examined, which is fast,
2358 // whereas for 835769 every insn pair needs to be checked.
2359
2360 if (parameters->options().fix_cortex_a53_843419())
2361 target->scan_erratum_843419_span(
2362 this, shndx, span_start, span_end,
2363 const_cast<unsigned char*>(input_view), output_address);
2364
2365 if (parameters->options().fix_cortex_a53_835769())
2366 target->scan_erratum_835769_span(
2367 this, shndx, span_start, span_end,
2368 const_cast<unsigned char*>(input_view), output_address);
2369 }
2370 }
2371 }
2372
2373
2374 // Scan relocations for stub generation.
2375
2376 template<int size, bool big_endian>
2377 void
2378 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2379 The_target_aarch64* target,
2380 const Symbol_table* symtab,
2381 const Layout* layout)
2382 {
2383 unsigned int shnum = this->shnum();
2384 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2385
2386 // Read the section headers.
2387 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2388 shnum * shdr_size,
2389 true, true);
2390
2391 // To speed up processing, we set up hash tables for fast lookup of
2392 // input offsets to output addresses.
2393 this->initialize_input_to_output_maps();
2394
2395 const Relobj::Output_sections& out_sections(this->output_sections());
2396
2397 Relocate_info<size, big_endian> relinfo;
2398 relinfo.symtab = symtab;
2399 relinfo.layout = layout;
2400 relinfo.object = this;
2401
2402 // Do relocation stubs scanning.
2403 const unsigned char* p = pshdrs + shdr_size;
2404 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2405 {
2406 const elfcpp::Shdr<size, big_endian> shdr(p);
2407 if (parameters->options().fix_cortex_a53_843419()
2408 || parameters->options().fix_cortex_a53_835769())
2409 scan_errata(i, shdr, out_sections[i], symtab, target);
2410 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2411 pshdrs))
2412 {
2413 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2414 AArch64_address output_offset =
2415 this->get_output_section_offset(index);
2416 AArch64_address output_address;
2417 if (output_offset != invalid_address)
2418 {
2419 output_address = out_sections[index]->address() + output_offset;
2420 }
2421 else
2422 {
2423 // Currently this only happens for a relaxed section.
2424 const Output_relaxed_input_section* poris =
2425 out_sections[index]->find_relaxed_input_section(this, index);
2426 gold_assert(poris != NULL);
2427 output_address = poris->address();
2428 }
2429
2430 // Get the relocations.
2431 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2432 shdr.get_sh_size(),
2433 true, false);
2434
2435 // Get the section contents.
2436 section_size_type input_view_size = 0;
2437 const unsigned char* input_view =
2438 this->section_contents(index, &input_view_size, false);
2439
2440 relinfo.reloc_shndx = i;
2441 relinfo.data_shndx = index;
2442 unsigned int sh_type = shdr.get_sh_type();
2443 unsigned int reloc_size;
2444 gold_assert (sh_type == elfcpp::SHT_RELA);
2445 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2446
2447 Output_section* os = out_sections[index];
2448 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2449 shdr.get_sh_size() / reloc_size,
2450 os,
2451 output_offset == invalid_address,
2452 input_view, output_address,
2453 input_view_size);
2454 }
2455 }
2456 }
2457
2458
2459 // A class to wrap an ordinary input section containing executable code.
2460
2461 template<int size, bool big_endian>
2462 class AArch64_input_section : public Output_relaxed_input_section
2463 {
2464 public:
2465 typedef Stub_table<size, big_endian> The_stub_table;
2466
2467 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2468 : Output_relaxed_input_section(relobj, shndx, 1),
2469 stub_table_(NULL),
2470 original_contents_(NULL), original_size_(0),
2471 original_addralign_(1)
2472 { }
2473
2474 ~AArch64_input_section()
2475 { delete[] this->original_contents_; }
2476
2477 // Initialize.
2478 void
2479 init();
2480
2481 // Set the stub_table.
2482 void
2483 set_stub_table(The_stub_table* st)
2484 { this->stub_table_ = st; }
2485
2486 // Whether this is a stub table owner.
2487 bool
2488 is_stub_table_owner() const
2489 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2490
2491 // Return the original size of the section.
2492 uint32_t
2493 original_size() const
2494 { return this->original_size_; }
2495
2496 // Return the stub table.
2497 The_stub_table*
2498 stub_table()
2499 { return stub_table_; }
2500
2501 protected:
2502 // Write out this input section.
2503 void
2504 do_write(Output_file*);
2505
2506 // Return required alignment of this.
2507 uint64_t
2508 do_addralign() const
2509 {
2510 if (this->is_stub_table_owner())
2511 return std::max(this->stub_table_->addralign(),
2512 static_cast<uint64_t>(this->original_addralign_));
2513 else
2514 return this->original_addralign_;
2515 }
2516
2517 // Finalize data size.
2518 void
2519 set_final_data_size();
2520
2521 // Reset address and file offset.
2522 void
2523 do_reset_address_and_file_offset();
2524
2525 // Output offset.
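// An offset within the original input section maps unchanged to the output
// offset, because the original contents are placed at the start of this
// relaxed section and the stub table follows them.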
2526 bool
2527 do_output_offset(const Relobj* object, unsigned int shndx,
2528 section_offset_type offset,
2529 section_offset_type* poutput) const
2530 {
2531 if ((object == this->relobj())
2532 && (shndx == this->shndx())
2533 && (offset >= 0)
2534 && (offset <=
2535 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2536 {
2537 *poutput = offset;
2538 return true;
2539 }
2540 else
2541 return false;
2542 }
2543
2544 private:
2545 // Copying is not allowed.
2546 AArch64_input_section(const AArch64_input_section&);
2547 AArch64_input_section& operator=(const AArch64_input_section&);
2548
2549 // The relocation stubs.
2550 The_stub_table* stub_table_;
2551 // Original section contents. We have to make a copy here since the file
2552 // containing the original section may not be locked when we need to access
2553 // the contents.
2554 unsigned char* original_contents_;
2555 // Section size of the original input section.
2556 uint32_t original_size_;
2557 // Address alignment of the original input section.
2558 uint32_t original_addralign_;
2559 }; // End of AArch64_input_section
2560
2561
2562 // Finalize data size.
2563
2564 template<int size, bool big_endian>
2565 void
2566 AArch64_input_section<size, big_endian>::set_final_data_size()
2567 {
2568 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2569
2570 if (this->is_stub_table_owner())
2571 {
2572 this->stub_table_->finalize_data_size();
2573 off = align_address(off, this->stub_table_->addralign());
2574 off += this->stub_table_->data_size();
2575 }
2576 this->set_data_size(off);
2577 }
2578
2579
2580 // Reset address and file offset.
2581
2582 template<int size, bool big_endian>
2583 void
2584 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2585 {
2586 // Size of the original input section contents.
2587 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2588
2589 // If this is a stub table owner, account for the stub table size.
2590 if (this->is_stub_table_owner())
2591 {
2592 The_stub_table* stub_table = this->stub_table_;
2593
2594 // Reset the stub table's address and file offset. The current data
2595 // size of the child will be updated afterwards.
2596 stub_table_->reset_address_and_file_offset();
2597 off = align_address(off, stub_table_->addralign());
2598 off += stub_table->current_data_size();
2599 }
2600
2601 this->set_current_data_size(off);
2602 }
2603
2604
2605 // Initialize an AArch64_input_section.
2606
2607 template<int size, bool big_endian>
2608 void
2609 AArch64_input_section<size, big_endian>::init()
2610 {
2611 Relobj* relobj = this->relobj();
2612 unsigned int shndx = this->shndx();
2613
2614 // We have to cache original size, alignment and contents to avoid locking
2615 // the original file.
2616 this->original_addralign_ =
2617 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2618
2619 // This is not efficient but we expect only a small number of relaxed
2620 // input sections for stubs.
2621 section_size_type section_size;
2622 const unsigned char* section_contents =
2623 relobj->section_contents(shndx, &section_size, false);
2624 this->original_size_ =
2625 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2626
2627 gold_assert(this->original_contents_ == NULL);
2628 this->original_contents_ = new unsigned char[section_size];
2629 memcpy(this->original_contents_, section_contents, section_size);
2630
2631 // We want to make this look like the original input section after
2632 // output sections are finalized.
2633 Output_section* os = relobj->output_section(shndx);
2634 off_t offset = relobj->output_section_offset(shndx);
2635 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2636 this->set_address(os->address() + offset);
2637 this->set_file_offset(os->offset() + offset);
2638 this->set_current_data_size(this->original_size_);
2639 this->finalize_data_size();
2640 }
2641
2642
2643 // Write data to output file.
2644
2645 template<int size, bool big_endian>
2646 void
2647 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2648 {
2649 // We have to write out the original section content.
2650 gold_assert(this->original_contents_ != NULL);
2651 of->write(this->offset(), this->original_contents_,
2652 this->original_size_);
2653
2654 // If this owns a stub table and it is not empty, write it.
2655 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2656 this->stub_table_->write(of);
2657 }
2658
2659
2660 // AArch64 output section class. This is defined mainly to add a number of
2661 // stub generation methods.
2662
2663 template<int size, bool big_endian>
2664 class AArch64_output_section : public Output_section
2665 {
2666 public:
2667 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2668 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2669 typedef Stub_table<size, big_endian> The_stub_table;
2670 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2671
2672 public:
2673 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2674 elfcpp::Elf_Xword flags)
2675 : Output_section(name, type, flags)
2676 { }
2677
2678 ~AArch64_output_section() {}
2679
2680 // Group input sections for stub generation.
2681 void
2682 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2683 const Task*);
2684
2685 private:
2686 typedef Output_section::Input_section Input_section;
2687 typedef Output_section::Input_section_list Input_section_list;
2688
2689 // Create a stub group.
2690 void
2691 create_stub_group(Input_section_list::const_iterator,
2692 Input_section_list::const_iterator,
2693 Input_section_list::const_iterator,
2694 The_target_aarch64*,
2695 std::vector<Output_relaxed_input_section*>&,
2696 const Task*);
2697 }; // End of AArch64_output_section
2698
2699
2700 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2701 // the input section that will be the owner of the stub table.
2702
2703 template<int size, bool big_endian> void
2704 AArch64_output_section<size, big_endian>::create_stub_group(
2705 Input_section_list::const_iterator first,
2706 Input_section_list::const_iterator last,
2707 Input_section_list::const_iterator owner,
2708 The_target_aarch64* target,
2709 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2710 const Task* task)
2711 {
2712 // Currently we convert ordinary input sections into relaxed sections only
2713 // at this point.
2714 The_aarch64_input_section* input_section;
2715 if (owner->is_relaxed_input_section())
2716 gold_unreachable();
2717 else
2718 {
2719 gold_assert(owner->is_input_section());
2720 // Create a new relaxed input section. We need to lock the original
2721 // file.
2722 Task_lock_obj<Object> tl(task, owner->relobj());
2723 input_section =
2724 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2725 new_relaxed_sections.push_back(input_section);
2726 }
2727
2728 // Create a stub table.
2729 The_stub_table* stub_table =
2730 target->new_stub_table(input_section);
2731
2732 input_section->set_stub_table(stub_table);
2733
2734 Input_section_list::const_iterator p = first;
2735 // Look for input sections or relaxed input sections in [first ... last].
2736 do
2737 {
2738 if (p->is_input_section() || p->is_relaxed_input_section())
2739 {
2740 // The stub table information for input sections lives
2741 // in their objects.
2742 The_aarch64_relobj* aarch64_relobj =
2743 static_cast<The_aarch64_relobj*>(p->relobj());
2744 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2745 }
2746 }
2747 while (p++ != last);
2748 }
2749
2750
2751 // Group input sections for stub generation. GROUP_SIZE is roughly the limit
2752 // of a stub group's size. We grow a stub group by adding input sections until
2753 // the size is just below GROUP_SIZE. The last input section will be converted
2754 // into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2755 // input sections after the stub table, effectively doubling the group size.
2756 //
2757 // This is similar to the group_sections() function in elf32-arm.c but is
2758 // implemented differently.
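// For example (hypothetical sizes, assuming STUBS_ALWAYS_AFTER_BRANCH is
// true): with a GROUP_SIZE of 32MB and three 12MB input sections A, B and C,
// A and B form one group whose last member B owns the stub table, and C
// starts a new group of its own.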
2759
2760 template<int size, bool big_endian>
2761 void AArch64_output_section<size, big_endian>::group_sections(
2762 section_size_type group_size,
2763 bool stubs_always_after_branch,
2764 Target_aarch64<size, big_endian>* target,
2765 const Task* task)
2766 {
2767 typedef enum
2768 {
2769 NO_GROUP,
2770 FINDING_STUB_SECTION,
2771 HAS_STUB_SECTION
2772 } State;
2773
2774 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2775
2776 State state = NO_GROUP;
2777 section_size_type off = 0;
2778 section_size_type group_begin_offset = 0;
2779 section_size_type group_end_offset = 0;
2780 section_size_type stub_table_end_offset = 0;
2781 Input_section_list::const_iterator group_begin =
2782 this->input_sections().end();
2783 Input_section_list::const_iterator stub_table =
2784 this->input_sections().end();
2785 Input_section_list::const_iterator group_end = this->input_sections().end();
2786 for (Input_section_list::const_iterator p = this->input_sections().begin();
2787 p != this->input_sections().end();
2788 ++p)
2789 {
2790 section_size_type section_begin_offset =
2791 align_address(off, p->addralign());
2792 section_size_type section_end_offset =
2793 section_begin_offset + p->data_size();
2794
2795 // Check to see if we should group the previously seen sections.
2796 switch (state)
2797 {
2798 case NO_GROUP:
2799 break;
2800
2801 case FINDING_STUB_SECTION:
2802 // Adding this section makes the group larger than GROUP_SIZE.
2803 if (section_end_offset - group_begin_offset >= group_size)
2804 {
2805 if (stubs_always_after_branch)
2806 {
2807 gold_assert(group_end != this->input_sections().end());
2808 this->create_stub_group(group_begin, group_end, group_end,
2809 target, new_relaxed_sections,
2810 task);
2811 state = NO_GROUP;
2812 }
2813 else
2814 {
2815 // Input sections up to stub_group_size bytes after the stub
2816 // table can be handled by it too.
2817 state = HAS_STUB_SECTION;
2818 stub_table = group_end;
2819 stub_table_end_offset = group_end_offset;
2820 }
2821 }
2822 break;
2823
2824 case HAS_STUB_SECTION:
2825 // Adding this section makes the post stub-section group larger
2826 // than GROUP_SIZE.
2827 gold_unreachable();
2828 // NOT SUPPORTED YET. For completeness only.
2829 if (section_end_offset - stub_table_end_offset >= group_size)
2830 {
2831 gold_assert(group_end != this->input_sections().end());
2832 this->create_stub_group(group_begin, group_end, stub_table,
2833 target, new_relaxed_sections, task);
2834 state = NO_GROUP;
2835 }
2836 break;
2837
2838 default:
2839 gold_unreachable();
2840 }
2841
2842 // If we see an input section and currently there is no group, start
2843 // a new one. Skip any empty sections. We look at the data size
2844 // instead of calling p->relobj()->section_size() to avoid locking.
2845 if ((p->is_input_section() || p->is_relaxed_input_section())
2846 && (p->data_size() != 0))
2847 {
2848 if (state == NO_GROUP)
2849 {
2850 state = FINDING_STUB_SECTION;
2851 group_begin = p;
2852 group_begin_offset = section_begin_offset;
2853 }
2854
2855 // Keep track of the last input section seen.
2856 group_end = p;
2857 group_end_offset = section_end_offset;
2858 }
2859
2860 off = section_end_offset;
2861 }
2862
2863 // Create a stub group for any ungrouped sections.
2864 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2865 {
2866 gold_assert(group_end != this->input_sections().end());
2867 this->create_stub_group(group_begin, group_end,
2868 (state == FINDING_STUB_SECTION
2869 ? group_end
2870 : stub_table),
2871 target, new_relaxed_sections, task);
2872 }
2873
2874 if (!new_relaxed_sections.empty())
2875 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2876
2877 // Update the section offsets of the newly converted relaxed sections.
2878 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2879 {
2880 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2881 new_relaxed_sections[i]->relobj());
2882 unsigned int shndx = new_relaxed_sections[i]->shndx();
2883 // Tell AArch64_relobj that this input section is converted.
2884 relobj->convert_input_section_to_relaxed_section(shndx);
2885 }
2886 } // End of AArch64_output_section::group_sections
2887
2888
2889 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2890
2891
2892 // The aarch64 target class.
2893 // See the ABI at
2894 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2895 template<int size, bool big_endian>
2896 class Target_aarch64 : public Sized_target<size, big_endian>
2897 {
2898 public:
2899 typedef Target_aarch64<size, big_endian> This;
2900 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2901 Reloc_section;
2902 typedef Relocate_info<size, big_endian> The_relocate_info;
2903 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2904 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2905 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2906 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2907 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2908 typedef Stub_table<size, big_endian> The_stub_table;
2909 typedef std::vector<The_stub_table*> Stub_table_list;
2910 typedef typename Stub_table_list::iterator Stub_table_iterator;
2911 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2912 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2913 typedef Unordered_map<Section_id,
2914 AArch64_input_section<size, big_endian>*,
2915 Section_id_hash> AArch64_input_section_map;
2916 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
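// The thread control block is two pointer-sized words, so 16 bytes for the
// 64-bit target and 8 bytes when size is 32.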
2917 const static int TCB_SIZE = size / 8 * 2;
2918
2919 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2920 : Sized_target<size, big_endian>(info),
2921 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2922 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2923 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2924 got_mod_index_offset_(-1U),
2925 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2926 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2927 { }
2928
2929 // Scan the relocations to determine unreferenced sections for
2930 // garbage collection.
2931 void
2932 gc_process_relocs(Symbol_table* symtab,
2933 Layout* layout,
2934 Sized_relobj_file<size, big_endian>* object,
2935 unsigned int data_shndx,
2936 unsigned int sh_type,
2937 const unsigned char* prelocs,
2938 size_t reloc_count,
2939 Output_section* output_section,
2940 bool needs_special_offset_handling,
2941 size_t local_symbol_count,
2942 const unsigned char* plocal_symbols);
2943
2944 // Scan the relocations to look for symbol adjustments.
2945 void
2946 scan_relocs(Symbol_table* symtab,
2947 Layout* layout,
2948 Sized_relobj_file<size, big_endian>* object,
2949 unsigned int data_shndx,
2950 unsigned int sh_type,
2951 const unsigned char* prelocs,
2952 size_t reloc_count,
2953 Output_section* output_section,
2954 bool needs_special_offset_handling,
2955 size_t local_symbol_count,
2956 const unsigned char* plocal_symbols);
2957
2958 // Finalize the sections.
2959 void
2960 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2961
2962 // Return the value to use for a dynamic symbol which requires special
2963 // treatment.
2964 uint64_t
2965 do_dynsym_value(const Symbol*) const;
2966
2967 // Relocate a section.
2968 void
2969 relocate_section(const Relocate_info<size, big_endian>*,
2970 unsigned int sh_type,
2971 const unsigned char* prelocs,
2972 size_t reloc_count,
2973 Output_section* output_section,
2974 bool needs_special_offset_handling,
2975 unsigned char* view,
2976 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2977 section_size_type view_size,
2978 const Reloc_symbol_changes*);
2979
2980 // Scan the relocs during a relocatable link.
2981 void
2982 scan_relocatable_relocs(Symbol_table* symtab,
2983 Layout* layout,
2984 Sized_relobj_file<size, big_endian>* object,
2985 unsigned int data_shndx,
2986 unsigned int sh_type,
2987 const unsigned char* prelocs,
2988 size_t reloc_count,
2989 Output_section* output_section,
2990 bool needs_special_offset_handling,
2991 size_t local_symbol_count,
2992 const unsigned char* plocal_symbols,
2993 Relocatable_relocs*);
2994
2995 // Scan the relocs for --emit-relocs.
2996 void
2997 emit_relocs_scan(Symbol_table* symtab,
2998 Layout* layout,
2999 Sized_relobj_file<size, big_endian>* object,
3000 unsigned int data_shndx,
3001 unsigned int sh_type,
3002 const unsigned char* prelocs,
3003 size_t reloc_count,
3004 Output_section* output_section,
3005 bool needs_special_offset_handling,
3006 size_t local_symbol_count,
3007 const unsigned char* plocal_syms,
3008 Relocatable_relocs* rr);
3009
3010 // Relocate a section during a relocatable link.
3011 void
3012 relocate_relocs(
3013 const Relocate_info<size, big_endian>*,
3014 unsigned int sh_type,
3015 const unsigned char* prelocs,
3016 size_t reloc_count,
3017 Output_section* output_section,
3018 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
3019 unsigned char* view,
3020 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
3021 section_size_type view_size,
3022 unsigned char* reloc_view,
3023 section_size_type reloc_view_size);
3024
3025 // Return the symbol index to use for a target specific relocation.
3026 // The only target specific relocation is R_AARCH64_TLSDESC for a
3027 // local symbol, which is an absolute reloc.
3028 unsigned int
3029 do_reloc_symbol_index(void*, unsigned int r_type) const
3030 {
3031 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
3032 return 0;
3033 }
3034
3035 // Return the addend to use for a target specific relocation.
3036 uint64_t
3037 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
3038
3039 // Return the PLT address to use for a global or local symbol.
3040 uint64_t
3041 do_plt_address_for_global(const Symbol* gsym) const
3042 { return this->plt_section()->address_for_global(gsym); }
3043
3044 uint64_t
3045 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
3046 { return this->plt_section()->address_for_local(relobj, symndx); }
3047
3048 // This function should be defined in targets that can use relocation
3049 // types to determine whether a function's pointer is taken (implemented
3050 // in local_reloc_may_be_function_pointer and
3051 // global_reloc_may_be_function_pointer). ICF uses this in safe mode to
3052 // only fold those functions whose pointer is definitely not taken.
3053 bool
3054 do_can_check_for_function_pointers() const
3055 { return true; }
3056
3057 // Return the number of entries in the PLT.
3058 unsigned int
3059 plt_entry_count() const;
3060
3061 // Return the offset of the first non-reserved PLT entry.
3062 unsigned int
3063 first_plt_entry_offset() const;
3064
3065 // Return the size of each PLT entry.
3066 unsigned int
3067 plt_entry_size() const;
3068
3069 // Create a stub table.
3070 The_stub_table*
3071 new_stub_table(The_aarch64_input_section*);
3072
3073 // Create an aarch64 input section.
3074 The_aarch64_input_section*
3075 new_aarch64_input_section(Relobj*, unsigned int);
3076
3077 // Find an aarch64 input section instance for a given OBJ and SHNDX.
3078 The_aarch64_input_section*
3079 find_aarch64_input_section(Relobj*, unsigned int) const;
3080
3081 // Return the thread control block size.
3082 unsigned int
3083 tcb_size() const { return This::TCB_SIZE; }
3084
3085 // Scan a section for stub generation.
3086 void
3087 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
3088 const unsigned char*, size_t, Output_section*,
3089 bool, const unsigned char*,
3090 Address,
3091 section_size_type);
3092
3093 // Scan a relocation section for stubs.
3094 template<int sh_type>
3095 void
3096 scan_reloc_section_for_stubs(
3097 const The_relocate_info* relinfo,
3098 const unsigned char* prelocs,
3099 size_t reloc_count,
3100 Output_section* output_section,
3101 bool needs_special_offset_handling,
3102 const unsigned char* view,
3103 Address view_address,
3104 section_size_type);
3105
3106 // Relocate a single reloc stub.
3107 void
3108 relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
3109 Output_section*, unsigned char*, Address,
3110 section_size_type);
3111
3112 // Get the default AArch64 target.
3113 static This*
3114 current_target()
3115 {
3116 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
3117 && parameters->target().get_size() == size
3118 && parameters->target().is_big_endian() == big_endian);
3119 return static_cast<This*>(parameters->sized_target<size, big_endian>());
3120 }
3121
3122
3123 // Scan erratum 843419 for a part of a section.
3124 void
3125 scan_erratum_843419_span(
3126 AArch64_relobj<size, big_endian>*,
3127 unsigned int,
3128 const section_size_type,
3129 const section_size_type,
3130 unsigned char*,
3131 Address);
3132
3133 // Scan erratum 835769 for a part of a section.
3134 void
3135 scan_erratum_835769_span(
3136 AArch64_relobj<size, big_endian>*,
3137 unsigned int,
3138 const section_size_type,
3139 const section_size_type,
3140 unsigned char*,
3141 Address);
3142
3143 protected:
3144 void
3145 do_select_as_default_target()
3146 {
3147 gold_assert(aarch64_reloc_property_table == NULL);
3148 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3149 }
3150
3151 // Add a new reloc argument, returning the index in the vector.
3152 size_t
3153 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3154 unsigned int r_sym)
3155 {
3156 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3157 return this->tlsdesc_reloc_info_.size() - 1;
3158 }
3159
3160 virtual Output_data_plt_aarch64<size, big_endian>*
3161 do_make_data_plt(Layout* layout,
3162 Output_data_got_aarch64<size, big_endian>* got,
3163 Output_data_space* got_plt,
3164 Output_data_space* got_irelative)
3165 {
3166 return new Output_data_plt_aarch64_standard<size, big_endian>(
3167 layout, got, got_plt, got_irelative);
3168 }
3169
3170
3171 // do_make_elf_object to override the same function in the base class.
3172 Object*
3173 do_make_elf_object(const std::string&, Input_file*, off_t,
3174 const elfcpp::Ehdr<size, big_endian>&);
3175
3176 Output_data_plt_aarch64<size, big_endian>*
3177 make_data_plt(Layout* layout,
3178 Output_data_got_aarch64<size, big_endian>* got,
3179 Output_data_space* got_plt,
3180 Output_data_space* got_irelative)
3181 {
3182 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3183 }
3184
3185 // We only need to generate stubs, and hence perform relaxation, if we
3186 // are not doing a relocatable link.
3187 virtual bool
3188 do_may_relax() const
3189 { return !parameters->options().relocatable(); }
3190
3191 // Relaxation hook. This is where we do stub generation.
3192 virtual bool
3193 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3194
3195 void
3196 group_sections(Layout* layout,
3197 section_size_type group_size,
3198 bool stubs_always_after_branch,
3199 const Task* task);
3200
3201 void
3202 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3203 const Sized_symbol<size>*, unsigned int,
3204 const Symbol_value<size>*,
3205 typename elfcpp::Elf_types<size>::Elf_Swxword,
3206 Address Elf_Addr);
3207
3208 // Make an output section.
3209 Output_section*
3210 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3211 elfcpp::Elf_Xword flags)
3212 { return new The_aarch64_output_section(name, type, flags); }
3213
3214 private:
3215 // The class which scans relocations.
3216 class Scan
3217 {
3218 public:
3219 Scan()
3220 : issued_non_pic_error_(false)
3221 { }
3222
3223 inline void
3224 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3225 Sized_relobj_file<size, big_endian>* object,
3226 unsigned int data_shndx,
3227 Output_section* output_section,
3228 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3229 const elfcpp::Sym<size, big_endian>& lsym,
3230 bool is_discarded);
3231
3232 inline void
3233 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3234 Sized_relobj_file<size, big_endian>* object,
3235 unsigned int data_shndx,
3236 Output_section* output_section,
3237 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3238 Symbol* gsym);
3239
3240 inline bool
3241 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3242 Target_aarch64<size, big_endian>* ,
3243 Sized_relobj_file<size, big_endian>* ,
3244 unsigned int ,
3245 Output_section* ,
3246 const elfcpp::Rela<size, big_endian>& ,
3247 unsigned int r_type,
3248 const elfcpp::Sym<size, big_endian>&);
3249
3250 inline bool
3251 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3252 Target_aarch64<size, big_endian>* ,
3253 Sized_relobj_file<size, big_endian>* ,
3254 unsigned int ,
3255 Output_section* ,
3256 const elfcpp::Rela<size, big_endian>& ,
3257 unsigned int r_type,
3258 Symbol* gsym);
3259
3260 private:
3261 static void
3262 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3263 unsigned int r_type);
3264
3265 static void
3266 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3267 unsigned int r_type, Symbol*);
3268
3269 inline bool
3270 possible_function_pointer_reloc(unsigned int r_type);
3271
3272 void
3273 check_non_pic(Relobj*, unsigned int r_type);
3274
3275 bool
3276 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3277 unsigned int r_type);
3278
3279 // Whether we have issued an error about a non-PIC compilation.
3280 bool issued_non_pic_error_;
3281 };
3282
3283 // The class which implements relocation.
3284 class Relocate
3285 {
3286 public:
3287 Relocate()
3288 : skip_call_tls_get_addr_(false)
3289 { }
3290
3291 ~Relocate()
3292 { }
3293
3294 // Do a relocation. Return false if the caller should not issue
3295 // any warnings about this relocation.
3296 inline bool
3297 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3298 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3299 const Sized_symbol<size>*, const Symbol_value<size>*,
3300 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3301 section_size_type);
3302
3303 private:
3304 inline typename AArch64_relocate_functions<size, big_endian>::Status
3305 relocate_tls(const Relocate_info<size, big_endian>*,
3306 Target_aarch64<size, big_endian>*,
3307 size_t,
3308 const elfcpp::Rela<size, big_endian>&,
3309 unsigned int r_type, const Sized_symbol<size>*,
3310 const Symbol_value<size>*,
3311 unsigned char*,
3312 typename elfcpp::Elf_types<size>::Elf_Addr);
3313
3314 inline typename AArch64_relocate_functions<size, big_endian>::Status
3315 tls_gd_to_le(
3316 const Relocate_info<size, big_endian>*,
3317 Target_aarch64<size, big_endian>*,
3318 const elfcpp::Rela<size, big_endian>&,
3319 unsigned int,
3320 unsigned char*,
3321 const Symbol_value<size>*);
3322
3323 inline typename AArch64_relocate_functions<size, big_endian>::Status
3324 tls_ld_to_le(
3325 const Relocate_info<size, big_endian>*,
3326 Target_aarch64<size, big_endian>*,
3327 const elfcpp::Rela<size, big_endian>&,
3328 unsigned int,
3329 unsigned char*,
3330 const Symbol_value<size>*);
3331
3332 inline typename AArch64_relocate_functions<size, big_endian>::Status
3333 tls_ie_to_le(
3334 const Relocate_info<size, big_endian>*,
3335 Target_aarch64<size, big_endian>*,
3336 const elfcpp::Rela<size, big_endian>&,
3337 unsigned int,
3338 unsigned char*,
3339 const Symbol_value<size>*);
3340
3341 inline typename AArch64_relocate_functions<size, big_endian>::Status
3342 tls_desc_gd_to_le(
3343 const Relocate_info<size, big_endian>*,
3344 Target_aarch64<size, big_endian>*,
3345 const elfcpp::Rela<size, big_endian>&,
3346 unsigned int,
3347 unsigned char*,
3348 const Symbol_value<size>*);
3349
3350 inline typename AArch64_relocate_functions<size, big_endian>::Status
3351 tls_desc_gd_to_ie(
3352 const Relocate_info<size, big_endian>*,
3353 Target_aarch64<size, big_endian>*,
3354 const elfcpp::Rela<size, big_endian>&,
3355 unsigned int,
3356 unsigned char*,
3357 const Symbol_value<size>*,
3358 typename elfcpp::Elf_types<size>::Elf_Addr,
3359 typename elfcpp::Elf_types<size>::Elf_Addr);
3360
3361 bool skip_call_tls_get_addr_;
3362
3363 }; // End of class Relocate
3364
3365 // Adjust TLS relocation type based on the options and whether this
3366 // is a local symbol.
3367 static tls::Tls_optimization
3368 optimize_tls_reloc(bool is_final, int r_type);
3369
3370 // Get the GOT section, creating it if necessary.
3371 Output_data_got_aarch64<size, big_endian>*
3372 got_section(Symbol_table*, Layout*);
3373
3374 // Get the GOT PLT section.
3375 Output_data_space*
3376 got_plt_section() const
3377 {
3378 gold_assert(this->got_plt_ != NULL);
3379 return this->got_plt_;
3380 }
3381
3382 // Get the GOT section for TLSDESC entries.
3383 Output_data_got<size, big_endian>*
3384 got_tlsdesc_section() const
3385 {
3386 gold_assert(this->got_tlsdesc_ != NULL);
3387 return this->got_tlsdesc_;
3388 }
3389
3390 // Create the PLT section.
3391 void
3392 make_plt_section(Symbol_table* symtab, Layout* layout);
3393
3394 // Create a PLT entry for a global symbol.
3395 void
3396 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3397
3398 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3399 void
3400 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3401 Sized_relobj_file<size, big_endian>* relobj,
3402 unsigned int local_sym_index);
3403
3404 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3405 void
3406 define_tls_base_symbol(Symbol_table*, Layout*);
3407
3408 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3409 void
3410 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3411
3412 // Create a GOT entry for the TLS module index.
3413 unsigned int
3414 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3415 Sized_relobj_file<size, big_endian>* object);
3416
3417 // Get the PLT section.
3418 Output_data_plt_aarch64<size, big_endian>*
3419 plt_section() const
3420 {
3421 gold_assert(this->plt_ != NULL);
3422 return this->plt_;
3423 }
3424
3425 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3426 // ST_E_843419, we need an additional field for adrp offset.
3427 void create_erratum_stub(
3428 AArch64_relobj<size, big_endian>* relobj,
3429 unsigned int shndx,
3430 section_size_type erratum_insn_offset,
3431 Address erratum_address,
3432 typename Insn_utilities::Insntype erratum_insn,
3433 int erratum_type,
3434 unsigned int e843419_adrp_offset=0);
3435
3436 // Return whether this is a 3-insn erratum sequence.
3437 bool is_erratum_843419_sequence(
3438 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3439 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3440 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3441
3442 // Return whether this is an 835769 sequence.
3443 // (Similarly implemented as in elfnn-aarch64.c.)
3444 bool is_erratum_835769_sequence(
3445 typename elfcpp::Swap<32,big_endian>::Valtype,
3446 typename elfcpp::Swap<32,big_endian>::Valtype);
3447
3448 // Get the dynamic reloc section, creating it if necessary.
3449 Reloc_section*
3450 rela_dyn_section(Layout*);
3451
3452 // Get the section to use for TLSDESC relocations.
3453 Reloc_section*
3454 rela_tlsdesc_section(Layout*) const;
3455
3456 // Get the section to use for IRELATIVE relocations.
3457 Reloc_section*
3458 rela_irelative_section(Layout*);
3459
3460 // Add a potential copy relocation.
3461 void
3462 copy_reloc(Symbol_table* symtab, Layout* layout,
3463 Sized_relobj_file<size, big_endian>* object,
3464 unsigned int shndx, Output_section* output_section,
3465 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3466 {
3467 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3468 this->copy_relocs_.copy_reloc(symtab, layout,
3469 symtab->get_sized_symbol<size>(sym),
3470 object, shndx, output_section,
3471 r_type, reloc.get_r_offset(),
3472 reloc.get_r_addend(),
3473 this->rela_dyn_section(layout));
3474 }
3475
3476 // Information about this specific target which we pass to the
3477 // general Target structure.
3478 static const Target::Target_info aarch64_info;
3479
3480 // The types of GOT entries needed for this platform.
3481 // These values are exposed to the ABI in an incremental link.
3482 // Do not renumber existing values without changing the version
3483 // number of the .gnu_incremental_inputs section.
3484 enum Got_type
3485 {
3486 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3487 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3488 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3489 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3490 };
3491
3492 // This type is used as the argument to the target specific
3493 // relocation routines. The only target specific reloc is
3494   // R_AARCH64_TLSDESC against a local symbol.
3495 struct Tlsdesc_info
3496 {
3497 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3498 unsigned int a_r_sym)
3499 : object(a_object), r_sym(a_r_sym)
3500 { }
3501
3502 // The object in which the local symbol is defined.
3503 Sized_relobj_file<size, big_endian>* object;
3504 // The local symbol index in the object.
3505 unsigned int r_sym;
3506 };
3507
3508 // The GOT section.
3509 Output_data_got_aarch64<size, big_endian>* got_;
3510 // The PLT section.
3511 Output_data_plt_aarch64<size, big_endian>* plt_;
3512 // The GOT PLT section.
3513 Output_data_space* got_plt_;
3514 // The GOT section for IRELATIVE relocations.
3515 Output_data_space* got_irelative_;
3516 // The GOT section for TLSDESC relocations.
3517 Output_data_got<size, big_endian>* got_tlsdesc_;
3518 // The _GLOBAL_OFFSET_TABLE_ symbol.
3519 Symbol* global_offset_table_;
3520 // The dynamic reloc section.
3521 Reloc_section* rela_dyn_;
3522 // The section to use for IRELATIVE relocs.
3523 Reloc_section* rela_irelative_;
3524 // Relocs saved to avoid a COPY reloc.
3525 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3526 // Offset of the GOT entry for the TLS module index.
3527 unsigned int got_mod_index_offset_;
3528 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3529 // specific relocation. Here we store the object and local symbol
3530 // index for the relocation.
3531 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3532 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3533 bool tls_base_symbol_defined_;
3534   // List of stub tables.
3535   Stub_table_list stub_tables_;
3536   // Actual stub group size.
3537 section_size_type stub_group_size_;
3538 AArch64_input_section_map aarch64_input_section_map_;
3539 }; // End of Target_aarch64
3540
3541
3542 template<>
3543 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3544 {
3545 64, // size
3546 false, // is_big_endian
3547 elfcpp::EM_AARCH64, // machine_code
3548 false, // has_make_symbol
3549 false, // has_resolve
3550 false, // has_code_fill
3551 false, // is_default_stack_executable
3552 true, // can_icf_inline_merge_sections
3553 '\0', // wrap_char
3554 "/lib/ld.so.1", // program interpreter
3555 0x400000, // default_text_segment_address
3556 0x10000, // abi_pagesize (overridable by -z max-page-size)
3557 0x1000, // common_pagesize (overridable by -z common-page-size)
3558 false, // isolate_execinstr
3559 0, // rosegment_gap
3560 elfcpp::SHN_UNDEF, // small_common_shndx
3561 elfcpp::SHN_UNDEF, // large_common_shndx
3562 0, // small_common_section_flags
3563 0, // large_common_section_flags
3564 NULL, // attributes_section
3565 NULL, // attributes_vendor
3566 "_start", // entry_symbol_name
3567 32, // hash_entry_size
3568 elfcpp::SHT_PROGBITS, // unwind_section_type
3569 };
3570
3571 template<>
3572 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3573 {
3574 32, // size
3575 false, // is_big_endian
3576 elfcpp::EM_AARCH64, // machine_code
3577 false, // has_make_symbol
3578 false, // has_resolve
3579 false, // has_code_fill
3580 false, // is_default_stack_executable
3581 false, // can_icf_inline_merge_sections
3582 '\0', // wrap_char
3583 "/lib/ld.so.1", // program interpreter
3584 0x400000, // default_text_segment_address
3585 0x10000, // abi_pagesize (overridable by -z max-page-size)
3586 0x1000, // common_pagesize (overridable by -z common-page-size)
3587 false, // isolate_execinstr
3588 0, // rosegment_gap
3589 elfcpp::SHN_UNDEF, // small_common_shndx
3590 elfcpp::SHN_UNDEF, // large_common_shndx
3591 0, // small_common_section_flags
3592 0, // large_common_section_flags
3593 NULL, // attributes_section
3594 NULL, // attributes_vendor
3595 "_start", // entry_symbol_name
3596 32, // hash_entry_size
3597 elfcpp::SHT_PROGBITS, // unwind_section_type
3598 };
3599
3600 template<>
3601 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3602 {
3603 64, // size
3604 true, // is_big_endian
3605 elfcpp::EM_AARCH64, // machine_code
3606 false, // has_make_symbol
3607 false, // has_resolve
3608 false, // has_code_fill
3609 false, // is_default_stack_executable
3610 true, // can_icf_inline_merge_sections
3611 '\0', // wrap_char
3612 "/lib/ld.so.1", // program interpreter
3613 0x400000, // default_text_segment_address
3614 0x10000, // abi_pagesize (overridable by -z max-page-size)
3615 0x1000, // common_pagesize (overridable by -z common-page-size)
3616 false, // isolate_execinstr
3617 0, // rosegment_gap
3618 elfcpp::SHN_UNDEF, // small_common_shndx
3619 elfcpp::SHN_UNDEF, // large_common_shndx
3620 0, // small_common_section_flags
3621 0, // large_common_section_flags
3622 NULL, // attributes_section
3623 NULL, // attributes_vendor
3624 "_start", // entry_symbol_name
3625 32, // hash_entry_size
3626 elfcpp::SHT_PROGBITS, // unwind_section_type
3627 };
3628
3629 template<>
3630 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3631 {
3632 32, // size
3633 true, // is_big_endian
3634 elfcpp::EM_AARCH64, // machine_code
3635 false, // has_make_symbol
3636 false, // has_resolve
3637 false, // has_code_fill
3638 false, // is_default_stack_executable
3639 false, // can_icf_inline_merge_sections
3640 '\0', // wrap_char
3641 "/lib/ld.so.1", // program interpreter
3642 0x400000, // default_text_segment_address
3643 0x10000, // abi_pagesize (overridable by -z max-page-size)
3644 0x1000, // common_pagesize (overridable by -z common-page-size)
3645 false, // isolate_execinstr
3646 0, // rosegment_gap
3647 elfcpp::SHN_UNDEF, // small_common_shndx
3648 elfcpp::SHN_UNDEF, // large_common_shndx
3649 0, // small_common_section_flags
3650 0, // large_common_section_flags
3651 NULL, // attributes_section
3652 NULL, // attributes_vendor
3653 "_start", // entry_symbol_name
3654 32, // hash_entry_size
3655 elfcpp::SHT_PROGBITS, // unwind_section_type
3656 };
3657
3658 // Get the GOT section, creating it if necessary.
3659
3660 template<int size, bool big_endian>
3661 Output_data_got_aarch64<size, big_endian>*
3662 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3663 Layout* layout)
3664 {
3665 if (this->got_ == NULL)
3666 {
3667 gold_assert(symtab != NULL && layout != NULL);
3668
3669 // When using -z now, we can treat .got.plt as a relro section.
3670 // Without -z now, it is modified after program startup by lazy
3671 // PLT relocations.
3672 bool is_got_plt_relro = parameters->options().now();
3673 Output_section_order got_order = (is_got_plt_relro
3674 ? ORDER_RELRO
3675 : ORDER_RELRO_LAST);
3676 Output_section_order got_plt_order = (is_got_plt_relro
3677 ? ORDER_RELRO
3678 : ORDER_NON_RELRO_FIRST);
3679
3680 // Layout of .got and .got.plt sections.
3681 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3682 // ...
3683 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3684 // .gotplt[1] reserved for ld.so (resolver)
3685 // .gotplt[2] reserved
3686
3687 // Generate .got section.
3688 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3689 layout);
3690 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3691 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3692 this->got_, got_order, true);
3693 // The first word of GOT is reserved for the address of .dynamic.
3694 // We put 0 here now. The value will be replaced later in
3695 // Output_data_got_aarch64::do_write.
3696 this->got_->add_constant(0);
3697
3698       // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3699       // Its value points to the start of .got even when there is a
3700       // separate .got.plt section.
3701 this->global_offset_table_ =
3702 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3703 Symbol_table::PREDEFINED,
3704 this->got_,
3705 0, 0, elfcpp::STT_OBJECT,
3706 elfcpp::STB_LOCAL,
3707 elfcpp::STV_HIDDEN, 0,
3708 false, false);
3709
3710 // Generate .got.plt section.
3711 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3712 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3713 (elfcpp::SHF_ALLOC
3714 | elfcpp::SHF_WRITE),
3715 this->got_plt_, got_plt_order,
3716 is_got_plt_relro);
3717
3718 // The first three entries are reserved.
3719 this->got_plt_->set_current_data_size(
3720 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3721
3722 // If there are any IRELATIVE relocations, they get GOT entries
3723 // in .got.plt after the jump slot entries.
3724 this->got_irelative_ = new Output_data_space(size / 8,
3725 "** GOT IRELATIVE PLT");
3726 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3727 (elfcpp::SHF_ALLOC
3728 | elfcpp::SHF_WRITE),
3729 this->got_irelative_,
3730 got_plt_order,
3731 is_got_plt_relro);
3732
3733 // If there are any TLSDESC relocations, they get GOT entries in
3734 // .got.plt after the jump slot and IRELATIVE entries.
3735 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3736 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3737 (elfcpp::SHF_ALLOC
3738 | elfcpp::SHF_WRITE),
3739 this->got_tlsdesc_,
3740 got_plt_order,
3741 is_got_plt_relro);
3742
3743 if (!is_got_plt_relro)
3744 {
3745 // Those bytes can go into the relro segment.
3746 layout->increase_relro(
3747 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3748 }
3749
3750 }
3751 return this->got_;
3752 }
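
// As a concrete illustration (assuming a 64-bit link): .got[0] holds the
// address of _DYNAMIC (the constant 0 added above is overwritten in
// Output_data_got_aarch64::do_write), and .got.plt begins with
// AARCH64_GOTPLT_RESERVE_COUNT * 8 == 24 reserved bytes, followed by one
// 8-byte slot per PLT entry, then the IRELATIVE and TLSDESC slots in the
// order the output data was added above.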
3753
3754 // Get the dynamic reloc section, creating it if necessary.
3755
3756 template<int size, bool big_endian>
3757 typename Target_aarch64<size, big_endian>::Reloc_section*
3758 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3759 {
3760 if (this->rela_dyn_ == NULL)
3761 {
3762 gold_assert(layout != NULL);
3763 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3764 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3765 elfcpp::SHF_ALLOC, this->rela_dyn_,
3766 ORDER_DYNAMIC_RELOCS, false);
3767 }
3768 return this->rela_dyn_;
3769 }
3770
3771 // Get the section to use for IRELATIVE relocs, creating it if
3772 // necessary. These go in .rela.dyn, but only after all other dynamic
3773 // relocations. They need to follow the other dynamic relocations so
3774 // that they can refer to global variables initialized by those
3775 // relocs.
3776
3777 template<int size, bool big_endian>
3778 typename Target_aarch64<size, big_endian>::Reloc_section*
3779 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3780 {
3781 if (this->rela_irelative_ == NULL)
3782 {
3783 // Make sure we have already created the dynamic reloc section.
3784 this->rela_dyn_section(layout);
3785 this->rela_irelative_ = new Reloc_section(false);
3786 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3787 elfcpp::SHF_ALLOC, this->rela_irelative_,
3788 ORDER_DYNAMIC_RELOCS, false);
3789 gold_assert(this->rela_dyn_->output_section()
3790 == this->rela_irelative_->output_section());
3791 }
3792 return this->rela_irelative_;
3793 }
3794
3795
3796 // do_make_elf_object overrides the same function in the base class.  We
3797 // need a target-specific sub-class of Sized_relobj_file<size, big_endian>
3798 // to store backend-specific information, hence we need our own ELF object
3799 // creation.
3800
3801 template<int size, bool big_endian>
3802 Object*
3803 Target_aarch64<size, big_endian>::do_make_elf_object(
3804 const std::string& name,
3805 Input_file* input_file,
3806 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3807 {
3808 int et = ehdr.get_e_type();
3809 // ET_EXEC files are valid input for --just-symbols/-R,
3810 // and we treat them as relocatable objects.
3811 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3812 return Sized_target<size, big_endian>::do_make_elf_object(
3813 name, input_file, offset, ehdr);
3814 else if (et == elfcpp::ET_REL)
3815 {
3816 AArch64_relobj<size, big_endian>* obj =
3817 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3818 obj->setup();
3819 return obj;
3820 }
3821 else if (et == elfcpp::ET_DYN)
3822 {
3823 // Keep base implementation.
3824 Sized_dynobj<size, big_endian>* obj =
3825 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3826 obj->setup();
3827 return obj;
3828 }
3829 else
3830 {
3831 gold_error(_("%s: unsupported ELF file type %d"),
3832 name.c_str(), et);
3833 return NULL;
3834 }
3835 }
3836
3837
3838 // Scan a relocation for stub generation.
3839
3840 template<int size, bool big_endian>
3841 void
3842 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3843 const Relocate_info<size, big_endian>* relinfo,
3844 unsigned int r_type,
3845 const Sized_symbol<size>* gsym,
3846 unsigned int r_sym,
3847 const Symbol_value<size>* psymval,
3848 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3849 Address address)
3850 {
3851 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3852 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3853
3854 Symbol_value<size> symval;
3855 if (gsym != NULL)
3856 {
3857 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3858 get_reloc_property(r_type);
3859 if (gsym->use_plt_offset(arp->reference_flags()))
3860 {
3861 // This uses a PLT, change the symbol value.
3862 symval.set_output_value(this->plt_address_for_global(gsym));
3863 psymval = &symval;
3864 }
3865 else if (gsym->is_undefined())
3866 {
3867 // There is no need to generate a stub symbol if the original symbol
3868 // is undefined.
3869 gold_debug(DEBUG_TARGET,
3870 "stub: not creating a stub for undefined symbol %s in file %s",
3871 gsym->name(), aarch64_relobj->name().c_str());
3872 return;
3873 }
3874 }
3875
3876 // Get the symbol value.
3877 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3878
3879   // Compute the branch destination.  For these relocations the destination
3880   // is simply the symbol value plus the addend.
3881 Address destination = static_cast<Address>(-1);
3882 switch (r_type)
3883 {
3884 case elfcpp::R_AARCH64_CALL26:
3885 case elfcpp::R_AARCH64_JUMP26:
3886 destination = value + addend;
3887 break;
3888 default:
3889 gold_unreachable();
3890 }
3891
3892 int stub_type = The_reloc_stub::
3893 stub_type_for_reloc(r_type, address, destination);
3894 if (stub_type == ST_NONE)
3895 return;
3896
3897 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3898 gold_assert(stub_table != NULL);
3899
3900 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3901 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3902 if (stub == NULL)
3903 {
3904 stub = new The_reloc_stub(stub_type);
3905 stub_table->add_reloc_stub(stub, key);
3906 }
3907 stub->set_destination_address(destination);
3908 } // End of Target_aarch64::scan_reloc_for_stub
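
// For reference: R_AARCH64_CALL26 and R_AARCH64_JUMP26 encode a signed
// 26-bit word offset (a reach of +/-128MiB).  When the destination computed
// above is already reachable, stub_type_for_reloc returns ST_NONE and no
// stub is made; otherwise a single Reloc_stub, keyed on (type, symbol,
// addend), is shared by every branch to the same destination.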
3909
3910
3911 // This function scans a relocation section for stub generation.
3912 // Only R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations are considered
3913 // here; all other relocation types are skipped, since only branches can
3914 // need stubs.
3915
3916 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3917 // SHT_REL or SHT_RELA.
3918
3919 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3920 // of relocs. OUTPUT_SECTION is the output section.
3921 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3922 // mapped to output offsets.
3923
3924 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3925 // VIEW_SIZE is the size. These refer to the input section, unless
3926 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3927 // the output section.
3928
3929 template<int size, bool big_endian>
3930 template<int sh_type>
3931 void inline
3932 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3933 const Relocate_info<size, big_endian>* relinfo,
3934 const unsigned char* prelocs,
3935 size_t reloc_count,
3936 Output_section* /*output_section*/,
3937 bool /*needs_special_offset_handling*/,
3938 const unsigned char* /*view*/,
3939 Address view_address,
3940 section_size_type)
3941 {
3942 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3943
3944 const int reloc_size =
3945 Reloc_types<sh_type,size,big_endian>::reloc_size;
3946 AArch64_relobj<size, big_endian>* object =
3947 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3948 unsigned int local_count = object->local_symbol_count();
3949
3950 gold::Default_comdat_behavior default_comdat_behavior;
3951 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3952
3953 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3954 {
3955 Reltype reloc(prelocs);
3956 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3957 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3958 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3959 if (r_type != elfcpp::R_AARCH64_CALL26
3960 && r_type != elfcpp::R_AARCH64_JUMP26)
3961 continue;
3962
3963 section_offset_type offset =
3964 convert_to_section_size_type(reloc.get_r_offset());
3965
3966 // Get the addend.
3967 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3968 reloc.get_r_addend();
3969
3970 const Sized_symbol<size>* sym;
3971 Symbol_value<size> symval;
3972 const Symbol_value<size> *psymval;
3973 bool is_defined_in_discarded_section;
3974 unsigned int shndx;
3975 const Symbol* gsym = NULL;
3976 if (r_sym < local_count)
3977 {
3978 sym = NULL;
3979 psymval = object->local_symbol(r_sym);
3980
3981 // If the local symbol belongs to a section we are discarding,
3982 // and that section is a debug section, try to find the
3983 // corresponding kept section and map this symbol to its
3984 // counterpart in the kept section. The symbol must not
3985 // correspond to a section we are folding.
3986 bool is_ordinary;
3987 shndx = psymval->input_shndx(&is_ordinary);
3988 is_defined_in_discarded_section =
3989 (is_ordinary
3990 && shndx != elfcpp::SHN_UNDEF
3991 && !object->is_section_included(shndx)
3992 && !relinfo->symtab->is_section_folded(object, shndx));
3993
3994 // We need to compute the would-be final value of this local
3995 // symbol.
3996 if (!is_defined_in_discarded_section)
3997 {
3998 typedef Sized_relobj_file<size, big_endian> ObjType;
3999 if (psymval->is_section_symbol())
4000 symval.set_is_section_symbol();
4001 typename ObjType::Compute_final_local_value_status status =
4002 object->compute_final_local_value(r_sym, psymval, &symval,
4003 relinfo->symtab);
4004 if (status == ObjType::CFLV_OK)
4005 {
4006 // Currently we cannot handle a branch to a target in
4007 // a merged section. If this is the case, issue an error
4008 // and also free the merge symbol value.
4009 if (!symval.has_output_value())
4010 {
4011 const std::string& section_name =
4012 object->section_name(shndx);
4013 object->error(_("cannot handle branch to local %u "
4014 "in a merged section %s"),
4015 r_sym, section_name.c_str());
4016 }
4017 psymval = &symval;
4018 }
4019 else
4020 {
4021 // We cannot determine the final value.
4022 continue;
4023 }
4024 }
4025 }
4026 else
4027 {
4028 gsym = object->global_symbol(r_sym);
4029 gold_assert(gsym != NULL);
4030 if (gsym->is_forwarder())
4031 gsym = relinfo->symtab->resolve_forwards(gsym);
4032
4033 sym = static_cast<const Sized_symbol<size>*>(gsym);
4034 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
4035 symval.set_output_symtab_index(sym->symtab_index());
4036 else
4037 symval.set_no_output_symtab_entry();
4038
4039 // We need to compute the would-be final value of this global
4040 // symbol.
4041 const Symbol_table* symtab = relinfo->symtab;
4042 const Sized_symbol<size>* sized_symbol =
4043 symtab->get_sized_symbol<size>(gsym);
4044 Symbol_table::Compute_final_value_status status;
4045 typename elfcpp::Elf_types<size>::Elf_Addr value =
4046 symtab->compute_final_value<size>(sized_symbol, &status);
4047
4048 	  // Skip this if the symbol has no output section.
4049 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
4050 continue;
4051 symval.set_output_value(value);
4052
4053 if (gsym->type() == elfcpp::STT_TLS)
4054 symval.set_is_tls_symbol();
4055 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
4056 symval.set_is_ifunc_symbol();
4057 psymval = &symval;
4058
4059 is_defined_in_discarded_section =
4060 (gsym->is_defined_in_discarded_section()
4061 && gsym->is_undefined());
4062 shndx = 0;
4063 }
4064
4065 Symbol_value<size> symval2;
4066 if (is_defined_in_discarded_section)
4067 {
4068 std::string name = object->section_name(relinfo->data_shndx);
4069
4070 if (comdat_behavior == CB_UNDETERMINED)
4071 comdat_behavior = default_comdat_behavior.get(name.c_str());
4072
4073 if (comdat_behavior == CB_PRETEND)
4074 {
4075 bool found;
4076 typename elfcpp::Elf_types<size>::Elf_Addr value =
4077 object->map_to_kept_section(shndx, name, &found);
4078 if (found)
4079 symval2.set_output_value(value + psymval->input_value());
4080 else
4081 symval2.set_output_value(0);
4082 }
4083 else
4084 {
4085 if (comdat_behavior == CB_ERROR)
4086 issue_discarded_error(relinfo, i, offset, r_sym, gsym);
4087 symval2.set_output_value(0);
4088 }
4089 symval2.set_no_output_symtab_entry();
4090 psymval = &symval2;
4091 }
4092
4093 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
4094 addend, view_address + offset);
4095 } // End of iterating relocs in a section
4096 } // End of Target_aarch64::scan_reloc_section_for_stubs
4097
4098
4099 // Scan an input section for stub generation.
4100
4101 template<int size, bool big_endian>
4102 void
4103 Target_aarch64<size, big_endian>::scan_section_for_stubs(
4104 const Relocate_info<size, big_endian>* relinfo,
4105 unsigned int sh_type,
4106 const unsigned char* prelocs,
4107 size_t reloc_count,
4108 Output_section* output_section,
4109 bool needs_special_offset_handling,
4110 const unsigned char* view,
4111 Address view_address,
4112 section_size_type view_size)
4113 {
4114 gold_assert(sh_type == elfcpp::SHT_RELA);
4115 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
4116 relinfo,
4117 prelocs,
4118 reloc_count,
4119 output_section,
4120 needs_special_offset_handling,
4121 view,
4122 view_address,
4123 view_size);
4124 }
4125
4126
4127 // Relocate a single reloc stub.
4128
4129 template<int size, bool big_endian>
4130 void Target_aarch64<size, big_endian>::
4131 relocate_reloc_stub(The_reloc_stub* stub,
4132 const The_relocate_info*,
4133 Output_section*,
4134 unsigned char* view,
4135 Address address,
4136 section_size_type)
4137 {
4138 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4139 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4140 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4141
4142 Insntype* ip = reinterpret_cast<Insntype*>(view);
4143 int insn_number = stub->insn_num();
4144 const uint32_t* insns = stub->insns();
4145   // Check that the insns really are the stub's template insns.
4146 for (int i = 0; i < insn_number; ++i)
4147 {
4148 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4149 gold_assert(((uint32_t)insn == insns[i]));
4150 }
4151
4152 Address dest = stub->destination_address();
4153
4154 switch(stub->type())
4155 {
4156 case ST_ADRP_BRANCH:
4157 {
4158 // 1st reloc is ADR_PREL_PG_HI21
4159 The_reloc_functions_status status =
4160 The_reloc_functions::adrp(view, dest, address);
4161       // An error should never arise in the above step; if it does, check
4162       // 'aarch64_valid_for_adrp_p'.
4163 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4164
4165 // 2nd reloc is ADD_ABS_LO12_NC
4166 const AArch64_reloc_property* arp =
4167 aarch64_reloc_property_table->get_reloc_property(
4168 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4169 gold_assert(arp != NULL);
4170 status = The_reloc_functions::template
4171 rela_general<32>(view + 4, dest, 0, arp);
4172       // An error should never arise, since this is an "_NC" (non-checking)
      // relocation.
4173 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4174 }
4175 break;
4176
4177 case ST_LONG_BRANCH_ABS:
4178 // 1st reloc is R_AARCH64_PREL64, at offset 8
4179 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4180 break;
4181
4182 case ST_LONG_BRANCH_PCREL:
4183 {
4184 // "PC" calculation is the 2nd insn in the stub.
4185 uint64_t offset = dest - (address + 4);
4186       // The 64-bit offset goes in insn slots 4 and 5, i.e. at byte offset 16.
4187 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4188 }
4189 break;
4190
4191 default:
4192 gold_unreachable();
4193 }
4194 }
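
// A worked example for ST_LONG_BRANCH_PCREL: with the stub at address A
// branching to D, the doubleword written at byte offset 16 is D - (A + 4),
// so adding it to the address of the stub's second instruction yields D.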
4195
4196
4197 // A class to handle the PLT data.
4198 // This is an abstract base class that handles most of the linker details
4199 // but does not know the actual contents of PLT entries. The derived
4200 // classes below fill in those details.
4201
4202 template<int size, bool big_endian>
4203 class Output_data_plt_aarch64 : public Output_section_data
4204 {
4205 public:
4206 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4207 Reloc_section;
4208 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4209
4210 Output_data_plt_aarch64(Layout* layout,
4211 uint64_t addralign,
4212 Output_data_got_aarch64<size, big_endian>* got,
4213 Output_data_space* got_plt,
4214 Output_data_space* got_irelative)
4215 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4216 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4217 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4218 { this->init(layout); }
4219
4220 // Initialize the PLT section.
4221 void
4222 init(Layout* layout);
4223
4224 // Add an entry to the PLT.
4225 void
4226 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4227
4228 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4229 unsigned int
4230 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4231 Sized_relobj_file<size, big_endian>* relobj,
4232 unsigned int local_sym_index);
4233
4234 // Add the relocation for a PLT entry.
4235 void
4236 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4237 unsigned int got_offset);
4238
4239 // Add the reserved TLSDESC_PLT entry to the PLT.
4240 void
4241 reserve_tlsdesc_entry(unsigned int got_offset)
4242 { this->tlsdesc_got_offset_ = got_offset; }
4243
4244 // Return true if a TLSDESC_PLT entry has been reserved.
4245 bool
4246 has_tlsdesc_entry() const
4247 { return this->tlsdesc_got_offset_ != -1U; }
4248
4249 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4250 unsigned int
4251 get_tlsdesc_got_offset() const
4252 { return this->tlsdesc_got_offset_; }
4253
4254 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4255 unsigned int
4256 get_tlsdesc_plt_offset() const
4257 {
4258 return (this->first_plt_entry_offset() +
4259 (this->count_ + this->irelative_count_)
4260 * this->get_plt_entry_size());
4261 }
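
  // For instance, with the standard layout below (a 32-byte PLT0 and 16-byte
  // entries), a link with 3 jump-slot entries and 1 IRELATIVE entry puts the
  // reserved TLSDESC entry at 32 + (3 + 1) * 16 == 96 bytes into .plt.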
4262
4263 // Return the .rela.plt section data.
4264 Reloc_section*
4265 rela_plt()
4266 { return this->rel_; }
4267
4268 // Return where the TLSDESC relocations should go.
4269 Reloc_section*
4270 rela_tlsdesc(Layout*);
4271
4272 // Return where the IRELATIVE relocations should go in the PLT
4273 // relocations.
4274 Reloc_section*
4275 rela_irelative(Symbol_table*, Layout*);
4276
4277 // Return whether we created a section for IRELATIVE relocations.
4278 bool
4279 has_irelative_section() const
4280 { return this->irelative_rel_ != NULL; }
4281
4282 // Return the number of PLT entries.
4283 unsigned int
4284 entry_count() const
4285 { return this->count_ + this->irelative_count_; }
4286
4287 // Return the offset of the first non-reserved PLT entry.
4288 unsigned int
4289 first_plt_entry_offset() const
4290 { return this->do_first_plt_entry_offset(); }
4291
4292 // Return the size of a PLT entry.
4293 unsigned int
4294 get_plt_entry_size() const
4295 { return this->do_get_plt_entry_size(); }
4296
4297 // Return the reserved tlsdesc entry size.
4298 unsigned int
4299 get_plt_tlsdesc_entry_size() const
4300 { return this->do_get_plt_tlsdesc_entry_size(); }
4301
4302 // Return the PLT address to use for a global symbol.
4303 uint64_t
4304 address_for_global(const Symbol*);
4305
4306 // Return the PLT address to use for a local symbol.
4307 uint64_t
4308 address_for_local(const Relobj*, unsigned int symndx);
4309
4310 protected:
4311 // Fill in the first PLT entry.
4312 void
4313 fill_first_plt_entry(unsigned char* pov,
4314 Address got_address,
4315 Address plt_address)
4316 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4317
4318 // Fill in a normal PLT entry.
4319 void
4320 fill_plt_entry(unsigned char* pov,
4321 Address got_address,
4322 Address plt_address,
4323 unsigned int got_offset,
4324 unsigned int plt_offset)
4325 {
4326 this->do_fill_plt_entry(pov, got_address, plt_address,
4327 got_offset, plt_offset);
4328 }
4329
4330 // Fill in the reserved TLSDESC PLT entry.
4331 void
4332 fill_tlsdesc_entry(unsigned char* pov,
4333 Address gotplt_address,
4334 Address plt_address,
4335 Address got_base,
4336 unsigned int tlsdesc_got_offset,
4337 unsigned int plt_offset)
4338 {
4339 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4340 tlsdesc_got_offset, plt_offset);
4341 }
4342
4343 virtual unsigned int
4344 do_first_plt_entry_offset() const = 0;
4345
4346 virtual unsigned int
4347 do_get_plt_entry_size() const = 0;
4348
4349 virtual unsigned int
4350 do_get_plt_tlsdesc_entry_size() const = 0;
4351
4352 virtual void
4353 do_fill_first_plt_entry(unsigned char* pov,
4354 Address got_addr,
4355 Address plt_addr) = 0;
4356
4357 virtual void
4358 do_fill_plt_entry(unsigned char* pov,
4359 Address got_address,
4360 Address plt_address,
4361 unsigned int got_offset,
4362 unsigned int plt_offset) = 0;
4363
4364 virtual void
4365 do_fill_tlsdesc_entry(unsigned char* pov,
4366 Address gotplt_address,
4367 Address plt_address,
4368 Address got_base,
4369 unsigned int tlsdesc_got_offset,
4370 unsigned int plt_offset) = 0;
4371
4372 void
4373 do_adjust_output_section(Output_section* os);
4374
4375 // Write to a map file.
4376 void
4377 do_print_to_mapfile(Mapfile* mapfile) const
4378 { mapfile->print_output_data(this, _("** PLT")); }
4379
4380 private:
4381 // Set the final size.
4382 void
4383 set_final_data_size();
4384
4385 // Write out the PLT data.
4386 void
4387 do_write(Output_file*);
4388
4389 // The reloc section.
4390 Reloc_section* rel_;
4391
4392 // The TLSDESC relocs, if necessary. These must follow the regular
4393 // PLT relocs.
4394 Reloc_section* tlsdesc_rel_;
4395
4396 // The IRELATIVE relocs, if necessary. These must follow the
4397 // regular PLT relocations.
4398 Reloc_section* irelative_rel_;
4399
4400 // The .got section.
4401 Output_data_got_aarch64<size, big_endian>* got_;
4402
4403 // The .got.plt section.
4404 Output_data_space* got_plt_;
4405
4406 // The part of the .got.plt section used for IRELATIVE relocs.
4407 Output_data_space* got_irelative_;
4408
4409 // The number of PLT entries.
4410 unsigned int count_;
4411
4412 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4413 // follow the regular PLT entries.
4414 unsigned int irelative_count_;
4415
4416 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4417 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4418 // indicates an offset is not allocated.
4419 unsigned int tlsdesc_got_offset_;
4420 };
4421
4422 // Initialize the PLT section.
4423
4424 template<int size, bool big_endian>
4425 void
4426 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4427 {
4428 this->rel_ = new Reloc_section(false);
4429 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4430 elfcpp::SHF_ALLOC, this->rel_,
4431 ORDER_DYNAMIC_PLT_RELOCS, false);
4432 }
4433
4434 template<int size, bool big_endian>
4435 void
4436 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4437 Output_section* os)
4438 {
4439 os->set_entsize(this->get_plt_entry_size());
4440 }
4441
4442 // Add an entry to the PLT.
4443
4444 template<int size, bool big_endian>
4445 void
4446 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4447 Layout* layout, Symbol* gsym)
4448 {
4449 gold_assert(!gsym->has_plt_offset());
4450
4451 unsigned int* pcount;
4452 unsigned int plt_reserved;
4453 Output_section_data_build* got;
4454
4455 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4456 && gsym->can_use_relative_reloc(false))
4457 {
4458 pcount = &this->irelative_count_;
4459 plt_reserved = 0;
4460 got = this->got_irelative_;
4461 }
4462 else
4463 {
4464 pcount = &this->count_;
4465 plt_reserved = this->first_plt_entry_offset();
4466 got = this->got_plt_;
4467 }
4468
4469 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4470 + plt_reserved);
4471
4472 ++*pcount;
4473
4474 section_offset_type got_offset = got->current_data_size();
4475
4476 // Every PLT entry needs a GOT entry which points back to the PLT
4477 // entry (this will be changed by the dynamic linker, normally
4478 // lazily when the function is called).
4479 got->set_current_data_size(got_offset + size / 8);
4480
4481 // Every PLT entry needs a reloc.
4482 this->add_relocation(symtab, layout, gsym, got_offset);
4483
4484 // Note that we don't need to save the symbol. The contents of the
4485 // PLT are independent of which symbols are used. The symbols only
4486 // appear in the relocations.
4487 }
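
// For example (assuming a 64-bit link and the standard entry sizes), the
// first regular symbol added here gets plt_offset == 0 * 16 + 32 == 32 and
// a .got.plt slot at offset 3 * 8 == 24, immediately after the three
// reserved words.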
4488
4489 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4490 // the PLT offset.
4491
4492 template<int size, bool big_endian>
4493 unsigned int
4494 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4495 Symbol_table* symtab,
4496 Layout* layout,
4497 Sized_relobj_file<size, big_endian>* relobj,
4498 unsigned int local_sym_index)
4499 {
4500 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4501 ++this->irelative_count_;
4502
4503 section_offset_type got_offset = this->got_irelative_->current_data_size();
4504
4505 // Every PLT entry needs a GOT entry which points back to the PLT
4506 // entry.
4507 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4508
4509 // Every PLT entry needs a reloc.
4510 Reloc_section* rela = this->rela_irelative(symtab, layout);
4511 rela->add_symbolless_local_addend(relobj, local_sym_index,
4512 elfcpp::R_AARCH64_IRELATIVE,
4513 this->got_irelative_, got_offset, 0);
4514
4515 return plt_offset;
4516 }
4517
4518 // Add the relocation for a PLT entry.
4519
4520 template<int size, bool big_endian>
4521 void
4522 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4523 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4524 {
4525 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4526 && gsym->can_use_relative_reloc(false))
4527 {
4528 Reloc_section* rela = this->rela_irelative(symtab, layout);
4529 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4530 this->got_irelative_, got_offset, 0);
4531 }
4532 else
4533 {
4534 gsym->set_needs_dynsym_entry();
4535 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4536 got_offset, 0);
4537 }
4538 }
4539
4540 // Return where the TLSDESC relocations should go, creating it if
4541 // necessary. These follow the JUMP_SLOT relocations.
4542
4543 template<int size, bool big_endian>
4544 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4545 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4546 {
4547 if (this->tlsdesc_rel_ == NULL)
4548 {
4549 this->tlsdesc_rel_ = new Reloc_section(false);
4550 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4551 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4552 ORDER_DYNAMIC_PLT_RELOCS, false);
4553 gold_assert(this->tlsdesc_rel_->output_section()
4554 == this->rel_->output_section());
4555 }
4556 return this->tlsdesc_rel_;
4557 }
4558
4559 // Return where the IRELATIVE relocations should go in the PLT. These
4560 // follow the JUMP_SLOT and the TLSDESC relocations.
4561
4562 template<int size, bool big_endian>
4563 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4564 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4565 Layout* layout)
4566 {
4567 if (this->irelative_rel_ == NULL)
4568 {
4569 // Make sure we have a place for the TLSDESC relocations, in
4570 // case we see any later on.
4571 this->rela_tlsdesc(layout);
4572 this->irelative_rel_ = new Reloc_section(false);
4573 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4574 elfcpp::SHF_ALLOC, this->irelative_rel_,
4575 ORDER_DYNAMIC_PLT_RELOCS, false);
4576 gold_assert(this->irelative_rel_->output_section()
4577 == this->rel_->output_section());
4578
4579 if (parameters->doing_static_link())
4580 {
4581 // A statically linked executable will only have a .rela.plt
4582 // section to hold R_AARCH64_IRELATIVE relocs for
4583 // STT_GNU_IFUNC symbols. The library will use these
4584 // symbols to locate the IRELATIVE relocs at program startup
4585 // time.
4586 symtab->define_in_output_data("__rela_iplt_start", NULL,
4587 Symbol_table::PREDEFINED,
4588 this->irelative_rel_, 0, 0,
4589 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4590 elfcpp::STV_HIDDEN, 0, false, true);
4591 symtab->define_in_output_data("__rela_iplt_end", NULL,
4592 Symbol_table::PREDEFINED,
4593 this->irelative_rel_, 0, 0,
4594 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4595 elfcpp::STV_HIDDEN, 0, true, true);
4596 }
4597 }
4598 return this->irelative_rel_;
4599 }
4600
4601 // Return the PLT address to use for a global symbol.
4602
4603 template<int size, bool big_endian>
4604 uint64_t
4605 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4606 const Symbol* gsym)
4607 {
4608 uint64_t offset = 0;
4609 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4610 && gsym->can_use_relative_reloc(false))
4611 offset = (this->first_plt_entry_offset() +
4612 this->count_ * this->get_plt_entry_size());
4613 return this->address() + offset + gsym->plt_offset();
4614 }
4615
4616 // Return the PLT address to use for a local symbol. These are always
4617 // IRELATIVE relocs.
4618
4619 template<int size, bool big_endian>
4620 uint64_t
4621 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4622 const Relobj* object,
4623 unsigned int r_sym)
4624 {
4625 return (this->address()
4626 + this->first_plt_entry_offset()
4627 + this->count_ * this->get_plt_entry_size()
4628 + object->local_plt_offset(r_sym));
4629 }
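
// To illustrate both cases (standard 64-bit sizes assumed): a regular
// symbol whose plt_offset is 32 resolves to address() + 32, while the first
// local IFUNC entry resolves to address() + 32 + count_ * 16, because the
// IRELATIVE entries are laid out after all of the regular entries.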
4630
4631 // Set the final size.
4632
4633 template<int size, bool big_endian>
4634 void
4635 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4636 {
4637 unsigned int count = this->count_ + this->irelative_count_;
4638 unsigned int extra_size = 0;
4639 if (this->has_tlsdesc_entry())
4640 extra_size += this->get_plt_tlsdesc_entry_size();
4641 this->set_data_size(this->first_plt_entry_offset()
4642 + count * this->get_plt_entry_size()
4643 + extra_size);
4644 }
4645
4646 template<int size, bool big_endian>
4647 class Output_data_plt_aarch64_standard :
4648 public Output_data_plt_aarch64<size, big_endian>
4649 {
4650 public:
4651 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4652 Output_data_plt_aarch64_standard(
4653 Layout* layout,
4654 Output_data_got_aarch64<size, big_endian>* got,
4655 Output_data_space* got_plt,
4656 Output_data_space* got_irelative)
4657 : Output_data_plt_aarch64<size, big_endian>(layout,
4658 size == 32 ? 4 : 8,
4659 got, got_plt,
4660 got_irelative)
4661 { }
4662
4663 protected:
4664 // Return the offset of the first non-reserved PLT entry.
4665 virtual unsigned int
4666 do_first_plt_entry_offset() const
4667 { return this->first_plt_entry_size; }
4668
4669 // Return the size of a PLT entry
4670 virtual unsigned int
4671 do_get_plt_entry_size() const
4672 { return this->plt_entry_size; }
4673
4674 // Return the size of a tlsdesc entry
4675 virtual unsigned int
4676 do_get_plt_tlsdesc_entry_size() const
4677 { return this->plt_tlsdesc_entry_size; }
4678
4679 virtual void
4680 do_fill_first_plt_entry(unsigned char* pov,
4681 Address got_address,
4682 Address plt_address);
4683
4684 virtual void
4685 do_fill_plt_entry(unsigned char* pov,
4686 Address got_address,
4687 Address plt_address,
4688 unsigned int got_offset,
4689 unsigned int plt_offset);
4690
4691 virtual void
4692 do_fill_tlsdesc_entry(unsigned char* pov,
4693 Address gotplt_address,
4694 Address plt_address,
4695 Address got_base,
4696 unsigned int tlsdesc_got_offset,
4697 unsigned int plt_offset);
4698
4699 private:
4700   // The size of the first PLT entry.
4701   static const int first_plt_entry_size = 32;
4702   // The size of a regular PLT entry.
4703   static const int plt_entry_size = 16;
4704   // The size of the reserved TLSDESC PLT entry.
4705   static const int plt_tlsdesc_entry_size = 32;
4706 // Template for the first PLT entry.
4707 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4708 // Template for subsequent PLT entries.
4709 static const uint32_t plt_entry[plt_entry_size / 4];
4710 // The reserved TLSDESC entry in the PLT for an executable.
4711 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4712 };
4713
4714 // The first entry in the PLT for an executable.
4715
4716 template<>
4717 const uint32_t
4718 Output_data_plt_aarch64_standard<32, false>::
4719 first_plt_entry[first_plt_entry_size / 4] =
4720 {
4721 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4722 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4723 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4724 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4725 0xd61f0220, /* br x17 */
4726 0xd503201f, /* nop */
4727 0xd503201f, /* nop */
4728 0xd503201f, /* nop */
4729 };
4730
4731
4732 template<>
4733 const uint32_t
4734 Output_data_plt_aarch64_standard<32, true>::
4735 first_plt_entry[first_plt_entry_size / 4] =
4736 {
4737 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4738 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4739 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4740 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4741 0xd61f0220, /* br x17 */
4742 0xd503201f, /* nop */
4743 0xd503201f, /* nop */
4744 0xd503201f, /* nop */
4745 };
4746
4747
4748 template<>
4749 const uint32_t
4750 Output_data_plt_aarch64_standard<64, false>::
4751 first_plt_entry[first_plt_entry_size / 4] =
4752 {
4753 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4754 0x90000010, /* adrp x16, PLT_GOT+16 */
4755 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4756 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4757 0xd61f0220, /* br x17 */
4758 0xd503201f, /* nop */
4759 0xd503201f, /* nop */
4760 0xd503201f, /* nop */
4761 };
4762
4763
4764 template<>
4765 const uint32_t
4766 Output_data_plt_aarch64_standard<64, true>::
4767 first_plt_entry[first_plt_entry_size / 4] =
4768 {
4769 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4770 0x90000010, /* adrp x16, PLT_GOT+16 */
4771 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4772 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4773 0xd61f0220, /* br x17 */
4774 0xd503201f, /* nop */
4775 0xd503201f, /* nop */
4776 0xd503201f, /* nop */
4777 };
4778
4779
4780 template<>
4781 const uint32_t
4782 Output_data_plt_aarch64_standard<32, false>::
4783 plt_entry[plt_entry_size / 4] =
4784 {
4785 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4786 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4787 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4788 0xd61f0220, /* br x17. */
4789 };
4790
4791
4792 template<>
4793 const uint32_t
4794 Output_data_plt_aarch64_standard<32, true>::
4795 plt_entry[plt_entry_size / 4] =
4796 {
4797 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4798 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4799 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4800 0xd61f0220, /* br x17. */
4801 };
4802
4803
4804 template<>
4805 const uint32_t
4806 Output_data_plt_aarch64_standard<64, false>::
4807 plt_entry[plt_entry_size / 4] =
4808 {
4809 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4810 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4811 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4812 0xd61f0220, /* br x17. */
4813 };
4814
4815
4816 template<>
4817 const uint32_t
4818 Output_data_plt_aarch64_standard<64, true>::
4819 plt_entry[plt_entry_size / 4] =
4820 {
4821 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4822 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4823 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4824 0xd61f0220, /* br x17. */
4825 };
4826
4827
4828 template<int size, bool big_endian>
4829 void
4830 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4831 unsigned char* pov,
4832 Address got_address,
4833 Address plt_address)
4834 {
4835 // PLT0 of the small PLT looks like this in ELF64 -
4836 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4837 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4838 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4839 // symbol resolver
4840 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4841 // GOTPLT entry for this.
4842 // br x17
4843 // PLT0 will be slightly different in ELF32 due to different got entry
4844 // size.
4845 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4846 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4847
4848 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4849 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4850 // FIXME: This only works for 64bit
4851 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4852 gotplt_2nd_ent, plt_address + 4);
4853
4854   // Fill in R_AARCH64_LDST64_ABS_LO12
4855 elfcpp::Swap<32, big_endian>::writeval(
4856 pov + 8,
4857 ((this->first_plt_entry[2] & 0xffc003ff)
4858 | ((gotplt_2nd_ent & 0xff8) << 7)));
4859
4860 // Fill in R_AARCH64_ADD_ABS_LO12
4861 elfcpp::Swap<32, big_endian>::writeval(
4862 pov + 12,
4863 ((this->first_plt_entry[3] & 0xffc003ff)
4864 | ((gotplt_2nd_ent & 0xfff) << 10)));
4865 }
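
// A quick check of the arithmetic above: if the second .got.plt slot sits
// at page offset 0x18, the LDST64 immediate becomes (0x18 & 0xff8) << 7,
// which equals (0x18 >> 3) << 10, i.e. the doubleword-scaled imm12 field,
// and the ADD immediate is (0x18 & 0xfff) << 10.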
4866
4867
4868 // Subsequent entries in the PLT for an executable.
4869 // FIXME: This only works for 64bit
4870
4871 template<int size, bool big_endian>
4872 void
4873 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4874 unsigned char* pov,
4875 Address got_address,
4876 Address plt_address,
4877 unsigned int got_offset,
4878 unsigned int plt_offset)
4879 {
4880 memcpy(pov, this->plt_entry, this->plt_entry_size);
4881
4882 Address gotplt_entry_address = got_address + got_offset;
4883 Address plt_entry_address = plt_address + plt_offset;
4884
4885   // Fill in R_AARCH64_ADR_PREL_PG_HI21
4886 AArch64_relocate_functions<size, big_endian>::adrp(
4887 pov,
4888 gotplt_entry_address,
4889 plt_entry_address);
4890
4891 // Fill in R_AARCH64_LDST64_ABS_LO12
4892 elfcpp::Swap<32, big_endian>::writeval(
4893 pov + 4,
4894 ((this->plt_entry[1] & 0xffc003ff)
4895 | ((gotplt_entry_address & 0xff8) << 7)));
4896
4897 // Fill in R_AARCH64_ADD_ABS_LO12
4898 elfcpp::Swap<32, big_endian>::writeval(
4899 pov + 8,
4900 ((this->plt_entry[2] & 0xffc003ff)
4901 | ((gotplt_entry_address & 0xfff) <<10)));
4902
4903 }
4904
4905
4906 template<>
4907 const uint32_t
4908 Output_data_plt_aarch64_standard<32, false>::
4909 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4910 {
4911 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4912 0x90000002, /* adrp x2, 0 */
4913 0x90000003, /* adrp x3, 0 */
4914 0xb9400042, /* ldr w2, [w2, #0] */
4915 0x11000063, /* add w3, w3, 0 */
4916 0xd61f0040, /* br x2 */
4917 0xd503201f, /* nop */
4918 0xd503201f, /* nop */
4919 };
4920
4921 template<>
4922 const uint32_t
4923 Output_data_plt_aarch64_standard<32, true>::
4924 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4925 {
4926 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4927 0x90000002, /* adrp x2, 0 */
4928 0x90000003, /* adrp x3, 0 */
4929 0xb9400042, /* ldr w2, [w2, #0] */
4930 0x11000063, /* add w3, w3, 0 */
4931 0xd61f0040, /* br x2 */
4932 0xd503201f, /* nop */
4933 0xd503201f, /* nop */
4934 };
4935
4936 template<>
4937 const uint32_t
4938 Output_data_plt_aarch64_standard<64, false>::
4939 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4940 {
4941 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4942 0x90000002, /* adrp x2, 0 */
4943 0x90000003, /* adrp x3, 0 */
4944 0xf9400042, /* ldr x2, [x2, #0] */
4945 0x91000063, /* add x3, x3, 0 */
4946 0xd61f0040, /* br x2 */
4947 0xd503201f, /* nop */
4948 0xd503201f, /* nop */
4949 };
4950
4951 template<>
4952 const uint32_t
4953 Output_data_plt_aarch64_standard<64, true>::
4954 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4955 {
4956 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4957 0x90000002, /* adrp x2, 0 */
4958 0x90000003, /* adrp x3, 0 */
4959 0xf9400042, /* ldr x2, [x2, #0] */
4960 0x91000063, /* add x3, x3, 0 */
4961 0xd61f0040, /* br x2 */
4962 0xd503201f, /* nop */
4963 0xd503201f, /* nop */
4964 };
4965
4966 template<int size, bool big_endian>
4967 void
4968 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4969 unsigned char* pov,
4970 Address gotplt_address,
4971 Address plt_address,
4972 Address got_base,
4973 unsigned int tlsdesc_got_offset,
4974 unsigned int plt_offset)
4975 {
4976 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4977
4978 // move DT_TLSDESC_GOT address into x2
4979 // move .got.plt address into x3
4980 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4981 Address plt_entry_address = plt_address + plt_offset;
4982
4983 // R_AARCH64_ADR_PREL_PG_HI21
4984 AArch64_relocate_functions<size, big_endian>::adrp(
4985 pov + 4,
4986 tlsdesc_got_entry,
4987 plt_entry_address + 4);
4988
4989 // R_AARCH64_ADR_PREL_PG_HI21
4990 AArch64_relocate_functions<size, big_endian>::adrp(
4991 pov + 8,
4992 gotplt_address,
4993 plt_entry_address + 8);
4994
4995 // R_AARCH64_LDST64_ABS_LO12
4996 elfcpp::Swap<32, big_endian>::writeval(
4997 pov + 12,
4998 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4999 | ((tlsdesc_got_entry & 0xff8) << 7)));
5000
5001 // R_AARCH64_ADD_ABS_LO12
5002 elfcpp::Swap<32, big_endian>::writeval(
5003 pov + 16,
5004 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
5005 | ((gotplt_address & 0xfff) << 10)));
5006 }
5007
5008 // Write out the PLT. This uses the hand-coded instructions above,
5009 // and adjusts them as needed for the addresses assigned at layout time.
5010
5011 template<int size, bool big_endian>
5012 void
5013 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
5014 {
5015 const off_t offset = this->offset();
5016 const section_size_type oview_size =
5017 convert_to_section_size_type(this->data_size());
5018 unsigned char* const oview = of->get_output_view(offset, oview_size);
5019
5020 const off_t got_file_offset = this->got_plt_->offset();
5021 gold_assert(got_file_offset + this->got_plt_->data_size()
5022 == this->got_irelative_->offset());
5023
5024 const section_size_type got_size =
5025 convert_to_section_size_type(this->got_plt_->data_size()
5026 + this->got_irelative_->data_size());
5027 unsigned char* const got_view = of->get_output_view(got_file_offset,
5028 got_size);
5029
5030 unsigned char* pov = oview;
5031
5032 // The base address of the .plt section.
5033 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
5034 // The base address of the PLT portion of the .got section.
5035 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
5036 = this->got_plt_->address();
5037
5038 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
5039 pov += this->first_plt_entry_offset();
5040
5041 // The first three entries in .got.plt are reserved.
5042 unsigned char* got_pov = got_view;
5043 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
5044 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5045
5046 unsigned int plt_offset = this->first_plt_entry_offset();
5047 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5048 const unsigned int count = this->count_ + this->irelative_count_;
5049 for (unsigned int plt_index = 0;
5050 plt_index < count;
5051 ++plt_index,
5052 pov += this->get_plt_entry_size(),
5053 got_pov += size / 8,
5054 plt_offset += this->get_plt_entry_size(),
5055 got_offset += size / 8)
5056 {
5057 // Set and adjust the PLT entry itself.
5058 this->fill_plt_entry(pov, gotplt_address, plt_address,
5059 got_offset, plt_offset);
5060
5061 // Set the entry in the GOT, which points to plt0.
5062 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
5063 }
5064
5065 if (this->has_tlsdesc_entry())
5066 {
5067 // Set and adjust the reserved TLSDESC PLT entry.
5068 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
5069       // The base address of the .got section.
5070 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
5071 this->got_->address();
5072 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
5073 tlsdesc_got_offset, plt_offset);
5074 pov += this->get_plt_tlsdesc_entry_size();
5075 }
5076
5077 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
5078 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
5079
5080 of->write_output_view(offset, oview_size, oview);
5081 of->write_output_view(got_file_offset, got_size, got_view);
5082 }
5083
5084 // Describes how to update the immediate field of an instruction.
5085 struct AArch64_howto
5086 {
5087 // The immediate field mask.
5088 elfcpp::Elf_Xword dst_mask;
5089
5090   // The bit offset at which the relocation immediate is applied.
5091 int doffset;
5092
5093 // The second part offset, if the immediate field has two parts.
5094 // -1 if the immediate field has only one part.
5095 int doffset2;
5096 };
5097
5098 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
5099 {
5100 {0, -1, -1}, // DATA
5101 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
5102 {0xffffe0, 5, -1}, // LD [23:5]-imm19
5103 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
5104 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
5105 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
5106 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
5107 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
5108 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
5109 {0x3ffffff, 0, -1}, // B [25:0]-imm26
5110 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
5111 };
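
// For example, the INST_ADD entry above says an ADD immediate occupies bits
// [21:10] (dst_mask 0x3ffc00) and is inserted by shifting the selected value
// left by 10 (doffset) before OR-ing it into the instruction, which is
// exactly what update_view<32>() below does.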
5112
5113 // AArch64 relocate function class
5114
5115 template<int size, bool big_endian>
5116 class AArch64_relocate_functions
5117 {
5118 public:
5119 typedef enum
5120 {
5121 STATUS_OKAY, // No error during relocation.
5122 STATUS_OVERFLOW, // Relocation overflow.
5123 STATUS_BAD_RELOC, // Relocation cannot be applied.
5124 } Status;
5125
5126 typedef AArch64_relocate_functions<size, big_endian> This;
5127 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5128 typedef Relocate_info<size, big_endian> The_relocate_info;
5129 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5130 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5131 typedef Stub_table<size, big_endian> The_stub_table;
5132 typedef elfcpp::Rela<size, big_endian> The_rela;
5133 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5134
5135 // Return the page address of the address.
5136 // Page(address) = address & ~0xFFF
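// For example, Page(0x12345678) == 0x12345000.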
5137
5138 static inline AArch64_valtype
5139 Page(Address address)
5140 {
5141 return (address & (~static_cast<Address>(0xFFF)));
5142 }
5143
5144 private:
5145 // Update the instruction (pointed to by view) with the selected bits (immed).
5146 // val = (val & ~dst_mask) | (immed << doffset)
5147
5148 template<int valsize>
5149 static inline void
5150 update_view(unsigned char* view,
5151 AArch64_valtype immed,
5152 elfcpp::Elf_Xword doffset,
5153 elfcpp::Elf_Xword dst_mask)
5154 {
5155 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5156 Valtype* wv = reinterpret_cast<Valtype*>(view);
5157 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5158
5159 // Clear immediate fields.
5160 val &= ~dst_mask;
5161 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5162 static_cast<Valtype>(val | (immed << doffset)));
5163 }
5164
5165 // Update two parts of an instruction (pointed to by view) with selected
5166 // bits (immed1 and immed2).
5167 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5168
5169 template<int valsize>
5170 static inline void
5171 update_view_two_parts(
5172 unsigned char* view,
5173 AArch64_valtype immed1,
5174 AArch64_valtype immed2,
5175 elfcpp::Elf_Xword doffset1,
5176 elfcpp::Elf_Xword doffset2,
5177 elfcpp::Elf_Xword dst_mask)
5178 {
5179 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5180 Valtype* wv = reinterpret_cast<Valtype*>(view);
5181 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5182 val &= ~dst_mask;
5183 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5184 static_cast<Valtype>(val | (immed1 << doffset1) |
5185 (immed2 << doffset2)));
5186 }
5187
5188 // Update adr or adrp instruction with immed.
5189 // In adr and adrp: [30:29] immlo [23:5] immhi
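// The low two bits of immed become immlo and bits [20:2] of immed become immhi.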
5190
5191 static inline void
5192 update_adr(unsigned char* view, AArch64_valtype immed)
5193 {
5194 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5195 This::template update_view_two_parts<32>(
5196 view,
5197 immed & 0x3,
5198 (immed & 0x1ffffc) >> 2,
5199 29,
5200 5,
5201 dst_mask);
5202 }
5203
5204 // Update movz/movn instruction with bits immed.
5205 // Set instruction to movz if is_movz is true, otherwise set instruction
5206 // to movn.
5207
5208 static inline void
5209 update_movnz(unsigned char* view,
5210 AArch64_valtype immed,
5211 bool is_movz)
5212 {
5213 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5214 Valtype* wv = reinterpret_cast<Valtype*>(view);
5215 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5216
5217 const elfcpp::Elf_Xword doffset =
5218 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5219 const elfcpp::Elf_Xword dst_mask =
5220 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5221
5222 // Clear the immediate field and the opc bits.
5223 val &= ~(dst_mask | (0x3 << 29));
5224
5225 // Set instruction to movz or movn.
5226 // movz: [30:29] is 10 movn: [30:29] is 00
5227 if (is_movz)
5228 val |= (0x2 << 29);
5229
5230 elfcpp::Swap<32, big_endian>::writeval(wv,
5231 static_cast<Valtype>(val | (immed << doffset)));
5232 }
5233
5234 public:
5235
5236 // Update selected bits in text.
5237
5238 template<int valsize>
5239 static inline typename This::Status
5240 reloc_common(unsigned char* view, Address x,
5241 const AArch64_reloc_property* reloc_property)
5242 {
5243 // Select bits from X.
5244 Address immed = reloc_property->select_x_value(x);
5245
5246 // Update view.
5247 const AArch64_reloc_property::Reloc_inst inst =
5248 reloc_property->reloc_inst();
5249 // This must not be called for a data relocation or for an instruction
5250 // whose immediate field has two parts.
5251 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5252 aarch64_howto[inst].doffset != -1);
5253 This::template update_view<valsize>(view, immed,
5254 aarch64_howto[inst].doffset,
5255 aarch64_howto[inst].dst_mask);
5256
5257 // Check for overflow or misalignment if needed.
5258 return (reloc_property->checkup_x_value(x)
5259 ? This::STATUS_OKAY
5260 : This::STATUS_OVERFLOW);
5261 }
5262
5263 // Construct a B insn. Note that although we group it here with the other
5264 // relocation operations, there is actually no 'relocation' involved.
5265 static inline void
5266 construct_b(unsigned char* view, unsigned int branch_offset)
5267 {
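// 0x05 (0b000101) written at bit 26 is the opcode of an unconditional B;
// the branch offset is encoded as imm26 in units of 4 bytes.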
5268 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5269 26, 0, 0xffffffff);
5270 }
5271
5272 // Do a simple rela relocation at unaligned addresses.
5273
5274 template<int valsize>
5275 static inline typename This::Status
5276 rela_ua(unsigned char* view,
5277 const Sized_relobj_file<size, big_endian>* object,
5278 const Symbol_value<size>* psymval,
5279 AArch64_valtype addend,
5280 const AArch64_reloc_property* reloc_property)
5281 {
5282 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5283 Valtype;
5284 typename elfcpp::Elf_types<size>::Elf_Addr x =
5285 psymval->value(object, addend);
5286 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5287 static_cast<Valtype>(x));
5288 return (reloc_property->checkup_x_value(x)
5289 ? This::STATUS_OKAY
5290 : This::STATUS_OVERFLOW);
5291 }
5292
5293 // Do a simple pc-relative relocation at unaligned addresses.
5294
5295 template<int valsize>
5296 static inline typename This::Status
5297 pcrela_ua(unsigned char* view,
5298 const Sized_relobj_file<size, big_endian>* object,
5299 const Symbol_value<size>* psymval,
5300 AArch64_valtype addend,
5301 Address address,
5302 const AArch64_reloc_property* reloc_property)
5303 {
5304 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5305 Valtype;
5306 Address x = psymval->value(object, addend) - address;
5307 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5308 static_cast<Valtype>(x));
5309 return (reloc_property->checkup_x_value(x)
5310 ? This::STATUS_OKAY
5311 : This::STATUS_OVERFLOW);
5312 }
5313
5314 // Do a simple rela relocation at aligned addresses.
5315
5316 template<int valsize>
5317 static inline typename This::Status
5318 rela(
5319 unsigned char* view,
5320 const Sized_relobj_file<size, big_endian>* object,
5321 const Symbol_value<size>* psymval,
5322 AArch64_valtype addend,
5323 const AArch64_reloc_property* reloc_property)
5324 {
5325 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5326 Valtype* wv = reinterpret_cast<Valtype*>(view);
5327 Address x = psymval->value(object, addend);
5328 elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5329 return (reloc_property->checkup_x_value(x)
5330 ? This::STATUS_OKAY
5331 : This::STATUS_OVERFLOW);
5332 }
5333
5334 // Do the relocation. Update the selected bits in the text.
5335 // new_val = (val & ~dst_mask) | (immed << doffset)
5336
5337 template<int valsize>
5338 static inline typename This::Status
5339 rela_general(unsigned char* view,
5340 const Sized_relobj_file<size, big_endian>* object,
5341 const Symbol_value<size>* psymval,
5342 AArch64_valtype addend,
5343 const AArch64_reloc_property* reloc_property)
5344 {
5345 // Calculate relocation.
5346 Address x = psymval->value(object, addend);
5347 return This::template reloc_common<valsize>(view, x, reloc_property);
5348 }
5349
5350 // Do the relocation. Update the selected bits in the text.
5351 // new_val = (val & ~dst_mask) | (immed << doffset)
5352
5353 template<int valsize>
5354 static inline typename This::Status
5355 rela_general(
5356 unsigned char* view,
5357 AArch64_valtype s,
5358 AArch64_valtype addend,
5359 const AArch64_reloc_property* reloc_property)
5360 {
5361 // Calculate relocation.
5362 Address x = s + addend;
5363 return This::template reloc_common<valsize>(view, x, reloc_property);
5364 }
5365
5366 // Do a PC-relative relocation. Update the selected bits in the text.
5367 // new_val = (val & ~dst_mask) | (immed << doffset)
5368
5369 template<int valsize>
5370 static inline typename This::Status
5371 pcrela_general(
5372 unsigned char* view,
5373 const Sized_relobj_file<size, big_endian>* object,
5374 const Symbol_value<size>* psymval,
5375 AArch64_valtype addend,
5376 Address address,
5377 const AArch64_reloc_property* reloc_property)
5378 {
5379 // Calculate relocation.
5380 Address x = psymval->value(object, addend) - address;
5381 return This::template reloc_common<valsize>(view, x, reloc_property);
5382 }
5383
5384
5385 // Calculate (S + A) - address, update adr instruction.
5386
5387 static inline typename This::Status
5388 adr(unsigned char* view,
5389 const Sized_relobj_file<size, big_endian>* object,
5390 const Symbol_value<size>* psymval,
5391 Address addend,
5392 Address address,
5393 const AArch64_reloc_property* /* reloc_property */)
5394 {
5395 AArch64_valtype x = psymval->value(object, addend) - address;
5396 // Pick bits [20:0] of X.
5397 AArch64_valtype immed = x & 0x1fffff;
5398 update_adr(view, immed);
5399 // Check -2^20 <= X < 2^20
5400 return (size == 64 && Bits<21>::has_overflow((x))
5401 ? This::STATUS_OVERFLOW
5402 : This::STATUS_OKAY);
5403 }
5404
5405 // Calculate PG(S+A) - PG(address), update adrp instruction.
5406 // R_AARCH64_ADR_PREL_PG_HI21
5407
5408 static inline typename This::Status
5409 adrp(
5410 unsigned char* view,
5411 Address sa,
5412 Address address)
5413 {
5414 AArch64_valtype x = This::Page(sa) - This::Page(address);
5415 // Pick [32:12] of X.
5416 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5417 update_adr(view, immed);
5418 // Check -2^32 <= X < 2^32
5419 return (size == 64 && Bits<33>::has_overflow((x))
5420 ? This::STATUS_OVERFLOW
5421 : This::STATUS_OKAY);
5422 }
5423
5424 // Calculate PG(S+A) - PG(address), update adrp instruction.
5425 // R_AARCH64_ADR_PREL_PG_HI21
5426
5427 static inline typename This::Status
5428 adrp(unsigned char* view,
5429 const Sized_relobj_file<size, big_endian>* object,
5430 const Symbol_value<size>* psymval,
5431 Address addend,
5432 Address address,
5433 const AArch64_reloc_property* reloc_property)
5434 {
5435 Address sa = psymval->value(object, addend);
5436 AArch64_valtype x = This::Page(sa) - This::Page(address);
5437 // Pick [32:12] of X.
5438 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5439 update_adr(view, immed);
5440 return (reloc_property->checkup_x_value(x)
5441 ? This::STATUS_OKAY
5442 : This::STATUS_OVERFLOW);
5443 }
5444
5445 // Update mov[n/z] instruction. Check overflow if needed.
5446 // If X >= 0, set the instruction to movz and its immediate value to the
5447 // selected bits of X.
5448 // If X < 0, set the instruction to movn and its immediate value to the
5449 // selected bits of NOT X.
5450
5451 static inline typename This::Status
5452 movnz(unsigned char* view,
5453 AArch64_valtype x,
5454 const AArch64_reloc_property* reloc_property)
5455 {
5456 // Select bits from X.
5457 Address immed;
5458 bool is_movz;
5459 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5460 if (static_cast<SignedW>(x) >= 0)
5461 {
5462 immed = reloc_property->select_x_value(x);
5463 is_movz = true;
5464 }
5465 else
5466 {
5467 immed = reloc_property->select_x_value(~x);
5468 is_movz = false;
5469 }
5470
5471 // Update movnz instruction.
5472 update_movnz(view, immed, is_movz);
5473
5474 // Check for overflow or misalignment if needed.
5475 return (reloc_property->checkup_x_value(x)
5476 ? This::STATUS_OKAY
5477 : This::STATUS_OVERFLOW);
5478 }
5479
5480 static inline bool
5481 maybe_apply_stub(unsigned int,
5482 const The_relocate_info*,
5483 const The_rela&,
5484 unsigned char*,
5485 Address,
5486 const Sized_symbol<size>*,
5487 const Symbol_value<size>*,
5488 const Sized_relobj_file<size, big_endian>*,
5489 section_size_type);
5490
5491 }; // End of AArch64_relocate_functions
5492
5493
5494 // For a certain relocation type (usually jump/branch), test whether the
5495 // destination is out of range and needs a stub to reach it. If so, re-route
5496 // the destination of the original instruction to the stub. Note that at this
5497 // point the stub has already been generated.
5498
5499 template<int size, bool big_endian>
5500 bool
5501 AArch64_relocate_functions<size, big_endian>::
5502 maybe_apply_stub(unsigned int r_type,
5503 const The_relocate_info* relinfo,
5504 const The_rela& rela,
5505 unsigned char* view,
5506 Address address,
5507 const Sized_symbol<size>* gsym,
5508 const Symbol_value<size>* psymval,
5509 const Sized_relobj_file<size, big_endian>* object,
5510 section_size_type current_group_size)
5511 {
5512 if (parameters->options().relocatable())
5513 return false;
5514
5515 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5516 Address branch_target = psymval->value(object, 0) + addend;
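// branch_target is the raw destination (S + A); stub_type_for_reloc decides
// whether that destination is within direct-branch range of ADDRESS.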
5517 int stub_type =
5518 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5519 if (stub_type == ST_NONE)
5520 return false;
5521
5522 const The_aarch64_relobj* aarch64_relobj =
5523 static_cast<const The_aarch64_relobj*>(object);
5524 const AArch64_reloc_property* arp =
5525 aarch64_reloc_property_table->get_reloc_property(r_type);
5526 gold_assert(arp != NULL);
5527
5528 // We don't create stubs for undefined symbols, but do for weak.
5529 if (gsym
5530 && !gsym->use_plt_offset(arp->reference_flags())
5531 && gsym->is_undefined())
5532 {
5533 gold_debug(DEBUG_TARGET,
5534 "stub: looking for a stub for undefined symbol %s in file %s",
5535 gsym->name(), aarch64_relobj->name().c_str());
5536 return false;
5537 }
5538
5539 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5540 gold_assert(stub_table != NULL);
5541
5542 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5543 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5544 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5545 gold_assert(stub != NULL);
5546
5547 Address new_branch_target = stub_table->address() + stub->offset();
5548 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5549 new_branch_target - address;
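// Patch the original branch instruction so that it now targets the stub.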
5550 typename This::Status status = This::template
5551 rela_general<32>(view, branch_offset, 0, arp);
5552 if (status != This::STATUS_OKAY)
5553 gold_error(_("Stub is too far away, try a smaller value "
5554 "for '--stub-group-size'. The current value is 0x%lx."),
5555 static_cast<unsigned long>(current_group_size));
5556 return true;
5557 }
5558
5559
5560 // Group input sections for stub generation.
5561 //
5562 // We group input sections in an output section so that the total size,
5563 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5564 // unless the only input section in a group is already bigger than GROUP_SIZE.
5565 // For each group a stub table is created and placed after the last input
5566 // section of the group. If STUBS_ALWAYS_AFTER_BRANCH is false, we further
5567 // extend the group past the stub table.
5569
5570 template<int size, bool big_endian>
5571 void
5572 Target_aarch64<size, big_endian>::group_sections(
5573 Layout* layout,
5574 section_size_type group_size,
5575 bool stubs_always_after_branch,
5576 const Task* task)
5577 {
5578 // Group input sections and insert stub table
5579 Layout::Section_list section_list;
5580 layout->get_executable_sections(&section_list);
5581 for (Layout::Section_list::const_iterator p = section_list.begin();
5582 p != section_list.end();
5583 ++p)
5584 {
5585 AArch64_output_section<size, big_endian>* output_section =
5586 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5587 output_section->group_sections(group_size, stubs_always_after_branch,
5588 this, task);
5589 }
5590 }
5591
5592
5593 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5594 // section of RELOBJ.
5595
5596 template<int size, bool big_endian>
5597 AArch64_input_section<size, big_endian>*
5598 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5599 Relobj* relobj, unsigned int shndx) const
5600 {
5601 Section_id sid(relobj, shndx);
5602 typename AArch64_input_section_map::const_iterator p =
5603 this->aarch64_input_section_map_.find(sid);
5604 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5605 }
5606
5607
5608 // Make a new AArch64_input_section object.
5609
5610 template<int size, bool big_endian>
5611 AArch64_input_section<size, big_endian>*
5612 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5613 Relobj* relobj, unsigned int shndx)
5614 {
5615 Section_id sid(relobj, shndx);
5616
5617 AArch64_input_section<size, big_endian>* input_section =
5618 new AArch64_input_section<size, big_endian>(relobj, shndx);
5619 input_section->init();
5620
5621 // Register new AArch64_input_section in map for look-up.
5622 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5623 this->aarch64_input_section_map_.insert(
5624 std::make_pair(sid, input_section));
5625
5626 // Make sure that we have not already created another AArch64_input_section
5627 // for this input section.
5628 gold_assert(ins.second);
5629
5630 return input_section;
5631 }
5632
5633
5634 // Relaxation hook. This is where we do stub generation.
5635
5636 template<int size, bool big_endian>
5637 bool
5638 Target_aarch64<size, big_endian>::do_relax(
5639 int pass,
5640 const Input_objects* input_objects,
5641 Symbol_table* symtab,
5642 Layout* layout,
5643 const Task* task)
5644 {
5645 gold_assert(!parameters->options().relocatable());
5646 if (pass == 1)
5647 {
5648 // We don't handle negative stub_group_size right now.
5649 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5650 if (this->stub_group_size_ == 1)
5651 {
5652 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5653 // will fail to link. The user will have to relink with an explicit
5654 // group size option.
5655 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5656 4096 * 4;
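// That is, keep the group 16 KiB smaller than the maximum branch range so
// that stubs placed after the group remain reachable.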
5657 }
5658 group_sections(layout, this->stub_group_size_, true, task);
5659 }
5660 else
5661 {
5662 // If this is not the first pass, addresses and file offsets have
5663 // been reset at this point; set them here.
5664 for (Stub_table_iterator sp = this->stub_tables_.begin();
5665 sp != this->stub_tables_.end(); ++sp)
5666 {
5667 The_stub_table* stt = *sp;
5668 The_aarch64_input_section* owner = stt->owner();
5669 off_t off = align_address(owner->original_size(),
5670 stt->addralign());
5671 stt->set_address_and_file_offset(owner->address() + off,
5672 owner->offset() + off);
5673 }
5674 }
5675
5676 // Scan relocs for relocation stubs
5677 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5678 op != input_objects->relobj_end();
5679 ++op)
5680 {
5681 The_aarch64_relobj* aarch64_relobj =
5682 static_cast<The_aarch64_relobj*>(*op);
5683 // Lock the object so we can read from it. This is only called
5684 // single-threaded from Layout::finalize, so it is OK to lock.
5685 Task_lock_obj<Object> tl(task, aarch64_relobj);
5686 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5687 }
5688
5689 bool any_stub_table_changed = false;
5690 for (Stub_table_iterator siter = this->stub_tables_.begin();
5691 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5692 {
5693 The_stub_table* stub_table = *siter;
5694 if (stub_table->update_data_size_changed_p())
5695 {
5696 The_aarch64_input_section* owner = stub_table->owner();
5697 uint64_t address = owner->address();
5698 off_t offset = owner->offset();
5699 owner->reset_address_and_file_offset();
5700 owner->set_address_and_file_offset(address, offset);
5701
5702 any_stub_table_changed = true;
5703 }
5704 }
5705
5706 // Continue relaxation only if some stub table changed size.
5707 bool continue_relaxation = any_stub_table_changed;
5708 if (!continue_relaxation)
5709 for (Stub_table_iterator sp = this->stub_tables_.begin();
5710 (sp != this->stub_tables_.end());
5711 ++sp)
5712 (*sp)->finalize_stubs();
5713
5714 return continue_relaxation;
5715 }
5716
5717
5718 // Make a new Stub_table.
5719
5720 template<int size, bool big_endian>
5721 Stub_table<size, big_endian>*
5722 Target_aarch64<size, big_endian>::new_stub_table(
5723 AArch64_input_section<size, big_endian>* owner)
5724 {
5725 Stub_table<size, big_endian>* stub_table =
5726 new Stub_table<size, big_endian>(owner);
5727 stub_table->set_address(align_address(
5728 owner->address() + owner->data_size(), 8));
5729 stub_table->set_file_offset(owner->offset() + owner->data_size());
5730 stub_table->finalize_data_size();
5731
5732 this->stub_tables_.push_back(stub_table);
5733
5734 return stub_table;
5735 }
5736
5737
5738 template<int size, bool big_endian>
5739 uint64_t
5740 Target_aarch64<size, big_endian>::do_reloc_addend(
5741 void* arg, unsigned int r_type, uint64_t) const
5742 {
5743 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5744 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5745 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5746 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5747 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5748 gold_assert(psymval->is_tls_symbol());
5749 // The value of a TLS symbol is the offset in the TLS segment.
5750 return psymval->value(ti.object, 0);
5751 }
5752
5753 // Return the number of entries in the PLT.
5754
5755 template<int size, bool big_endian>
5756 unsigned int
5757 Target_aarch64<size, big_endian>::plt_entry_count() const
5758 {
5759 if (this->plt_ == NULL)
5760 return 0;
5761 return this->plt_->entry_count();
5762 }
5763
5764 // Return the offset of the first non-reserved PLT entry.
5765
5766 template<int size, bool big_endian>
5767 unsigned int
5768 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5769 {
5770 return this->plt_->first_plt_entry_offset();
5771 }
5772
5773 // Return the size of each PLT entry.
5774
5775 template<int size, bool big_endian>
5776 unsigned int
5777 Target_aarch64<size, big_endian>::plt_entry_size() const
5778 {
5779 return this->plt_->get_plt_entry_size();
5780 }
5781
5782 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5783
5784 template<int size, bool big_endian>
5785 void
5786 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5787 Symbol_table* symtab, Layout* layout)
5788 {
5789 if (this->tls_base_symbol_defined_)
5790 return;
5791
5792 Output_segment* tls_segment = layout->tls_segment();
5793 if (tls_segment != NULL)
5794 {
5795 // _TLS_MODULE_BASE_ always points to the beginning of tls segment.
5796 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5797 Symbol_table::PREDEFINED,
5798 tls_segment, 0, 0,
5799 elfcpp::STT_TLS,
5800 elfcpp::STB_LOCAL,
5801 elfcpp::STV_HIDDEN, 0,
5802 Symbol::SEGMENT_START,
5803 true);
5804 }
5805 this->tls_base_symbol_defined_ = true;
5806 }
5807
5808 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5809
5810 template<int size, bool big_endian>
5811 void
5812 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5813 Symbol_table* symtab, Layout* layout)
5814 {
5815 if (this->plt_ == NULL)
5816 this->make_plt_section(symtab, layout);
5817
5818 if (!this->plt_->has_tlsdesc_entry())
5819 {
5820 // Allocate the TLSDESC_GOT entry.
5821 Output_data_got_aarch64<size, big_endian>* got =
5822 this->got_section(symtab, layout);
5823 unsigned int got_offset = got->add_constant(0);
5824
5825 // Allocate the TLSDESC_PLT entry.
5826 this->plt_->reserve_tlsdesc_entry(got_offset);
5827 }
5828 }
5829
5830 // Create a GOT entry for the TLS module index.
5831
5832 template<int size, bool big_endian>
5833 unsigned int
5834 Target_aarch64<size, big_endian>::got_mod_index_entry(
5835 Symbol_table* symtab, Layout* layout,
5836 Sized_relobj_file<size, big_endian>* object)
5837 {
5838 if (this->got_mod_index_offset_ == -1U)
5839 {
5840 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5841 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5842 Output_data_got_aarch64<size, big_endian>* got =
5843 this->got_section(symtab, layout);
5844 unsigned int got_offset = got->add_constant(0);
5845 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5846 got_offset, 0);
5847 got->add_constant(0);
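// The two add_constant calls above reserve a pair of consecutive GOT slots:
// the first holds the module index and carries the DTPMOD relocation; the
// second (the DTPREL offset) stays zero.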
5848 this->got_mod_index_offset_ = got_offset;
5849 }
5850 return this->got_mod_index_offset_;
5851 }
5852
5853 // Optimize the TLS relocation type based on what we know about the
5854 // symbol. IS_FINAL is true if the final address of this symbol is
5855 // known at link time.
5856
5857 template<int size, bool big_endian>
5858 tls::Tls_optimization
5859 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5860 int r_type)
5861 {
5862 // If we are generating a shared library, then we can't do anything
5863 // in the linker.
5864 if (parameters->options().shared())
5865 return tls::TLSOPT_NONE;
5866
5867 switch (r_type)
5868 {
5869 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5870 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5871 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5872 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5873 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5874 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5875 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5876 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5877 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5878 case elfcpp::R_AARCH64_TLSDESC_LDR:
5879 case elfcpp::R_AARCH64_TLSDESC_ADD:
5880 case elfcpp::R_AARCH64_TLSDESC_CALL:
5881 // These are General-Dynamic which permits fully general TLS
5882 // access. Since we know that we are generating an executable,
5883 // we can convert this to Initial-Exec. If we also know that
5884 // this is a local symbol, we can further switch to Local-Exec.
5885 if (is_final)
5886 return tls::TLSOPT_TO_LE;
5887 return tls::TLSOPT_TO_IE;
5888
5889 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5890 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5891 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5892 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5893 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5894 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5895 // These are Local-Dynamic, which refer to local symbols in the
5896 // dynamic TLS block. Since we know that we are generating an
5897 // executable, we can switch to Local-Exec.
5898 return tls::TLSOPT_TO_LE;
5899
5900 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5901 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5902 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5903 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5904 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5905 // These are Initial-Exec relocs which get the thread offset
5906 // from the GOT. If we know that we are linking against the
5907 // local symbol, we can switch to Local-Exec, which links the
5908 // thread offset into the instruction.
5909 if (is_final)
5910 return tls::TLSOPT_TO_LE;
5911 return tls::TLSOPT_NONE;
5912
5913 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5914 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5915 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5916 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5917 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5918 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5919 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5920 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5921 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
5922 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
5923 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
5924 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
5925 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
5926 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
5927 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
5928 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
5929 // When we already have Local-Exec, there is nothing further we
5930 // can do.
5931 return tls::TLSOPT_NONE;
5932
5933 default:
5934 gold_unreachable();
5935 }
5936 }
5937
5938 // Returns true if this relocation type could be that of a function pointer.
5939
5940 template<int size, bool big_endian>
5941 inline bool
5942 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5943 unsigned int r_type)
5944 {
5945 switch (r_type)
5946 {
5947 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5948 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5949 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5950 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5951 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5952 {
5953 return true;
5954 }
5955 }
5956 return false;
5957 }
5958
5959 // For safe ICF, scan a relocation for a local symbol to check if it
5960 // corresponds to a function pointer being taken. In that case mark
5961 // the function whose pointer was taken as not foldable.
5962
5963 template<int size, bool big_endian>
5964 inline bool
5965 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5966 Symbol_table* ,
5967 Layout* ,
5968 Target_aarch64<size, big_endian>* ,
5969 Sized_relobj_file<size, big_endian>* ,
5970 unsigned int ,
5971 Output_section* ,
5972 const elfcpp::Rela<size, big_endian>& ,
5973 unsigned int r_type,
5974 const elfcpp::Sym<size, big_endian>&)
5975 {
5976 // When building a shared library, do not fold any local symbols.
5977 return (parameters->options().shared()
5978 || possible_function_pointer_reloc(r_type));
5979 }
5980
5981 // For safe ICF, scan a relocation for a global symbol to check if it
5982 // corresponds to a function pointer being taken. In that case mark
5983 // the function whose pointer was taken as not foldable.
5984
5985 template<int size, bool big_endian>
5986 inline bool
5987 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5988 Symbol_table* ,
5989 Layout* ,
5990 Target_aarch64<size, big_endian>* ,
5991 Sized_relobj_file<size, big_endian>* ,
5992 unsigned int ,
5993 Output_section* ,
5994 const elfcpp::Rela<size, big_endian>& ,
5995 unsigned int r_type,
5996 Symbol* gsym)
5997 {
5998 // When building a shared library, do not fold symbols whose visibility
5999 // is hidden, internal or protected.
6000 return ((parameters->options().shared()
6001 && (gsym->visibility() == elfcpp::STV_INTERNAL
6002 || gsym->visibility() == elfcpp::STV_PROTECTED
6003 || gsym->visibility() == elfcpp::STV_HIDDEN))
6004 || possible_function_pointer_reloc(r_type));
6005 }
6006
6007 // Report an unsupported relocation against a local symbol.
6008
6009 template<int size, bool big_endian>
6010 void
6011 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
6012 Sized_relobj_file<size, big_endian>* object,
6013 unsigned int r_type)
6014 {
6015 gold_error(_("%s: unsupported reloc %u against local symbol"),
6016 object->name().c_str(), r_type);
6017 }
6018
6019 // We are about to emit a dynamic relocation of type R_TYPE. If the
6020 // dynamic linker does not support it, issue an error.
6021
6022 template<int size, bool big_endian>
6023 void
6024 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
6025 unsigned int r_type)
6026 {
6027 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
6028
6029 switch (r_type)
6030 {
6031 // These are the relocation types supported by glibc for AARCH64.
6032 case elfcpp::R_AARCH64_NONE:
6033 case elfcpp::R_AARCH64_COPY:
6034 case elfcpp::R_AARCH64_GLOB_DAT:
6035 case elfcpp::R_AARCH64_JUMP_SLOT:
6036 case elfcpp::R_AARCH64_RELATIVE:
6037 case elfcpp::R_AARCH64_TLS_DTPREL64:
6038 case elfcpp::R_AARCH64_TLS_DTPMOD64:
6039 case elfcpp::R_AARCH64_TLS_TPREL64:
6040 case elfcpp::R_AARCH64_TLSDESC:
6041 case elfcpp::R_AARCH64_IRELATIVE:
6042 case elfcpp::R_AARCH64_ABS32:
6043 case elfcpp::R_AARCH64_ABS64:
6044 return;
6045
6046 default:
6047 break;
6048 }
6049
6050 // This prevents us from issuing more than one error per reloc
6051 // section. But we can still wind up issuing more than one
6052 // error per object file.
6053 if (this->issued_non_pic_error_)
6054 return;
6055 gold_assert(parameters->options().output_is_position_independent());
6056 object->error(_("requires unsupported dynamic reloc; "
6057 "recompile with -fPIC"));
6058 this->issued_non_pic_error_ = true;
6059 return;
6060 }
6061
6062 // Return whether we need to make a PLT entry for a relocation of the
6063 // given type against a STT_GNU_IFUNC symbol.
6064
6065 template<int size, bool big_endian>
6066 bool
6067 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
6068 Sized_relobj_file<size, big_endian>* object,
6069 unsigned int r_type)
6070 {
6071 const AArch64_reloc_property* arp =
6072 aarch64_reloc_property_table->get_reloc_property(r_type);
6073 gold_assert(arp != NULL);
6074
6075 int flags = arp->reference_flags();
6076 if (flags & Symbol::TLS_REF)
6077 {
6078 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
6079 object->name().c_str(), arp->name().c_str());
6080 return false;
6081 }
6082 return flags != 0;
6083 }
6084
6085 // Scan a relocation for a local symbol.
6086
6087 template<int size, bool big_endian>
6088 inline void
6089 Target_aarch64<size, big_endian>::Scan::local(
6090 Symbol_table* symtab,
6091 Layout* layout,
6092 Target_aarch64<size, big_endian>* target,
6093 Sized_relobj_file<size, big_endian>* object,
6094 unsigned int data_shndx,
6095 Output_section* output_section,
6096 const elfcpp::Rela<size, big_endian>& rela,
6097 unsigned int r_type,
6098 const elfcpp::Sym<size, big_endian>& lsym,
6099 bool is_discarded)
6100 {
6101 if (is_discarded)
6102 return;
6103
6104 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6105 Reloc_section;
6106 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6107
6108 // A local STT_GNU_IFUNC symbol may require a PLT entry.
6109 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
6110 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
6111 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
6112
6113 switch (r_type)
6114 {
6115 case elfcpp::R_AARCH64_NONE:
6116 break;
6117
6118 case elfcpp::R_AARCH64_ABS32:
6119 case elfcpp::R_AARCH64_ABS16:
6120 if (parameters->options().output_is_position_independent())
6121 {
6122 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6123 object->name().c_str(), r_type);
6124 }
6125 break;
6126
6127 case elfcpp::R_AARCH64_ABS64:
6128 // If building a shared library or PIE, we need to mark this as a dynamic
6129 // relocation, so that the dynamic loader can relocate it.
6130 if (parameters->options().output_is_position_independent())
6131 {
6132 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6133 rela_dyn->add_local_relative(object, r_sym,
6134 elfcpp::R_AARCH64_RELATIVE,
6135 output_section,
6136 data_shndx,
6137 rela.get_r_offset(),
6138 rela.get_r_addend(),
6139 is_ifunc);
6140 }
6141 break;
6142
6143 case elfcpp::R_AARCH64_PREL64:
6144 case elfcpp::R_AARCH64_PREL32:
6145 case elfcpp::R_AARCH64_PREL16:
6146 break;
6147
6148 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6149 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6150 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6151 // The above relocations are used to access GOT entries.
6152 {
6153 Output_data_got_aarch64<size, big_endian>* got =
6154 target->got_section(symtab, layout);
6155 bool is_new = false;
6156 // This symbol requires a GOT entry.
6157 if (is_ifunc)
6158 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6159 else
6160 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
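// A fresh GOT entry holds a link-time address, so in a position-independent
// output it also needs a RELATIVE dynamic relocation to let the loader
// adjust it for the load address.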
6161 if (is_new && parameters->options().output_is_position_independent())
6162 target->rela_dyn_section(layout)->
6163 add_local_relative(object,
6164 r_sym,
6165 elfcpp::R_AARCH64_RELATIVE,
6166 got,
6167 object->local_got_offset(r_sym,
6168 GOT_TYPE_STANDARD),
6169 0,
6170 false);
6171 }
6172 break;
6173
6174 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6175 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6176 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6177 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6178 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6179 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6180 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6181 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6182 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6183 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6184 if (parameters->options().output_is_position_independent())
6185 {
6186 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6187 object->name().c_str(), r_type);
6188 }
6189 break;
6190
6191 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6192 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6193 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6194 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6195 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6196 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6197 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6198 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6199 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6200 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6201 break;
6202
6203 // Control flow, pc-relative. We don't need to do anything for a relative
6204 // addressing relocation against a local symbol if it does not reference
6205 // the GOT.
6206 case elfcpp::R_AARCH64_TSTBR14:
6207 case elfcpp::R_AARCH64_CONDBR19:
6208 case elfcpp::R_AARCH64_JUMP26:
6209 case elfcpp::R_AARCH64_CALL26:
6210 break;
6211
6212 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6213 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6214 {
6215 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6216 optimize_tls_reloc(!parameters->options().shared(), r_type);
6217 if (tlsopt == tls::TLSOPT_TO_LE)
6218 break;
6219
6220 layout->set_has_static_tls();
6221 // Create a GOT entry for the tp-relative offset.
6222 if (!parameters->doing_static_link())
6223 {
6224 Output_data_got_aarch64<size, big_endian>* got =
6225 target->got_section(symtab, layout);
6226 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6227 target->rela_dyn_section(layout),
6228 elfcpp::R_AARCH64_TLS_TPREL64);
6229 }
6230 else if (!object->local_has_got_offset(r_sym,
6231 GOT_TYPE_TLS_OFFSET))
6232 {
6233 Output_data_got_aarch64<size, big_endian>* got =
6234 target->got_section(symtab, layout);
6235 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6236 unsigned int got_offset =
6237 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6238 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6239 gold_assert(addend == 0);
6240 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6241 object, r_sym);
6242 }
6243 }
6244 break;
6245
6246 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6247 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6248 {
6249 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6250 optimize_tls_reloc(!parameters->options().shared(), r_type);
6251 if (tlsopt == tls::TLSOPT_TO_LE)
6252 {
6253 layout->set_has_static_tls();
6254 break;
6255 }
6256 gold_assert(tlsopt == tls::TLSOPT_NONE);
6257
6258 Output_data_got_aarch64<size, big_endian>* got =
6259 target->got_section(symtab, layout);
6260 got->add_local_pair_with_rel(object, r_sym, data_shndx,
6261 GOT_TYPE_TLS_PAIR,
6262 target->rela_dyn_section(layout),
6263 elfcpp::R_AARCH64_TLS_DTPMOD64);
6264 }
6265 break;
6266
6267 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6268 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6269 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6270 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6271 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6272 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6273 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6274 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6275 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
6276 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6277 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
6278 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6279 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
6280 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6281 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
6282 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6283 {
6284 layout->set_has_static_tls();
6285 bool output_is_shared = parameters->options().shared();
6286 if (output_is_shared)
6287 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6288 object->name().c_str(), r_type);
6289 }
6290 break;
6291
6292 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6293 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6294 {
6295 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6296 optimize_tls_reloc(!parameters->options().shared(), r_type);
6297 if (tlsopt == tls::TLSOPT_NONE)
6298 {
6299 // Create a GOT entry for the module index.
6300 target->got_mod_index_entry(symtab, layout, object);
6301 }
6302 else if (tlsopt != tls::TLSOPT_TO_LE)
6303 unsupported_reloc_local(object, r_type);
6304 }
6305 break;
6306
6307 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6308 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6309 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6310 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6311 break;
6312
6313 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6314 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6315 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6316 {
6317 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6318 optimize_tls_reloc(!parameters->options().shared(), r_type);
6319 target->define_tls_base_symbol(symtab, layout);
6320 if (tlsopt == tls::TLSOPT_NONE)
6321 {
6322 // Create reserved PLT and GOT entries for the resolver.
6323 target->reserve_tlsdesc_entries(symtab, layout);
6324
6325 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6326 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6327 // entry needs to be in an area in .got.plt, not .got. Call
6328 // got_section to make sure the section has been created.
6329 target->got_section(symtab, layout);
6330 Output_data_got<size, big_endian>* got =
6331 target->got_tlsdesc_section();
6332 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6333 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6334 {
6335 unsigned int got_offset = got->add_constant(0);
6336 got->add_constant(0);
6337 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6338 got_offset);
6339 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6340 // We store the arguments we need in a vector, and use
6341 // the index into the vector as the parameter to pass
6342 // to the target specific routines.
6343 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6344 void* arg = reinterpret_cast<void*>(intarg);
6345 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6346 got, got_offset, 0);
6347 }
6348 }
6349 else if (tlsopt != tls::TLSOPT_TO_LE)
6350 unsupported_reloc_local(object, r_type);
6351 }
6352 break;
6353
6354 case elfcpp::R_AARCH64_TLSDESC_CALL:
6355 break;
6356
6357 default:
6358 unsupported_reloc_local(object, r_type);
6359 }
6360 }
6361
6362
6363 // Report an unsupported relocation against a global symbol.
6364
6365 template<int size, bool big_endian>
6366 void
6367 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6368 Sized_relobj_file<size, big_endian>* object,
6369 unsigned int r_type,
6370 Symbol* gsym)
6371 {
6372 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6373 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6374 }
6375
6376 template<int size, bool big_endian>
6377 inline void
6378 Target_aarch64<size, big_endian>::Scan::global(
6379 Symbol_table* symtab,
6380 Layout* layout,
6381 Target_aarch64<size, big_endian>* target,
6382 Sized_relobj_file<size, big_endian>* object,
6383 unsigned int data_shndx,
6384 Output_section* output_section,
6385 const elfcpp::Rela<size, big_endian>& rela,
6386 unsigned int r_type,
6387 Symbol* gsym)
6388 {
6389 // A STT_GNU_IFUNC symbol may require a PLT entry.
6390 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6391 && this->reloc_needs_plt_for_ifunc(object, r_type))
6392 target->make_plt_entry(symtab, layout, gsym);
6393
6394 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6395 Reloc_section;
6396 const AArch64_reloc_property* arp =
6397 aarch64_reloc_property_table->get_reloc_property(r_type);
6398 gold_assert(arp != NULL);
6399
6400 switch (r_type)
6401 {
6402 case elfcpp::R_AARCH64_NONE:
6403 break;
6404
6405 case elfcpp::R_AARCH64_ABS16:
6406 case elfcpp::R_AARCH64_ABS32:
6407 case elfcpp::R_AARCH64_ABS64:
6408 {
6409 // Make a PLT entry if necessary.
6410 if (gsym->needs_plt_entry())
6411 {
6412 target->make_plt_entry(symtab, layout, gsym);
6413 // Since this is not a PC-relative relocation, we may be
6414 // taking the address of a function. In that case we need to
6415 // set the entry in the dynamic symbol table to the address of
6416 // the PLT entry.
6417 if (gsym->is_from_dynobj() && !parameters->options().shared())
6418 gsym->set_needs_dynsym_value();
6419 }
6420 // Make a dynamic relocation if necessary.
6421 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6422 {
6423 if (!parameters->options().output_is_position_independent()
6424 && gsym->may_need_copy_reloc())
6425 {
6426 target->copy_reloc(symtab, layout, object,
6427 data_shndx, output_section, gsym, rela);
6428 }
6429 else if (r_type == elfcpp::R_AARCH64_ABS64
6430 && gsym->type() == elfcpp::STT_GNU_IFUNC
6431 && gsym->can_use_relative_reloc(false)
6432 && !gsym->is_from_dynobj()
6433 && !gsym->is_undefined()
6434 && !gsym->is_preemptible())
6435 {
6436 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6437 // symbol. This makes a function address in a PIE executable
6438 // match the address in a shared library that it links against.
6439 Reloc_section* rela_dyn =
6440 target->rela_irelative_section(layout);
6441 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6442 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6443 output_section, object,
6444 data_shndx,
6445 rela.get_r_offset(),
6446 rela.get_r_addend());
6447 }
6448 else if (r_type == elfcpp::R_AARCH64_ABS64
6449 && gsym->can_use_relative_reloc(false))
6450 {
6451 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6452 rela_dyn->add_global_relative(gsym,
6453 elfcpp::R_AARCH64_RELATIVE,
6454 output_section,
6455 object,
6456 data_shndx,
6457 rela.get_r_offset(),
6458 rela.get_r_addend(),
6459 false);
6460 }
6461 else
6462 {
6463 check_non_pic(object, r_type);
6464 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6465 rela_dyn = target->rela_dyn_section(layout);
6466 rela_dyn->add_global(
6467 gsym, r_type, output_section, object,
6468 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6469 }
6470 }
6471 }
6472 break;
6473
6474 case elfcpp::R_AARCH64_PREL16:
6475 case elfcpp::R_AARCH64_PREL32:
6476 case elfcpp::R_AARCH64_PREL64:
6477 // This is used to fill the GOT absolute address.
6478 if (gsym->needs_plt_entry())
6479 {
6480 target->make_plt_entry(symtab, layout, gsym);
6481 }
6482 break;
6483
6484 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6485 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6486 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6487 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6488 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6489 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6490 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6491 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6492 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6493 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6494 if (parameters->options().output_is_position_independent())
6495 {
6496 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6497 object->name().c_str(), r_type);
6498 }
6499 break;
6500
6501 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6502 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6503 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6504 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6505 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6506 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6507 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6508 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6509 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6510 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6511 {
6512 if (gsym->needs_plt_entry())
6513 target->make_plt_entry(symtab, layout, gsym);
6514 // Make a dynamic relocation if necessary.
6515 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6516 {
6517 if (parameters->options().output_is_executable()
6518 && gsym->may_need_copy_reloc())
6519 {
6520 target->copy_reloc(symtab, layout, object,
6521 data_shndx, output_section, gsym, rela);
6522 }
6523 }
6524 break;
6525 }
6526
6527 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6528 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6529 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6530 {
6531 // The above relocations are used to access GOT entries.
6532 // Note that a GOT entry holds the *address* of a symbol.
6533 // The symbol requires a GOT entry.
6534 Output_data_got_aarch64<size, big_endian>* got =
6535 target->got_section(symtab, layout);
6536 if (gsym->final_value_is_known())
6537 {
6538 // For a STT_GNU_IFUNC symbol we want the PLT address.
6539 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6540 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6541 else
6542 got->add_global(gsym, GOT_TYPE_STANDARD);
6543 }
6544 else
6545 {
6546 // If this symbol is not fully resolved, we need to add a dynamic
6547 // relocation for it.
6548 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6549
6550 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6551 //
6552 // 1) The symbol may be defined in some other module.
6553 // 2) We are building a shared library and this is a protected
6554 // symbol; using GLOB_DAT means that the dynamic linker can use
6555 // the address of the PLT in the main executable when appropriate
6556 // so that function address comparisons work.
6557 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6558 // again so that function address comparisons work.
6559 if (gsym->is_from_dynobj()
6560 || gsym->is_undefined()
6561 || gsym->is_preemptible()
6562 || (gsym->visibility() == elfcpp::STV_PROTECTED
6563 && parameters->options().shared())
6564 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6565 && parameters->options().output_is_position_independent()))
6566 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6567 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6568 else
6569 {
6570 // For a STT_GNU_IFUNC symbol we want to write the PLT
6571 // offset into the GOT, so that function pointer
6572 // comparisons work correctly.
6573 bool is_new;
6574 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6575 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6576 else
6577 {
6578 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6579 // Tell the dynamic linker to use the PLT address
6580 // when resolving relocations.
6581 if (gsym->is_from_dynobj()
6582 && !parameters->options().shared())
6583 gsym->set_needs_dynsym_value();
6584 }
6585 if (is_new)
6586 {
6587 rela_dyn->add_global_relative(
6588 gsym, elfcpp::R_AARCH64_RELATIVE,
6589 got,
6590 gsym->got_offset(GOT_TYPE_STANDARD),
6591 0,
6592 false);
6593 }
6594 }
6595 }
6596 break;
6597 }
6598
6599 case elfcpp::R_AARCH64_TSTBR14:
6600 case elfcpp::R_AARCH64_CONDBR19:
6601 case elfcpp::R_AARCH64_JUMP26:
6602 case elfcpp::R_AARCH64_CALL26:
6603 {
6604 if (gsym->final_value_is_known())
6605 break;
6606
6607 if (gsym->is_defined() &&
6608 !gsym->is_from_dynobj() &&
6609 !gsym->is_preemptible())
6610 break;
6611
6612 // Make a PLT entry for the function call.
6613 target->make_plt_entry(symtab, layout, gsym);
6614 break;
6615 }
6616
6617 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6618 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6619 {
6620 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6621 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6622 if (tlsopt == tls::TLSOPT_TO_LE)
6623 {
6624 layout->set_has_static_tls();
6625 break;
6626 }
6627 gold_assert(tlsopt == tls::TLSOPT_NONE);
6628
6629 // General dynamic.
6630 Output_data_got_aarch64<size, big_endian>* got =
6631 target->got_section(symtab, layout);
6632 // Create 2 consecutive entries for module index and offset.
6633 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6634 target->rela_dyn_section(layout),
6635 elfcpp::R_AARCH64_TLS_DTPMOD64,
6636 elfcpp::R_AARCH64_TLS_DTPREL64);
6637 }
6638 break;
6639
6640 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6641 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6642 {
6643 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6644 optimize_tls_reloc(!parameters->options().shared(), r_type);
6645 if (tlsopt == tls::TLSOPT_NONE)
6646 {
6647 // Create a GOT entry for the module index.
6648 target->got_mod_index_entry(symtab, layout, object);
6649 }
6650 else if (tlsopt != tls::TLSOPT_TO_LE)
6651 unsupported_reloc_local(object, r_type);
6652 }
6653 break;
6654
6655 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6656 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6657 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6658 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6659 break;
6660
6661 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6662 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6663 {
6664 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6665 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6666 if (tlsopt == tls::TLSOPT_TO_LE)
6667 break;
6668
6669 layout->set_has_static_tls();
6670 // Create a GOT entry for the tp-relative offset.
6671 Output_data_got_aarch64<size, big_endian>* got
6672 = target->got_section(symtab, layout);
6673 if (!parameters->doing_static_link())
6674 {
6675 got->add_global_with_rel(
6676 gsym, GOT_TYPE_TLS_OFFSET,
6677 target->rela_dyn_section(layout),
6678 elfcpp::R_AARCH64_TLS_TPREL64);
6679 }
6680 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6681 {
6682 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6683 unsigned int got_offset =
6684 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6685 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6686 gold_assert(addend == 0);
6687 got->add_static_reloc(got_offset,
6688 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6689 }
6690 }
6691 break;
6692
6693 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6694 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6695 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6696 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6697 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6698 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6699 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6700 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6701 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
6702 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6703 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
6704 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6705 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
6706 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6707 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
6708 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: // Local executable
6709 layout->set_has_static_tls();
6710 if (parameters->options().shared())
6711 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6712 object->name().c_str(), r_type);
6713 break;
6714
6715 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6716 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6717 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6718 {
6719 target->define_tls_base_symbol(symtab, layout);
6720 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6721 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6722 if (tlsopt == tls::TLSOPT_NONE)
6723 {
6724 // Create reserved PLT and GOT entries for the resolver.
6725 target->reserve_tlsdesc_entries(symtab, layout);
6726
6727 // Create a double GOT entry with an R_AARCH64_TLSDESC
6728 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6729 // entry needs to be in an area in .got.plt, not .got. Call
6730 // got_section to make sure the section has been created.
6731 target->got_section(symtab, layout);
6732 Output_data_got<size, big_endian>* got =
6733 target->got_tlsdesc_section();
6734 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6735 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6736 elfcpp::R_AARCH64_TLSDESC, 0);
6737 }
6738 else if (tlsopt == tls::TLSOPT_TO_IE)
6739 {
6740 // Create a GOT entry for the tp-relative offset.
6741 Output_data_got<size, big_endian>* got
6742 = target->got_section(symtab, layout);
6743 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6744 target->rela_dyn_section(layout),
6745 elfcpp::R_AARCH64_TLS_TPREL64);
6746 }
6747 else if (tlsopt != tls::TLSOPT_TO_LE)
6748 unsupported_reloc_global(object, r_type, gsym);
6749 }
6750 break;
6751
6752 case elfcpp::R_AARCH64_TLSDESC_CALL:
6753 break;
6754
6755 default:
6756 gold_error(_("%s: unsupported reloc type in global scan"),
6757 aarch64_reloc_property_table->
6758 reloc_name_in_error_message(r_type).c_str());
6759 }
6760 return;
6761 } // End of Scan::global
6762
6763
6764 // Create the PLT section.
6765 template<int size, bool big_endian>
6766 void
6767 Target_aarch64<size, big_endian>::make_plt_section(
6768 Symbol_table* symtab, Layout* layout)
6769 {
6770 if (this->plt_ == NULL)
6771 {
6772 // Create the GOT section first.
6773 this->got_section(symtab, layout);
6774
6775 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6776 this->got_irelative_);
6777
6778 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6779 (elfcpp::SHF_ALLOC
6780 | elfcpp::SHF_EXECINSTR),
6781 this->plt_, ORDER_PLT, false);
6782
6783 // Make the sh_info field of .rela.plt point to .plt.
6784 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6785 rela_plt_os->set_info_section(this->plt_->output_section());
6786 }
6787 }
6788
6789 // Return the section for TLSDESC relocations.
6790
6791 template<int size, bool big_endian>
6792 typename Target_aarch64<size, big_endian>::Reloc_section*
6793 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6794 {
6795 return this->plt_section()->rela_tlsdesc(layout);
6796 }
6797
6798 // Create a PLT entry for a global symbol.
6799
6800 template<int size, bool big_endian>
6801 void
6802 Target_aarch64<size, big_endian>::make_plt_entry(
6803 Symbol_table* symtab,
6804 Layout* layout,
6805 Symbol* gsym)
6806 {
6807 if (gsym->has_plt_offset())
6808 return;
6809
6810 if (this->plt_ == NULL)
6811 this->make_plt_section(symtab, layout);
6812
6813 this->plt_->add_entry(symtab, layout, gsym);
6814 }
6815
6816 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6817
6818 template<int size, bool big_endian>
6819 void
6820 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6821 Symbol_table* symtab, Layout* layout,
6822 Sized_relobj_file<size, big_endian>* relobj,
6823 unsigned int local_sym_index)
6824 {
6825 if (relobj->local_has_plt_offset(local_sym_index))
6826 return;
6827 if (this->plt_ == NULL)
6828 this->make_plt_section(symtab, layout);
6829 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6830 relobj,
6831 local_sym_index);
6832 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6833 }
6834
6835 template<int size, bool big_endian>
6836 void
6837 Target_aarch64<size, big_endian>::gc_process_relocs(
6838 Symbol_table* symtab,
6839 Layout* layout,
6840 Sized_relobj_file<size, big_endian>* object,
6841 unsigned int data_shndx,
6842 unsigned int sh_type,
6843 const unsigned char* prelocs,
6844 size_t reloc_count,
6845 Output_section* output_section,
6846 bool needs_special_offset_handling,
6847 size_t local_symbol_count,
6848 const unsigned char* plocal_symbols)
6849 {
6850 typedef Target_aarch64<size, big_endian> Aarch64;
6851 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6852 Classify_reloc;
6853
6854 if (sh_type == elfcpp::SHT_REL)
6855 {
6856 return;
6857 }
6858
6859 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6860 symtab,
6861 layout,
6862 this,
6863 object,
6864 data_shndx,
6865 prelocs,
6866 reloc_count,
6867 output_section,
6868 needs_special_offset_handling,
6869 local_symbol_count,
6870 plocal_symbols);
6871 }
6872
6873 // Scan relocations for a section.
6874
6875 template<int size, bool big_endian>
6876 void
6877 Target_aarch64<size, big_endian>::scan_relocs(
6878 Symbol_table* symtab,
6879 Layout* layout,
6880 Sized_relobj_file<size, big_endian>* object,
6881 unsigned int data_shndx,
6882 unsigned int sh_type,
6883 const unsigned char* prelocs,
6884 size_t reloc_count,
6885 Output_section* output_section,
6886 bool needs_special_offset_handling,
6887 size_t local_symbol_count,
6888 const unsigned char* plocal_symbols)
6889 {
6890 typedef Target_aarch64<size, big_endian> Aarch64;
6891 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6892 Classify_reloc;
6893
6894 if (sh_type == elfcpp::SHT_REL)
6895 {
6896 gold_error(_("%s: unsupported REL reloc section"),
6897 object->name().c_str());
6898 return;
6899 }
6900
6901 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6902 symtab,
6903 layout,
6904 this,
6905 object,
6906 data_shndx,
6907 prelocs,
6908 reloc_count,
6909 output_section,
6910 needs_special_offset_handling,
6911 local_symbol_count,
6912 plocal_symbols);
6913 }
6914
6915 // Return the value to use for a dynamic symbol which requires special
6916 // treatment. This is how we support equality comparisons of function
6917 // pointers across shared library boundaries, as described in the
6918 // processor specific ABI supplement.
6919
6920 template<int size, bool big_endian>
6921 uint64_t
6922 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6923 {
6924 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6925 return this->plt_address_for_global(gsym);
6926 }
6927
6928
6929 // Finalize the sections.
6930
6931 template<int size, bool big_endian>
6932 void
6933 Target_aarch64<size, big_endian>::do_finalize_sections(
6934 Layout* layout,
6935 const Input_objects*,
6936 Symbol_table* symtab)
6937 {
6938 const Reloc_section* rel_plt = (this->plt_ == NULL
6939 ? NULL
6940 : this->plt_->rela_plt());
6941 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6942 this->rela_dyn_, true, false);
6943
6944 // Emit any relocs we saved in an attempt to avoid generating COPY
6945 // relocs.
6946 if (this->copy_relocs_.any_saved_relocs())
6947 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6948
6949 // Fill in some more dynamic tags.
6950 Output_data_dynamic* const odyn = layout->dynamic_data();
6951 if (odyn != NULL)
6952 {
6953 if (this->plt_ != NULL
6954 && this->plt_->output_section() != NULL
6955 && this->plt_->has_tlsdesc_entry())
6956 {
6957 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6958 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6959 this->got_->finalize_data_size();
6960 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6961 this->plt_, plt_offset);
6962 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6963 this->got_, got_offset);
6964 }
6965 }
6966
6967 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6968 // the .got.plt section.
6969 Symbol* sym = this->global_offset_table_;
6970 if (sym != NULL)
6971 {
6972 uint64_t data_size = this->got_plt_->current_data_size();
6973 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6974
6975 // If the .got section is more than 0x8000 bytes, we add
6976 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6977 // bit relocations have a greater chance of working.
6978 if (data_size >= 0x8000)
6979 symtab->get_sized_symbol<size>(sym)->set_value(
6980 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6981 }
6982
6983 if (parameters->doing_static_link()
6984 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6985 {
6986 // If linking statically, make sure that the __rela_iplt symbols
6987 // were defined if necessary, even if we didn't create a PLT.
6988 static const Define_symbol_in_segment syms[] =
6989 {
6990 {
6991 "__rela_iplt_start", // name
6992 elfcpp::PT_LOAD, // segment_type
6993 elfcpp::PF_W, // segment_flags_set
6994 elfcpp::PF(0), // segment_flags_clear
6995 0, // value
6996 0, // size
6997 elfcpp::STT_NOTYPE, // type
6998 elfcpp::STB_GLOBAL, // binding
6999 elfcpp::STV_HIDDEN, // visibility
7000 0, // nonvis
7001 Symbol::SEGMENT_START, // offset_from_base
7002 true // only_if_ref
7003 },
7004 {
7005 "__rela_iplt_end", // name
7006 elfcpp::PT_LOAD, // segment_type
7007 elfcpp::PF_W, // segment_flags_set
7008 elfcpp::PF(0), // segment_flags_clear
7009 0, // value
7010 0, // size
7011 elfcpp::STT_NOTYPE, // type
7012 elfcpp::STB_GLOBAL, // binding
7013 elfcpp::STV_HIDDEN, // visibility
7014 0, // nonvis
7015 Symbol::SEGMENT_START, // offset_from_base
7016 true // only_if_ref
7017 }
7018 };
7019
7020 symtab->define_symbols(layout, 2, syms,
7021 layout->script_options()->saw_sections_clause());
7022 }
7023
7024 return;
7025 }
7026
7027 // Perform a relocation.
7028
7029 template<int size, bool big_endian>
7030 inline bool
7031 Target_aarch64<size, big_endian>::Relocate::relocate(
7032 const Relocate_info<size, big_endian>* relinfo,
7033 unsigned int,
7034 Target_aarch64<size, big_endian>* target,
7035 Output_section*,
7036 size_t relnum,
7037 const unsigned char* preloc,
7038 const Sized_symbol<size>* gsym,
7039 const Symbol_value<size>* psymval,
7040 unsigned char* view,
7041 typename elfcpp::Elf_types<size>::Elf_Addr address,
7042 section_size_type /* view_size */)
7043 {
7044 if (view == NULL)
7045 return true;
7046
7047 typedef AArch64_relocate_functions<size, big_endian> Reloc;
7048
7049 const elfcpp::Rela<size, big_endian> rela(preloc);
7050 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
7051 const AArch64_reloc_property* reloc_property =
7052 aarch64_reloc_property_table->get_reloc_property(r_type);
7053
7054 if (reloc_property == NULL)
7055 {
7056 std::string reloc_name =
7057 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
7058 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7059 _("cannot relocate %s in object file"),
7060 reloc_name.c_str());
7061 return true;
7062 }
7063
7064 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
7065
7066 // Pick the value to use for symbols defined in the PLT.
7067 Symbol_value<size> symval;
7068 if (gsym != NULL
7069 && gsym->use_plt_offset(reloc_property->reference_flags()))
7070 {
7071 symval.set_output_value(target->plt_address_for_global(gsym));
7072 psymval = &symval;
7073 }
7074 else if (gsym == NULL && psymval->is_ifunc_symbol())
7075 {
7076 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7077 if (object->local_has_plt_offset(r_sym))
7078 {
7079 symval.set_output_value(target->plt_address_for_local(object, r_sym));
7080 psymval = &symval;
7081 }
7082 }
7083
7084 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7085
7086 // Get the GOT offset if needed.
7087 // For aarch64, the GOT pointer points to the start of the GOT section.
7088 bool have_got_offset = false;
7089 int got_offset = 0;
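// Offsets are biased by 0x8000 once the GOT reaches 0x8000 bytes; see the
// similar adjustment of _GLOBAL_OFFSET_TABLE_ in do_finalize_sections.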
7090 int got_base = (target->got_ != NULL
7091 ? (target->got_->current_data_size() >= 0x8000
7092 ? 0x8000 : 0)
7093 : 0);
7094 switch (r_type)
7095 {
7096 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
7097 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
7098 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
7099 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
7100 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
7101 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
7102 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
7103 case elfcpp::R_AARCH64_GOTREL64:
7104 case elfcpp::R_AARCH64_GOTREL32:
7105 case elfcpp::R_AARCH64_GOT_LD_PREL19:
7106 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
7107 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7108 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7109 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7110 if (gsym != NULL)
7111 {
7112 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
7113 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
7114 }
7115 else
7116 {
7117 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7118 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
7119 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
7120 - got_base);
7121 }
7122 have_got_offset = true;
7123 break;
7124
7125 default:
7126 break;
7127 }
7128
7129 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
7130 typename elfcpp::Elf_types<size>::Elf_Addr value;
7131 switch (r_type)
7132 {
7133 case elfcpp::R_AARCH64_NONE:
7134 break;
7135
7136 case elfcpp::R_AARCH64_ABS64:
7137 if (!parameters->options().apply_dynamic_relocs()
7138 && parameters->options().output_is_position_independent()
7139 && gsym != NULL
7140 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
7141 && !gsym->can_use_relative_reloc(false))
7142 // We have generated an absolute dynamic relocation, so do not
7143 // apply the relocation statically. (Works around bugs in older
7144 // Android dynamic linkers.)
7145 break;
7146 reloc_status = Reloc::template rela_ua<64>(
7147 view, object, psymval, addend, reloc_property);
7148 break;
7149
7150 case elfcpp::R_AARCH64_ABS32:
7151 if (!parameters->options().apply_dynamic_relocs()
7152 && parameters->options().output_is_position_independent()
7153 && gsym != NULL
7154 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7155 // We have generated an absolute dynamic relocation, so do not
7156 // apply the relocation statically. (Works around bugs in older
7157 // Android dynamic linkers.)
7158 break;
7159 reloc_status = Reloc::template rela_ua<32>(
7160 view, object, psymval, addend, reloc_property);
7161 break;
7162
7163 case elfcpp::R_AARCH64_ABS16:
7164 if (!parameters->options().apply_dynamic_relocs()
7165 && parameters->options().output_is_position_independent()
7166 && gsym != NULL
7167 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7168 // We have generated an absolute dynamic relocation, so do not
7169 // apply the relocation statically. (Works around bugs in older
7170 // Android dynamic linkers.)
7171 break;
7172 reloc_status = Reloc::template rela_ua<16>(
7173 view, object, psymval, addend, reloc_property);
7174 break;
7175
7176 case elfcpp::R_AARCH64_PREL64:
7177 reloc_status = Reloc::template pcrela_ua<64>(
7178 view, object, psymval, addend, address, reloc_property);
7179 break;
7180
7181 case elfcpp::R_AARCH64_PREL32:
7182 reloc_status = Reloc::template pcrela_ua<32>(
7183 view, object, psymval, addend, address, reloc_property);
7184 break;
7185
7186 case elfcpp::R_AARCH64_PREL16:
7187 reloc_status = Reloc::template pcrela_ua<16>(
7188 view, object, psymval, addend, address, reloc_property);
7189 break;
7190
7191 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7192 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7193 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7194 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7195 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7196 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7197 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7198 reloc_status = Reloc::template rela_general<32>(
7199 view, object, psymval, addend, reloc_property);
7200 break;
7201 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7202 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7203 case elfcpp::R_AARCH64_MOVW_SABS_G2:
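// For the signed MOVW relocs the linker picks the insn form: movnz
// rewrites the insn as MOVZ or MOVN depending on the sign of the value.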
7204 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7205 reloc_property);
7206 break;
7207
7208 case elfcpp::R_AARCH64_LD_PREL_LO19:
7209 reloc_status = Reloc::template pcrela_general<32>(
7210 view, object, psymval, addend, address, reloc_property);
7211 break;
7212
7213 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7214 reloc_status = Reloc::adr(view, object, psymval, addend,
7215 address, reloc_property);
7216 break;
7217
7218 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7219 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7220 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7221 reloc_property);
7222 break;
7223
7224 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7225 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7226 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7227 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7228 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7229 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7230 reloc_status = Reloc::template rela_general<32>(
7231 view, object, psymval, addend, reloc_property);
7232 break;
7233
7234 case elfcpp::R_AARCH64_CALL26:
7235 if (this->skip_call_tls_get_addr_)
7236 {
7237 // Double check that the TLSGD insn has been optimized away.
7238 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7239 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7240 reinterpret_cast<Insntype*>(view));
7241 gold_assert((insn & 0xff000000) == 0x91000000);
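// 0x91xxxxxx is an ADD immediate; tls_gd_to_le/tls_ld_to_le write it in
// place of the original "bl __tls_get_addr".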
7242
7243 reloc_status = Reloc::STATUS_OKAY;
7244 this->skip_call_tls_get_addr_ = false;
7245 // Return false to stop further processing of this reloc.
7246 return false;
7247 }
7248 // Fall through.
7249 case elfcpp::R_AARCH64_JUMP26:
7250 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7251 gsym, psymval, object,
7252 target->stub_group_size_))
7253 break;
7254 // Fall through.
7255 case elfcpp::R_AARCH64_TSTBR14:
7256 case elfcpp::R_AARCH64_CONDBR19:
7257 reloc_status = Reloc::template pcrela_general<32>(
7258 view, object, psymval, addend, address, reloc_property);
7259 break;
7260
7261 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7262 gold_assert(have_got_offset);
7263 value = target->got_->address() + got_base + got_offset;
7264 reloc_status = Reloc::adrp(view, value + addend, address);
7265 break;
7266
7267 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7268 gold_assert(have_got_offset);
7269 value = target->got_->address() + got_base + got_offset;
7270 reloc_status = Reloc::template rela_general<32>(
7271 view, value, addend, reloc_property);
7272 break;
7273
7274 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7275 {
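// The value written is the offset of the GOT entry from the 4KB page of
// the GOT base (including any 0x8000 bias); it must be 8-byte aligned
// for the 64-bit load's scaled immediate.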
7276 gold_assert(have_got_offset);
7277 value = target->got_->address() + got_base + got_offset + addend -
7278 Reloc::Page(target->got_->address() + got_base);
7279 if ((value & 7) != 0)
7280 reloc_status = Reloc::STATUS_OVERFLOW;
7281 else
7282 reloc_status = Reloc::template reloc_common<32>(
7283 view, value, reloc_property);
7284 break;
7285 }
7286
7287 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7288 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7289 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7290 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7291 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7292 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7293 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7294 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7295 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7296 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7297 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7298 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7299 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7300 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7301 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7302 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7303 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7304 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7305 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
7306 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
7307 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
7308 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
7309 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
7310 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
7311 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
7312 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
7313 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7314 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7315 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7316 case elfcpp::R_AARCH64_TLSDESC_CALL:
7317 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7318 gsym, psymval, view, address);
7319 break;
7320
7321 // These are dynamic relocations, which are unexpected when linking.
7322 case elfcpp::R_AARCH64_COPY:
7323 case elfcpp::R_AARCH64_GLOB_DAT:
7324 case elfcpp::R_AARCH64_JUMP_SLOT:
7325 case elfcpp::R_AARCH64_RELATIVE:
7326 case elfcpp::R_AARCH64_IRELATIVE:
7327 case elfcpp::R_AARCH64_TLS_DTPREL64:
7328 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7329 case elfcpp::R_AARCH64_TLS_TPREL64:
7330 case elfcpp::R_AARCH64_TLSDESC:
7331 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7332 _("unexpected reloc %u in object file"),
7333 r_type);
7334 break;
7335
7336 default:
7337 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7338 _("unsupported reloc %s"),
7339 reloc_property->name().c_str());
7340 break;
7341 }
7342
7343 // Report any errors.
7344 switch (reloc_status)
7345 {
7346 case Reloc::STATUS_OKAY:
7347 break;
7348 case Reloc::STATUS_OVERFLOW:
7349 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7350 _("relocation overflow in %s"),
7351 reloc_property->name().c_str());
7352 break;
7353 case Reloc::STATUS_BAD_RELOC:
7354 gold_error_at_location(
7355 relinfo,
7356 relnum,
7357 rela.get_r_offset(),
7358 _("unexpected opcode while processing relocation %s"),
7359 reloc_property->name().c_str());
7360 break;
7361 default:
7362 gold_unreachable();
7363 }
7364
7365 return true;
7366 }
7367
7368
7369 template<int size, bool big_endian>
7370 inline
7371 typename AArch64_relocate_functions<size, big_endian>::Status
7372 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7373 const Relocate_info<size, big_endian>* relinfo,
7374 Target_aarch64<size, big_endian>* target,
7375 size_t relnum,
7376 const elfcpp::Rela<size, big_endian>& rela,
7377 unsigned int r_type, const Sized_symbol<size>* gsym,
7378 const Symbol_value<size>* psymval,
7379 unsigned char* view,
7380 typename elfcpp::Elf_types<size>::Elf_Addr address)
7381 {
7382 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7383 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7384
7385 Output_segment* tls_segment = relinfo->layout->tls_segment();
7386 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7387 const AArch64_reloc_property* reloc_property =
7388 aarch64_reloc_property_table->get_reloc_property(r_type);
7389 gold_assert(reloc_property != NULL);
7390
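// The access model can be relaxed when the symbol's final value is known
// at link time: a local symbol when not building a shared object, or a
// global symbol whose value is already fixed.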
7391 const bool is_final = (gsym == NULL
7392 ? !parameters->options().shared()
7393 : gsym->final_value_is_known());
7394 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7395 optimize_tls_reloc(is_final, r_type);
7396
7397 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7398 int tls_got_offset_type;
7399 switch (r_type)
7400 {
7401 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7402 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7403 {
7404 if (tlsopt == tls::TLSOPT_TO_LE)
7405 {
7406 if (tls_segment == NULL)
7407 {
7408 gold_assert(parameters->errors()->error_count() > 0
7409 || issue_undefined_symbol_error(gsym));
7410 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7411 }
7412 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7413 psymval);
7414 }
7415 else if (tlsopt == tls::TLSOPT_NONE)
7416 {
7417 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7418 // First, get the address of the GOT entry.
7419 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7420 if (gsym != NULL)
7421 {
7422 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7423 got_entry_address = target->got_->address() +
7424 gsym->got_offset(tls_got_offset_type);
7425 }
7426 else
7427 {
7428 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7429 gold_assert(
7430 object->local_has_got_offset(r_sym, tls_got_offset_type));
7431 got_entry_address = target->got_->address() +
7432 object->local_got_offset(r_sym, tls_got_offset_type);
7433 }
7434
7435 // Relocate the address into the adrp/ld or adrp/add pair.
7436 switch (r_type)
7437 {
7438 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7439 return aarch64_reloc_funcs::adrp(
7440 view, got_entry_address + addend, address);
7441
7442 break;
7443
7444 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7445 return aarch64_reloc_funcs::template rela_general<32>(
7446 view, got_entry_address, addend, reloc_property);
7447 break;
7448
7449 default:
7450 gold_unreachable();
7451 }
7452 }
7453 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7454 _("unsupported gd_to_ie relaxation on %u"),
7455 r_type);
7456 }
7457 break;
7458
7459 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7460 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7461 {
7462 if (tlsopt == tls::TLSOPT_TO_LE)
7463 {
7464 if (tls_segment == NULL)
7465 {
7466 gold_assert(parameters->errors()->error_count() > 0
7467 || issue_undefined_symbol_error(gsym));
7468 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7469 }
7470 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7471 psymval);
7472 }
7473
7474 gold_assert(tlsopt == tls::TLSOPT_NONE);
7475 // Relocate the field with the offset of the GOT entry for
7476 // the module index.
7477 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7478 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7479 target->got_->address());
7480
7481 switch (r_type)
7482 {
7483 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7484 return aarch64_reloc_funcs::adrp(
7485 view, got_entry_address + addend, address);
7486 break;
7487
7488 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7489 return aarch64_reloc_funcs::template rela_general<32>(
7490 view, got_entry_address, addend, reloc_property);
7491 break;
7492
7493 default:
7494 gold_unreachable();
7495 }
7496 }
7497 break;
7498
7499 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7500 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7501 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7502 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7503 {
7504 AArch64_address value = psymval->value(object, 0);
7505 if (tlsopt == tls::TLSOPT_TO_LE)
7506 {
7507 if (tls_segment == NULL)
7508 {
7509 gold_assert(parameters->errors()->error_count() > 0
7510 || issue_undefined_symbol_error(gsym));
7511 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7512 }
7513 }
7514 switch (r_type)
7515 {
7516 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7517 return aarch64_reloc_funcs::movnz(view, value + addend,
7518 reloc_property);
7519 break;
7520
7521 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7522 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7523 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7524 return aarch64_reloc_funcs::template rela_general<32>(
7525 view, value, addend, reloc_property);
7526 break;
7527
7528 default:
7529 gold_unreachable();
7530 }
7531 // We should never reach here.
7532 }
7533 break;
7534
7535 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7536 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7537 {
7538 if (tlsopt == tls::TLSOPT_TO_LE)
7539 {
7540 if (tls_segment == NULL)
7541 {
7542 gold_assert(parameters->errors()->error_count() > 0
7543 || issue_undefined_symbol_error(gsym));
7544 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7545 }
7546 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7547 psymval);
7548 }
7549 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7550
7551 // First, get the address of the GOT entry.
7552 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7553 if (gsym != NULL)
7554 {
7555 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7556 got_entry_address = target->got_->address() +
7557 gsym->got_offset(tls_got_offset_type);
7558 }
7559 else
7560 {
7561 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7562 gold_assert(
7563 object->local_has_got_offset(r_sym, tls_got_offset_type));
7564 got_entry_address = target->got_->address() +
7565 object->local_got_offset(r_sym, tls_got_offset_type);
7566 }
7567 // Relocate the address into the adrp/ld or adrp/add pair.
7568 switch (r_type)
7569 {
7570 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7571 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7572 address);
7573 break;
7574 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7575 return aarch64_reloc_funcs::template rela_general<32>(
7576 view, got_entry_address, addend, reloc_property);
7577 default:
7578 gold_unreachable();
7579 }
7580 }
7581 // We shall never reach here.
7582 break;
7583
7584 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7585 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7586 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7587 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7588 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7589 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7590 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7591 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7592 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
7593 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
7594 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
7595 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
7596 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
7597 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
7598 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
7599 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
7600 {
7601 gold_assert(tls_segment != NULL);
7602 AArch64_address value = psymval->value(object, 0);
7603
7604 if (!parameters->options().shared())
7605 {
7606 AArch64_address aligned_tcb_size =
7607 align_address(target->tcb_size(),
7608 tls_segment->maximum_alignment());
7609 value += aligned_tcb_size;
7610 switch (r_type)
7611 {
7612 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7613 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7614 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7615 return aarch64_reloc_funcs::movnz(view, value + addend,
7616 reloc_property);
7617 default:
7618 return aarch64_reloc_funcs::template
7619 rela_general<32>(view,
7620 value,
7621 addend,
7622 reloc_property);
7623 }
7624 }
7625 else
7626 gold_error(_("%s: unsupported TLSLE reloc %u "
7627 "in shared objects."),
7628 object->name().c_str(), r_type);
7629 }
7630 break;
7631
7632 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7633 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7634 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7635 case elfcpp::R_AARCH64_TLSDESC_CALL:
7636 {
7637 if (tlsopt == tls::TLSOPT_TO_LE)
7638 {
7639 if (tls_segment == NULL)
7640 {
7641 gold_assert(parameters->errors()->error_count() > 0
7642 || issue_undefined_symbol_error(gsym));
7643 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7644 }
7645 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7646 view, psymval);
7647 }
7648 else
7649 {
7650 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7651 ? GOT_TYPE_TLS_OFFSET
7652 : GOT_TYPE_TLS_DESC);
7653 int got_tlsdesc_offset = 0;
7654 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7655 && tlsopt == tls::TLSOPT_NONE)
7656 {
7657 // We created GOT entries in the .got.tlsdesc portion of the
7658 // .got.plt section, but the offset stored in the symbol is the
7659 // offset within .got.tlsdesc.
7660 got_tlsdesc_offset = (target->got_tlsdesc_->address()
7661 - target->got_->address());
7662 }
7663 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7664 if (gsym != NULL)
7665 {
7666 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7667 got_entry_address = target->got_->address()
7668 + got_tlsdesc_offset
7669 + gsym->got_offset(tls_got_offset_type);
7670 }
7671 else
7672 {
7673 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7674 gold_assert(
7675 object->local_has_got_offset(r_sym, tls_got_offset_type));
7676 got_entry_address = target->got_->address() +
7677 got_tlsdesc_offset +
7678 object->local_got_offset(r_sym, tls_got_offset_type);
7679 }
7680 if (tlsopt == tls::TLSOPT_TO_IE)
7681 {
7682 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7683 view, psymval, got_entry_address,
7684 address);
7685 }
7686
7687 // Now do tlsdesc relocation.
7688 switch (r_type)
7689 {
7690 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7691 return aarch64_reloc_funcs::adrp(view,
7692 got_entry_address + addend,
7693 address);
7694 break;
7695 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7696 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7697 return aarch64_reloc_funcs::template rela_general<32>(
7698 view, got_entry_address, addend, reloc_property);
7699 break;
7700 case elfcpp::R_AARCH64_TLSDESC_CALL:
7701 return aarch64_reloc_funcs::STATUS_OKAY;
7702 break;
7703 default:
7704 gold_unreachable();
7705 }
7706 }
7707 }
7708 break;
7709
7710 default:
7711 gold_error(_("%s: unsupported TLS reloc %u."),
7712 object->name().c_str(), r_type);
7713 }
7714 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7715 } // End of relocate_tls.
7716
7717
7718 template<int size, bool big_endian>
7719 inline
7720 typename AArch64_relocate_functions<size, big_endian>::Status
7721 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7722 const Relocate_info<size, big_endian>* relinfo,
7723 Target_aarch64<size, big_endian>* target,
7724 const elfcpp::Rela<size, big_endian>& rela,
7725 unsigned int r_type,
7726 unsigned char* view,
7727 const Symbol_value<size>* psymval)
7728 {
7729 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7730 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7731 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7732
7733 Insntype* ip = reinterpret_cast<Insntype*>(view);
7734 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7735 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7736 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7737
7738 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7739 {
7740 // This is the 2nd reloc of the pair; the optimization has already been
7741 // done when the 1st reloc was processed.
7742 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7743 return aarch64_reloc_funcs::STATUS_OKAY;
7744 }
7745
7746 // The original sequence is -
7747 // 90000000 adrp x0, 0 <main>
7748 // 91000000 add x0, x0, #0x0
7749 // 94000000 bl 0 <__tls_get_addr>
7750 // optimized to sequence -
7751 // d53bd040 mrs x0, tpidr_el0
7752 // 91400000 add x0, x0, #0x0, lsl #12
7753 // 91000000 add x0, x0, #0x0
7754
7755 // Unlike tls_ie_to_le, we change all 3 insns in one call, when we
7756 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21", because we
7757 // also have to change "bl __tls_get_addr", which does not have a
7758 // corresponding tls relocation type. So before proceeding, we need to
7759 // make sure the compiler has not changed the expected sequence.
7760 if (!(insn1 == 0x90000000 // adrp x0,0
7761 && insn2 == 0x91000000 // add x0, x0, #0x0
7762 && insn3 == 0x94000000)) // bl 0
7763 {
7764 // Ideally we should give up gd_to_le relaxation and do a gd access.
7765 // However, the gd_to_le relaxation decision was made early, in the
7766 // scan stage, where we did not allocate any GOT entry for this
7767 // symbol. Therefore we have to exit and report an error now.
7768 gold_error(_("unexpected reloc insn sequence while relaxing "
7769 "tls gd to le for reloc %u."), r_type);
7770 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7771 }
7772
7773 // Write new insns.
7774 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7775 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7776 insn3 = 0x91000000; // add x0, x0, #0x0
7777 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7778 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7779 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7780
7781 // Calculate tprel value.
7782 Output_segment* tls_segment = relinfo->layout->tls_segment();
7783 gold_assert(tls_segment != NULL);
7784 AArch64_address value = psymval->value(relinfo->object, 0);
7785 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7786 AArch64_address aligned_tcb_size =
7787 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7788 AArch64_address x = value + aligned_tcb_size;
7789
7790 // After new insns are written, apply TLSLE relocs.
7791 const AArch64_reloc_property* rp1 =
7792 aarch64_reloc_property_table->get_reloc_property(
7793 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7794 const AArch64_reloc_property* rp2 =
7795 aarch64_reloc_property_table->get_reloc_property(
7796 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7797 gold_assert(rp1 != NULL && rp2 != NULL);
7798
7799 typename aarch64_reloc_funcs::Status s1 =
7800 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7801 x,
7802 addend,
7803 rp1);
7804 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7805 return s1;
7806
7807 typename aarch64_reloc_funcs::Status s2 =
7808 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7809 x,
7810 addend,
7811 rp2);
7812
7813 this->skip_call_tls_get_addr_ = true;
7814 return s2;
7815 } // End of tls_gd_to_le
7816
7817
7818 template<int size, bool big_endian>
7819 inline
7820 typename AArch64_relocate_functions<size, big_endian>::Status
7821 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7822 const Relocate_info<size, big_endian>* relinfo,
7823 Target_aarch64<size, big_endian>* target,
7824 const elfcpp::Rela<size, big_endian>& rela,
7825 unsigned int r_type,
7826 unsigned char* view,
7827 const Symbol_value<size>* psymval)
7828 {
7829 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7830 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7831 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7832
7833 Insntype* ip = reinterpret_cast<Insntype*>(view);
7834 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7835 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7836 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7837
7838 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7839 {
7840 // This is the 2nd reloc of the pair; the optimization has already been
7841 // done when the 1st reloc was processed.
7842 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7843 return aarch64_reloc_funcs::STATUS_OKAY;
7844 }
7845
7846 // The original sequence is -
7847 // 90000000 adrp x0, 0 <main>
7848 // 91000000 add x0, x0, #0x0
7849 // 94000000 bl 0 <__tls_get_addr>
7850 // optimized to sequence -
7851 // d53bd040 mrs x0, tpidr_el0
7852 // 91400000 add x0, x0, #0x0, lsl #12
7853 // 91000000 add x0, x0, #0x0
7854
7855 // Unlike tls_ie_to_le, we change all 3 insns in one call, when we
7856 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21", because we
7857 // also have to change "bl __tls_get_addr", which does not have a
7858 // corresponding tls relocation type. So before proceeding, we need to
7859 // make sure the compiler has not changed the expected sequence.
7860 if (!(insn1 == 0x90000000 // adrp x0,0
7861 && insn2 == 0x91000000 // add x0, x0, #0x0
7862 && insn3 == 0x94000000)) // bl 0
7863 {
7864 // Ideally we should give up ld_to_le relaxation and do an ld access.
7865 // However, the ld_to_le relaxation decision was made early, in the
7866 // scan stage, where we did not allocate a GOT entry for this
7867 // symbol. Therefore we have to exit and report an error now.
7868 gold_error(_("unexpected reloc insn sequence while relaxing "
7869 "tls ld to le for reloc %u."), r_type);
7870 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7871 }
7872
7873 // Write new insns.
7874 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7875 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7876 insn3 = 0x91000000; // add x0, x0, #0x0
7877 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7878 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7879 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7880
7881 // Calculate tprel value.
7882 Output_segment* tls_segment = relinfo->layout->tls_segment();
7883 gold_assert(tls_segment != NULL);
7884 AArch64_address value = psymval->value(relinfo->object, 0);
7885 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7886 AArch64_address aligned_tcb_size =
7887 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7888 AArch64_address x = value + aligned_tcb_size;
7889
7890 // After new insns are written, apply TLSLE relocs.
7891 const AArch64_reloc_property* rp1 =
7892 aarch64_reloc_property_table->get_reloc_property(
7893 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7894 const AArch64_reloc_property* rp2 =
7895 aarch64_reloc_property_table->get_reloc_property(
7896 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7897 gold_assert(rp1 != NULL && rp2 != NULL);
7898
7899 typename aarch64_reloc_funcs::Status s1 =
7900 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7901 x,
7902 addend,
7903 rp1);
7904 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7905 return s1;
7906
7907 typename aarch64_reloc_funcs::Status s2 =
7908 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7909 x,
7910 addend,
7911 rp2);
7912
7913 this->skip_call_tls_get_addr_ = true;
7914 return s2;
7915
7916 } // End of tls_ld_to_le
7917
7918 template<int size, bool big_endian>
7919 inline
7920 typename AArch64_relocate_functions<size, big_endian>::Status
7921 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7922 const Relocate_info<size, big_endian>* relinfo,
7923 Target_aarch64<size, big_endian>* target,
7924 const elfcpp::Rela<size, big_endian>& rela,
7925 unsigned int r_type,
7926 unsigned char* view,
7927 const Symbol_value<size>* psymval)
7928 {
7929 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7930 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7931 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7932
7933 AArch64_address value = psymval->value(relinfo->object, 0);
7934 Output_segment* tls_segment = relinfo->layout->tls_segment();
7935 AArch64_address aligned_tcb_address =
7936 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7937 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7938 AArch64_address x = value + addend + aligned_tcb_address;
7939 // "x" is the offset from tp; we can only do this relaxation if x is
7940 // within the range [0, 2^32-1].
7941 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7942 {
7943 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7944 r_type);
7945 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7946 }
7947
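// The IE sequence is
//   adrp  xN, :gottprel:var
//   ldr   xN, [xN, #:gottprel_lo12:var]
// and is rewritten for LE (keeping the register number) as
//   movz  xN, #(tprel >> 16), lsl #16
//   movk  xN, #(tprel & 0xffff)
// so the TP-relative offset is materialized directly instead of being
// loaded from the GOT. Each of the two relocs rewrites its own insn below.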
7948 Insntype* ip = reinterpret_cast<Insntype*>(view);
7949 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7950 unsigned int regno;
7951 Insntype newinsn;
7952 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7953 {
7954 // Generate movz.
7955 regno = (insn & 0x1f);
7956 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7957 }
7958 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7959 {
7960 // Generate movk.
7961 regno = (insn & 0x1f);
7962 gold_assert(regno == ((insn >> 5) & 0x1f));
7963 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7964 }
7965 else
7966 gold_unreachable();
7967
7968 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7969 return aarch64_reloc_funcs::STATUS_OKAY;
7970 } // End of tls_ie_to_le
7971
7972
7973 template<int size, bool big_endian>
7974 inline
7975 typename AArch64_relocate_functions<size, big_endian>::Status
7976 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7977 const Relocate_info<size, big_endian>* relinfo,
7978 Target_aarch64<size, big_endian>* target,
7979 const elfcpp::Rela<size, big_endian>& rela,
7980 unsigned int r_type,
7981 unsigned char* view,
7982 const Symbol_value<size>* psymval)
7983 {
7984 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7985 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7986 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7987
7988 // TLSDESC-GD sequence is like:
7989 // adrp x0, :tlsdesc:v1
7990 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7991 // add x0, x0, :tlsdesc_lo12:v1
7992 // .tlsdesccall v1
7993 // blr x1
7994 // After desc_gd_to_le optimization, the sequence will be like:
7995 // movz x0, #0x0, lsl #16
7996 // movk x0, #0x10
7997 // nop
7998 // nop
7999
8000 // Calculate tprel value.
8001 Output_segment* tls_segment = relinfo->layout->tls_segment();
8002 gold_assert(tls_segment != NULL);
8003 Insntype* ip = reinterpret_cast<Insntype*>(view);
8004 const elfcpp::Elf_Xword addend = rela.get_r_addend();
8005 AArch64_address value = psymval->value(relinfo->object, addend);
8006 AArch64_address aligned_tcb_size =
8007 align_address(target->tcb_size(), tls_segment->maximum_alignment());
8008 AArch64_address x = value + aligned_tcb_size;
8009 // x is the offset from tp; we can only do this relaxation if x is within
8010 // the range [0, 2^32-1]. If x is out of range, fail and exit.
8011 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
8012 {
8013 gold_error(_("TLS variable referred to by reloc %u is too far from TP. "
8014 "We cannot do gd_to_le relaxation.\n"), r_type);
8015 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
8016 }
8017 Insntype newinsn;
8018 switch (r_type)
8019 {
8020 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
8021 case elfcpp::R_AARCH64_TLSDESC_CALL:
8022 // Change to nop
8023 newinsn = 0xd503201f;
8024 break;
8025
8026 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
8027 // Change to movz.
8028 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
8029 break;
8030
8031 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
8032 // Change to movk.
8033 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
8034 break;
8035
8036 default:
8037 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
8038 r_type);
8039 gold_unreachable();
8040 }
8041 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
8042 return aarch64_reloc_funcs::STATUS_OKAY;
8043 } // End of tls_desc_gd_to_le
8044
8045
8046 template<int size, bool big_endian>
8047 inline
8048 typename AArch64_relocate_functions<size, big_endian>::Status
8049 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
8050 const Relocate_info<size, big_endian>* /* relinfo */,
8051 Target_aarch64<size, big_endian>* /* target */,
8052 const elfcpp::Rela<size, big_endian>& rela,
8053 unsigned int r_type,
8054 unsigned char* view,
8055 const Symbol_value<size>* /* psymval */,
8056 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
8057 typename elfcpp::Elf_types<size>::Elf_Addr address)
8058 {
8059 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
8060 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
8061
8062 // TLSDESC-GD sequence is like:
8063 // adrp x0, :tlsdesc:v1
8064 // ldr x1, [x0, #:tlsdesc_lo12:v1]
8065 // add x0, x0, :tlsdesc_lo12:v1
8066 // .tlsdesccall v1
8067 // blr x1
8068 // After desc_gd_to_ie optimization, the sequence will be like:
8069 // adrp x0, :tlsie:v1
8070 // ldr x0, [x0, :tlsie_lo12:v1]
8071 // nop
8072 // nop
8073
8074 Insntype* ip = reinterpret_cast<Insntype*>(view);
8075 const elfcpp::Elf_Xword addend = rela.get_r_addend();
8076 Insntype newinsn;
8077 switch (r_type)
8078 {
8079 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
8080 case elfcpp::R_AARCH64_TLSDESC_CALL:
8081 // Change to nop
8082 newinsn = 0xd503201f;
8083 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
8084 break;
8085
8086 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
8087 {
8088 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
8089 address);
8090 }
8091 break;
8092
8093 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
8094 {
8095 // Set ldr target register to be x0.
8096 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
8097 insn &= 0xffffffe0;
8098 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
8099 // Do relocation.
8100 const AArch64_reloc_property* reloc_property =
8101 aarch64_reloc_property_table->get_reloc_property(
8102 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8103 return aarch64_reloc_funcs::template rela_general<32>(
8104 view, got_entry_address, addend, reloc_property);
8105 }
8106 break;
8107
8108 default:
8109 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
8110 r_type);
8111 gold_unreachable();
8112 }
8113 return aarch64_reloc_funcs::STATUS_OKAY;
8114 } // End of tls_desc_gd_to_ie
8115
8116 // Relocate section data.
8117
8118 template<int size, bool big_endian>
8119 void
8120 Target_aarch64<size, big_endian>::relocate_section(
8121 const Relocate_info<size, big_endian>* relinfo,
8122 unsigned int sh_type,
8123 const unsigned char* prelocs,
8124 size_t reloc_count,
8125 Output_section* output_section,
8126 bool needs_special_offset_handling,
8127 unsigned char* view,
8128 typename elfcpp::Elf_types<size>::Elf_Addr address,
8129 section_size_type view_size,
8130 const Reloc_symbol_changes* reloc_symbol_changes)
8131 {
8132 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
8133 typedef Target_aarch64<size, big_endian> Aarch64;
8134 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
8135 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8136 Classify_reloc;
8137
8138 gold_assert(sh_type == elfcpp::SHT_RELA);
8139
8140 // See if we are relocating a relaxed input section. If so, the view
8141 // covers the whole output section and we need to adjust accordingly.
8142 if (needs_special_offset_handling)
8143 {
8144 const Output_relaxed_input_section* poris =
8145 output_section->find_relaxed_input_section(relinfo->object,
8146 relinfo->data_shndx);
8147 if (poris != NULL)
8148 {
8149 Address section_address = poris->address();
8150 section_size_type section_size = poris->data_size();
8151
8152 gold_assert((section_address >= address)
8153 && ((section_address + section_size)
8154 <= (address + view_size)));
8155
8156 off_t offset = section_address - address;
8157 view += offset;
8158 address += offset;
8159 view_size = section_size;
8160 }
8161 }
8162
8163 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
8164 gold::Default_comdat_behavior, Classify_reloc>(
8165 relinfo,
8166 this,
8167 prelocs,
8168 reloc_count,
8169 output_section,
8170 needs_special_offset_handling,
8171 view,
8172 address,
8173 view_size,
8174 reloc_symbol_changes);
8175 }
8176
8177 // Scan the relocs during a relocatable link.
8178
8179 template<int size, bool big_endian>
8180 void
8181 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
8182 Symbol_table* symtab,
8183 Layout* layout,
8184 Sized_relobj_file<size, big_endian>* object,
8185 unsigned int data_shndx,
8186 unsigned int sh_type,
8187 const unsigned char* prelocs,
8188 size_t reloc_count,
8189 Output_section* output_section,
8190 bool needs_special_offset_handling,
8191 size_t local_symbol_count,
8192 const unsigned char* plocal_symbols,
8193 Relocatable_relocs* rr)
8194 {
8195 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8196 Classify_reloc;
8197 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
8198 Scan_relocatable_relocs;
8199
8200 gold_assert(sh_type == elfcpp::SHT_RELA);
8201
8202 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8203 symtab,
8204 layout,
8205 object,
8206 data_shndx,
8207 prelocs,
8208 reloc_count,
8209 output_section,
8210 needs_special_offset_handling,
8211 local_symbol_count,
8212 plocal_symbols,
8213 rr);
8214 }
8215
8216 // Scan the relocs for --emit-relocs.
8217
8218 template<int size, bool big_endian>
8219 void
8220 Target_aarch64<size, big_endian>::emit_relocs_scan(
8221 Symbol_table* symtab,
8222 Layout* layout,
8223 Sized_relobj_file<size, big_endian>* object,
8224 unsigned int data_shndx,
8225 unsigned int sh_type,
8226 const unsigned char* prelocs,
8227 size_t reloc_count,
8228 Output_section* output_section,
8229 bool needs_special_offset_handling,
8230 size_t local_symbol_count,
8231 const unsigned char* plocal_syms,
8232 Relocatable_relocs* rr)
8233 {
8234 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8235 Classify_reloc;
8236 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8237 Emit_relocs_strategy;
8238
8239 gold_assert(sh_type == elfcpp::SHT_RELA);
8240
8241 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8242 symtab,
8243 layout,
8244 object,
8245 data_shndx,
8246 prelocs,
8247 reloc_count,
8248 output_section,
8249 needs_special_offset_handling,
8250 local_symbol_count,
8251 plocal_syms,
8252 rr);
8253 }
8254
8255 // Relocate a section during a relocatable link.
8256
8257 template<int size, bool big_endian>
8258 void
8259 Target_aarch64<size, big_endian>::relocate_relocs(
8260 const Relocate_info<size, big_endian>* relinfo,
8261 unsigned int sh_type,
8262 const unsigned char* prelocs,
8263 size_t reloc_count,
8264 Output_section* output_section,
8265 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8266 unsigned char* view,
8267 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8268 section_size_type view_size,
8269 unsigned char* reloc_view,
8270 section_size_type reloc_view_size)
8271 {
8272 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8273 Classify_reloc;
8274
8275 gold_assert(sh_type == elfcpp::SHT_RELA);
8276
8277 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8278 relinfo,
8279 prelocs,
8280 reloc_count,
8281 output_section,
8282 offset_in_output_section,
8283 view,
8284 view_address,
8285 view_size,
8286 reloc_view,
8287 reloc_view_size);
8288 }
8289
8290
8291 // Return whether this is a 3-insn erratum sequence.
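// Erratum 843419 (Cortex-A53): the shape checked here is
//   insn1: adrp xA, ...   (the caller has already checked this)
//   insn2: a single-register load or store, or a register-pair store
//   insn3: a load/store (unsigned immediate form) whose base register is
//          xA, the adrp destination.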
8292
8293 template<int size, bool big_endian>
8294 bool
8295 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8296 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8297 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8298 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8299 {
8300 unsigned rt1, rt2;
8301 bool load, pair;
8302
8303 // The 2nd insn is a single-register load or store, or a register-pair
8304 // store.
8305 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8306 && (!pair || (pair && !load)))
8307 {
8308 // The 3rd insn is a load or store instruction from the "Load/store
8309 // register (unsigned immediate)" encoding class, using Rn as the
8310 // base address register.
8311 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8312 && (Insn_utilities::aarch64_rn(insn3)
8313 == Insn_utilities::aarch64_rd(insn1)))
8314 return true;
8315 }
8316 return false;
8317 }
8318
8319
8320 // Return whether this is a 835769 sequence.
8321 // (Similarly implemented as in elfnn-aarch64.c.)
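// Erratum 835769 (Cortex-A53): the sequence is a memory operation (insn1)
// immediately followed by a 64-bit multiply-accumulate (insn2). A load
// whose result feeds the multiply-accumulate (a true dependency) is safe;
// all other cases are conservatively treated as erratum sequences.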
8322
8323 template<int size, bool big_endian>
8324 bool
8325 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8326 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8327 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8328 {
8329 uint32_t rt;
8330 uint32_t rt2 = 0;
8331 uint32_t rn;
8332 uint32_t rm;
8333 uint32_t ra;
8334 bool pair;
8335 bool load;
8336
8337 if (Insn_utilities::aarch64_mlxl(insn2)
8338 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8339 {
8340 /* Any SIMD memory op is independent of the subsequent MLA
8341 by definition of the erratum. */
8342 if (Insn_utilities::aarch64_bit(insn1, 26))
8343 return true;
8344
8345 /* If not SIMD, check for integer memory ops and MLA relationship. */
8346 rn = Insn_utilities::aarch64_rn(insn2);
8347 ra = Insn_utilities::aarch64_ra(insn2);
8348 rm = Insn_utilities::aarch64_rm(insn2);
8349
8350 /* If this is a load and there's a true(RAW) dependency, we are safe
8351 and this is not an erratum sequence. */
8352 if (load &&
8353 (rt == rn || rt == rm || rt == ra
8354 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8355 return false;
8356
8357 /* We conservatively put out stubs for all other cases (including
8358 writebacks). */
8359 return true;
8360 }
8361
8362 return false;
8363 }
8364
8365
8366 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8367
8368 template<int size, bool big_endian>
8369 void
8370 Target_aarch64<size, big_endian>::create_erratum_stub(
8371 AArch64_relobj<size, big_endian>* relobj,
8372 unsigned int shndx,
8373 section_size_type erratum_insn_offset,
8374 Address erratum_address,
8375 typename Insn_utilities::Insntype erratum_insn,
8376 int erratum_type,
8377 unsigned int e843419_adrp_offset)
8378 {
8379 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8380 The_stub_table* stub_table = relobj->stub_table(shndx);
8381 gold_assert(stub_table != NULL);
8382 if (stub_table->find_erratum_stub(relobj,
8383 shndx,
8384 erratum_insn_offset) == NULL)
8385 {
8386 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8387 The_erratum_stub* stub;
8388 if (erratum_type == ST_E_835769)
8389 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8390 erratum_insn_offset);
8391 else if (erratum_type == ST_E_843419)
8392 stub = new E843419_stub<size, big_endian>(
8393 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8394 else
8395 gold_unreachable();
8396 stub->set_erratum_insn(erratum_insn);
8397 stub->set_erratum_address(erratum_address);
8398 // For erratum ST_E_843419 and ST_E_835769, the destination address is
8399 // always the next insn after the erratum insn.
8400 stub->set_destination_address(erratum_address + BPI);
8401 stub_table->add_erratum_stub(stub);
8402 }
8403 }
8404
8405
8406 // Scan erratum for section SHNDX range [output_address + span_start,
8407 // output_address + span_end). Note that we do not share this code with the
8408 // scan_erratum_843419_span function, because for 843419 we optimize by only
8409 // scanning the last few insns of a page, whereas for 835769, we need to scan
8410 // every insn.
8411
8412 template<int size, bool big_endian>
8413 void
8414 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8415 AArch64_relobj<size, big_endian>* relobj,
8416 unsigned int shndx,
8417 const section_size_type span_start,
8418 const section_size_type span_end,
8419 unsigned char* input_view,
8420 Address output_address)
8421 {
8422 typedef typename Insn_utilities::Insntype Insntype;
8423
8424 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8425
8426 // Adjust output_address and view to the start of span.
8427 output_address += span_start;
8428 input_view += span_start;
8429
8430 section_size_type span_length = span_end - span_start;
8431 section_size_type offset = 0;
8432 for (offset = 0; offset + BPI < span_length; offset += BPI)
8433 {
8434 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8435 Insntype insn1 = ip[0];
8436 Insntype insn2 = ip[1];
8437 if (is_erratum_835769_sequence(insn1, insn2))
8438 {
8439 Insntype erratum_insn = insn2;
8440 // "span_start + offset" is the offset for insn1. So for insn2, it is
8441 // "span_start + offset + BPI".
8442 section_size_type erratum_insn_offset = span_start + offset + BPI;
8443 Address erratum_address = output_address + offset + BPI;
8444 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8445 "section %d, offset 0x%08x."),
8446 relobj->name().c_str(), shndx,
8447 (unsigned int)(span_start + offset));
8448
8449 this->create_erratum_stub(relobj, shndx,
8450 erratum_insn_offset, erratum_address,
8451 erratum_insn, ST_E_835769);
8452 	  offset += BPI; // Skip the multiply-accumulate insn.
8453 }
8454 }
8455 } // End of "Target_aarch64::scan_erratum_835769_span".
8456
8457
8458 // Scan for erratum 843419 in section SHNDX over the range
8459 // [output_address + span_start, output_address + span_end).
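//
// A sketch of the kind of sequence this scan looks for (registers and
// offsets are only illustrative; the precise conditions are encoded in
// is_erratum_843419_sequence): an ADRP in one of the last two insn slots of
// a 4KB page, then a load or store, then (possibly after one more unrelated,
// non-branch insn) a load or store that uses the ADRP destination register
// as its base:
//
//   0x...ff8:  adrp  x0, sym
//   0x...ffc:  ldr   x2, [x1, #8]
//   0x..1000:  ldr   x3, [x0, #:lo12:sym]    // the "erratum insn"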
8460
8461 template<int size, bool big_endian>
8462 void
8463 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8464 AArch64_relobj<size, big_endian>* relobj,
8465 unsigned int shndx,
8466 const section_size_type span_start,
8467 const section_size_type span_end,
8468 unsigned char* input_view,
8469 Address output_address)
8470 {
8471 typedef typename Insn_utilities::Insntype Insntype;
8472
8473 // Adjust output_address and view to the start of span.
8474 output_address += span_start;
8475 input_view += span_start;
8476
8477 if ((output_address & 0x03) != 0)
8478 return;
8479
8480 section_size_type offset = 0;
8481 section_size_type span_length = span_end - span_start;
8482   // The first insn of the sequence (the ADRP) must be located at page
8482   // offset 0xFF8 or 0xFFC.
8483 unsigned int page_offset = output_address & 0xFFF;
8484   // Make sure the starting position, that is "output_address + offset",
8485   // is at page offset 0xff8 or 0xffc.
8486 if (page_offset < 0xff8)
8487 offset = 0xff8 - page_offset;
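  // A worked example of the cadence (addresses chosen only for illustration):
  // if the span starts at output address 0x400F00, page_offset is 0xF00 and
  // the first candidate sits at offset 0xF8, i.e. address 0x400FF8.  After
  // checking the 0xFF8 slot we advance 4 bytes to the 0xFFC slot, and after
  // the 0xFFC slot we advance 0xFFC bytes to the next page's 0xFF8 slot,
  // here 0x401FF8.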
8488 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8489 {
8490 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8491 Insntype insn1 = ip[0];
8492 if (Insn_utilities::is_adrp(insn1))
8493 {
8494 Insntype insn2 = ip[1];
8495 Insntype insn3 = ip[2];
8496 Insntype erratum_insn;
8497 unsigned insn_offset;
8498 bool do_report = false;
8499 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8500 {
8501 do_report = true;
8502 erratum_insn = insn3;
8503 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8504 }
8505 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8506 {
8507 	      // Optionally there can be an insn between insn2 and insn3.
8508 Insntype insn_opt = ip[2];
8509 // And insn_opt must not be a branch.
8510 if (!Insn_utilities::aarch64_b(insn_opt)
8511 && !Insn_utilities::aarch64_bl(insn_opt)
8512 && !Insn_utilities::aarch64_blr(insn_opt)
8513 && !Insn_utilities::aarch64_br(insn_opt))
8514 {
8515 		  // And insn_opt must not write to the destination reg of insn1.
8516 		  // However, we scan conservatively, which means we may fix or
8517 		  // report more than strictly necessary, but that does no harm.
8518
8519 Insntype insn4 = ip[3];
8520 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8521 {
8522 do_report = true;
8523 erratum_insn = insn4;
8524 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8525 }
8526 }
8527 }
8528 if (do_report)
8529 {
8530 unsigned int erratum_insn_offset =
8531 span_start + offset + insn_offset;
8532 Address erratum_address =
8533 output_address + offset + insn_offset;
8534 create_erratum_stub(relobj, shndx,
8535 erratum_insn_offset, erratum_address,
8536 erratum_insn, ST_E_843419,
8537 span_start + offset);
8538 }
8539 }
8540
8541 // Advance to next candidate instruction. We only consider instruction
8542 // sequences starting at a page offset of 0xff8 or 0xffc.
8543 page_offset = (output_address + offset) & 0xfff;
8544 if (page_offset == 0xff8)
8545 offset += 4;
8546 else // (page_offset == 0xffc), we move to next page's 0xff8.
8547 offset += 0xffc;
8548 }
8549 } // End of "Target_aarch64::scan_erratum_843419_span".
8550
8551
8552 // The selector for aarch64 object files.
8553
8554 template<int size, bool big_endian>
8555 class Target_selector_aarch64 : public Target_selector
8556 {
8557 public:
8558 Target_selector_aarch64();
8559
8560 virtual Target*
8561 do_instantiate_target()
8562 { return new Target_aarch64<size, big_endian>(); }
8563 };
8564
8565 template<>
8566 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8567 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8568 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8569 { }
8570
8571 template<>
8572 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8573 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8574 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8575 { }
8576
8577 template<>
8578 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8579 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8580 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8581 { }
8582
8583 template<>
8584 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8585 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8586 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8587 { }
8588
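// Instantiating these selectors registers each (size, endianness) variant
// with gold's target-selection machinery through the Target_selector base
// class constructor.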
8589 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8590 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8591 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8592 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8593
8594 } // End anonymous namespace.