1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2017 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static unsigned int
114 aarch64_rm(const Insntype insn)
115 { return aarch64_bits(insn, 16, 5); }
116
117 static unsigned int
118 aarch64_rn(const Insntype insn)
119 { return aarch64_bits(insn, 5, 5); }
120
121 static unsigned int
122 aarch64_rd(const Insntype insn)
123 { return aarch64_bits(insn, 0, 5); }
124
125 static unsigned int
126 aarch64_rt(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt2(const Insntype insn)
131 { return aarch64_bits(insn, 10, 5); }
132
133 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
134 static Insntype
135 aarch64_adr_encode_imm(Insntype adr, int imm21)
136 {
137 gold_assert(is_adr(adr));
138 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
139 const int mask19 = (1 << 19) - 1;
140 const int mask2 = 3;
141 adr &= ~((mask19 << 5) | (mask2 << 29));
142 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
143 return adr;
144 }
145
146 // Retrieve the encoded adrp 33-bit signed imm value. This value is obtained
147 // by multiplying the 21-bit signed imm encoded in the insn by 4K (the page
148 // size) and sign-extending to 64 bits, resulting in [-4G, 4G) with the low 12 bits being 0.
149 static int64_t
150 aarch64_adrp_decode_imm(const Insntype adrp)
151 {
152 const int mask19 = (1 << 19) - 1;
153 const int mask2 = 3;
154 gold_assert(is_adrp(adrp));
155 // 21-bit imm encoded in adrp.
156 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
157 // Retrieve msb of 21-bit-signed imm for sign extension.
158 uint64_t msbt = (imm >> 20) & 1;
159 // Real value is imm multiplied by 4k. Value now has 33-bit information.
160 int64_t value = imm << 12;
161 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
162 // with value.
163 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
164 }
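// As an illustration (encoding chosen here, not taken from the sources): the
// word 0xB0000010 is "adrp x16, <+1 page>". is_adrp() accepts it, the decode
// above extracts immlo = (insn >> 29) & 3 = 1 and immhi = (insn >> 5) & mask19
// = 0, so imm = 1 and msbt = 0, and aarch64_adrp_decode_imm() returns
// 1 << 12 = 0x1000, i.e. one 4K page forward with the low 12 bits zero.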
165
166 static bool
167 aarch64_b(const Insntype insn)
168 { return (insn & 0xFC000000) == 0x14000000; }
169
170 static bool
171 aarch64_bl(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x94000000; }
173
174 static bool
175 aarch64_blr(const Insntype insn)
176 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
177
178 static bool
179 aarch64_br(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
181
182 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
183 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
184 static bool
185 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
186
187 static bool
188 aarch64_ldst(Insntype insn)
189 { return (insn & 0x0a000000) == 0x08000000; }
190
191 static bool
192 aarch64_ldst_ex(Insntype insn)
193 { return (insn & 0x3f000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_pcrel(Insntype insn)
197 { return (insn & 0x3b000000) == 0x18000000; }
198
199 static bool
200 aarch64_ldst_nap(Insntype insn)
201 { return (insn & 0x3b800000) == 0x28000000; }
202
203 static bool
204 aarch64_ldstp_pi(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28800000; }
206
207 static bool
208 aarch64_ldstp_o(Insntype insn)
209 { return (insn & 0x3b800000) == 0x29000000; }
210
211 static bool
212 aarch64_ldstp_pre(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29800000; }
214
215 static bool
216 aarch64_ldst_ui(Insntype insn)
217 { return (insn & 0x3b200c00) == 0x38000000; }
218
219 static bool
220 aarch64_ldst_piimm(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000400; }
222
223 static bool
224 aarch64_ldst_u(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000800; }
226
227 static bool
228 aarch64_ldst_preimm(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000c00; }
230
231 static bool
232 aarch64_ldst_ro(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38200800; }
234
235 static bool
236 aarch64_ldst_uimm(Insntype insn)
237 { return (insn & 0x3b000000) == 0x39000000; }
238
239 static bool
240 aarch64_ldst_simd_m(Insntype insn)
241 { return (insn & 0xbfbf0000) == 0x0c000000; }
242
243 static bool
244 aarch64_ldst_simd_m_pi(Insntype insn)
245 { return (insn & 0xbfa00000) == 0x0c800000; }
246
247 static bool
248 aarch64_ldst_simd_s(Insntype insn)
249 { return (insn & 0xbf9f0000) == 0x0d000000; }
250
251 static bool
252 aarch64_ldst_simd_s_pi(Insntype insn)
253 { return (insn & 0xbf800000) == 0x0d800000; }
254
255 // Classify an INSN if it is indeed a load/store. Return true if INSN is a
256 // LD/ST instruction otherwise return false. For scalar LD/ST instructions
257 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
258 // instructions PAIR is TRUE, RT and RT2 are returned.
259 static bool
260 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
261 bool *pair, bool *load)
262 {
263 uint32_t opcode;
264 unsigned int r;
265 uint32_t opc = 0;
266 uint32_t v = 0;
267 uint32_t opc_v = 0;
268
269 /* Bail out quickly if INSN doesn't fall into the load-store
270 encoding space. */
271 if (!aarch64_ldst (insn))
272 return false;
273
274 *pair = false;
275 *load = false;
276 if (aarch64_ldst_ex (insn))
277 {
278 *rt = aarch64_rt (insn);
279 *rt2 = *rt;
280 if (aarch64_bit (insn, 21) == 1)
281 {
282 *pair = true;
283 *rt2 = aarch64_rt2 (insn);
284 }
285 *load = aarch64_ld (insn);
286 return true;
287 }
288 else if (aarch64_ldst_nap (insn)
289 || aarch64_ldstp_pi (insn)
290 || aarch64_ldstp_o (insn)
291 || aarch64_ldstp_pre (insn))
292 {
293 *pair = true;
294 *rt = aarch64_rt (insn);
295 *rt2 = aarch64_rt2 (insn);
296 *load = aarch64_ld (insn);
297 return true;
298 }
299 else if (aarch64_ldst_pcrel (insn)
300 || aarch64_ldst_ui (insn)
301 || aarch64_ldst_piimm (insn)
302 || aarch64_ldst_u (insn)
303 || aarch64_ldst_preimm (insn)
304 || aarch64_ldst_ro (insn)
305 || aarch64_ldst_uimm (insn))
306 {
307 *rt = aarch64_rt (insn);
308 *rt2 = *rt;
309 if (aarch64_ldst_pcrel (insn))
310 *load = true;
311 opc = aarch64_bits (insn, 22, 2);
312 v = aarch64_bit (insn, 26);
313 opc_v = opc | (v << 2);
314 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
315 || opc_v == 5 || opc_v == 7);
316 return true;
317 }
318 else if (aarch64_ldst_simd_m (insn)
319 || aarch64_ldst_simd_m_pi (insn))
320 {
321 *rt = aarch64_rt (insn);
322 *load = aarch64_bit (insn, 22);
323 opcode = (insn >> 12) & 0xf;
324 switch (opcode)
325 {
326 case 0:
327 case 2:
328 *rt2 = *rt + 3;
329 break;
330
331 case 4:
332 case 6:
333 *rt2 = *rt + 2;
334 break;
335
336 case 7:
337 *rt2 = *rt;
338 break;
339
340 case 8:
341 case 10:
342 *rt2 = *rt + 1;
343 break;
344
345 default:
346 return false;
347 }
348 return true;
349 }
350 else if (aarch64_ldst_simd_s (insn)
351 || aarch64_ldst_simd_s_pi (insn))
352 {
353 *rt = aarch64_rt (insn);
354 r = (insn >> 21) & 1;
355 *load = aarch64_bit (insn, 22);
356 opcode = (insn >> 13) & 0x7;
357 switch (opcode)
358 {
359 case 0:
360 case 2:
361 case 4:
362 *rt2 = *rt + r;
363 break;
364
365 case 1:
366 case 3:
367 case 5:
368 *rt2 = *rt + (r == 0 ? 2 : 3);
369 break;
370
371 case 6:
372 *rt2 = *rt + r;
373 break;
374
375 case 7:
376 *rt2 = *rt + (r == 0 ? 2 : 3);
377 break;
378
379 default:
380 return false;
381 }
382 return true;
383 }
384 return false;
385 } // End of "aarch64_mem_op_p".
386
387 // Return true if INSN is a mac insn.
388 static bool
389 aarch64_mac(Insntype insn)
390 { return (insn & 0xff000000) == 0x9b000000; }
391
392 // Return true if INSN is a multiply-accumulate insn.
393 // (This is similar to the implementation in elfnn-aarch64.c.)
394 static bool
395 aarch64_mlxl(Insntype insn)
396 {
397 uint32_t op31 = aarch64_op31(insn);
398 if (aarch64_mac(insn)
399 && (op31 == 0 || op31 == 1 || op31 == 5)
400 /* Exclude MUL instructions which are encoded as a multiply-accumulate
401 with RA = XZR. */
402 && aarch64_ra(insn) != AARCH64_ZR)
403 {
404 return true;
405 }
406 return false;
407 }
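// For example (encodings worked out here for illustration): "madd x0, x1, x2,
// x3" encodes as 0x9B020C20; aarch64_mac() matches, op31 is 0 and ra is 3, so
// aarch64_mlxl() returns true. The MUL alias "mul x0, x1, x2" is encoded as
// "madd x0, x1, x2, xzr" (0x9B027C20); its ra field is 31 (AARCH64_ZR), so it
// is excluded as described above.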
408 }; // End of "AArch64_insn_utilities".
409
410
411 // Insn length in bytes.
412
413 template<bool big_endian>
414 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
415
416
417 // Zero register encoding - 31.
418
419 template<bool big_endian>
420 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
421
422
423 // Output_data_got_aarch64 class.
424
425 template<int size, bool big_endian>
426 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
427 {
428 public:
429 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
430 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
431 : Output_data_got<size, big_endian>(),
432 symbol_table_(symtab), layout_(layout)
433 { }
434
435 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
436 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
437 // applied in a static link.
438 void
439 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
440 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
441
442
443 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
444 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
445 // relocation that needs to be applied in a static link.
446 void
447 add_static_reloc(unsigned int got_offset, unsigned int r_type,
448 Sized_relobj_file<size, big_endian>* relobj,
449 unsigned int index)
450 {
451 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
452 index));
453 }
454
455
456 protected:
457 // Write out the GOT table.
458 void
459 do_write(Output_file* of) {
460 // The first entry in the GOT is the address of the .dynamic section.
461 gold_assert(this->data_size() >= size / 8);
462 Output_section* dynamic = this->layout_->dynamic_section();
463 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
464 this->replace_constant(0, dynamic_addr);
465 Output_data_got<size, big_endian>::do_write(of);
466
467 // Handling static relocs
468 if (this->static_relocs_.empty())
469 return;
470
471 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
472
473 gold_assert(parameters->doing_static_link());
474 const off_t offset = this->offset();
475 const section_size_type oview_size =
476 convert_to_section_size_type(this->data_size());
477 unsigned char* const oview = of->get_output_view(offset, oview_size);
478
479 Output_segment* tls_segment = this->layout_->tls_segment();
480 gold_assert(tls_segment != NULL);
481
482 AArch64_address aligned_tcb_address =
483 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
484 tls_segment->maximum_alignment());
485
486 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
487 {
488 Static_reloc& reloc(this->static_relocs_[i]);
489 AArch64_address value;
490
491 if (!reloc.symbol_is_global())
492 {
493 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
494 const Symbol_value<size>* psymval =
495 reloc.relobj()->local_symbol(reloc.index());
496
497 // We are doing static linking. Issue an error and skip this
498 // relocation if the symbol is undefined or in a discarded section.
499 bool is_ordinary;
500 unsigned int shndx = psymval->input_shndx(&is_ordinary);
501 if ((shndx == elfcpp::SHN_UNDEF)
502 || (is_ordinary
503 && shndx != elfcpp::SHN_UNDEF
504 && !object->is_section_included(shndx)
505 && !this->symbol_table_->is_section_folded(object, shndx)))
506 {
507 gold_error(_("undefined or discarded local symbol %u from "
508 " object %s in GOT"),
509 reloc.index(), reloc.relobj()->name().c_str());
510 continue;
511 }
512 value = psymval->value(object, 0);
513 }
514 else
515 {
516 const Symbol* gsym = reloc.symbol();
517 gold_assert(gsym != NULL);
518 if (gsym->is_forwarder())
519 gsym = this->symbol_table_->resolve_forwards(gsym);
520
521 // We are doing static linking. Issue an error and skip this
522 // relocation if the symbol is undefined or in a discarded section,
523 // unless it is a weakly undefined symbol.
524 if ((gsym->is_defined_in_discarded_section()
525 || gsym->is_undefined())
526 && !gsym->is_weak_undefined())
527 {
528 gold_error(_("undefined or discarded symbol %s in GOT"),
529 gsym->name());
530 continue;
531 }
532
533 if (!gsym->is_weak_undefined())
534 {
535 const Sized_symbol<size>* sym =
536 static_cast<const Sized_symbol<size>*>(gsym);
537 value = sym->value();
538 }
539 else
540 value = 0;
541 }
542
543 unsigned got_offset = reloc.got_offset();
544 gold_assert(got_offset < oview_size);
545
546 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
547 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
548 Valtype x;
549 switch (reloc.r_type())
550 {
551 case elfcpp::R_AARCH64_TLS_DTPREL64:
552 x = value;
553 break;
554 case elfcpp::R_AARCH64_TLS_TPREL64:
555 x = value + aligned_tcb_address;
556 break;
557 default:
558 gold_unreachable();
559 }
560 elfcpp::Swap<size, big_endian>::writeval(wv, x);
561 }
562
563 of->write_output_view(offset, oview_size, oview);
564 }
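// A small worked example (numbers assumed for illustration): with a 16-byte
// TCB and a TLS segment whose maximum alignment is 16, aligned_tcb_address is
// 16, so a thread-local symbol at offset 8 in the TLS segment gets 8 written
// for R_AARCH64_TLS_DTPREL64 and 8 + 16 = 24 for R_AARCH64_TLS_TPREL64.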
565
566 private:
567 // Symbol table of the output object.
568 Symbol_table* symbol_table_;
569 // A pointer to the Layout class, so that we can find the .dynamic
570 // section when we write out the GOT section.
571 Layout* layout_;
572
573 // This class represents dynamic relocations that need to be applied by
574 // gold because we are using TLS relocations in a static link.
575 class Static_reloc
576 {
577 public:
578 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
579 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
580 { this->u_.global.symbol = gsym; }
581
582 Static_reloc(unsigned int got_offset, unsigned int r_type,
583 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
584 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
585 {
586 this->u_.local.relobj = relobj;
587 this->u_.local.index = index;
588 }
589
590 // Return the GOT offset.
591 unsigned int
592 got_offset() const
593 { return this->got_offset_; }
594
595 // Relocation type.
596 unsigned int
597 r_type() const
598 { return this->r_type_; }
599
600 // Whether the symbol is global or not.
601 bool
602 symbol_is_global() const
603 { return this->symbol_is_global_; }
604
605 // For a relocation against a global symbol, the global symbol.
606 Symbol*
607 symbol() const
608 {
609 gold_assert(this->symbol_is_global_);
610 return this->u_.global.symbol;
611 }
612
613 // For a relocation against a local symbol, the defining object.
614 Sized_relobj_file<size, big_endian>*
615 relobj() const
616 {
617 gold_assert(!this->symbol_is_global_);
618 return this->u_.local.relobj;
619 }
620
621 // For a relocation against a local symbol, the local symbol index.
622 unsigned int
623 index() const
624 {
625 gold_assert(!this->symbol_is_global_);
626 return this->u_.local.index;
627 }
628
629 private:
630 // GOT offset of the entry to which this relocation is applied.
631 unsigned int got_offset_;
632 // Type of relocation.
633 unsigned int r_type_;
634 // Whether this relocation is against a global symbol.
635 bool symbol_is_global_;
636 // A global or local symbol.
637 union
638 {
639 struct
640 {
641 // For a global symbol, the symbol itself.
642 Symbol* symbol;
643 } global;
644 struct
645 {
646 // For a local symbol, the object defining the symbol.
647 Sized_relobj_file<size, big_endian>* relobj;
648 // For a local symbol, the symbol index.
649 unsigned int index;
650 } local;
651 } u_;
652 }; // End of inner class Static_reloc
653
654 std::vector<Static_reloc> static_relocs_;
655 }; // End of Output_data_got_aarch64
656
657
658 template<int size, bool big_endian>
659 class AArch64_input_section;
660
661
662 template<int size, bool big_endian>
663 class AArch64_output_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_relobj;
668
669
670 // Stub type enum constants.
671
672 enum
673 {
674 ST_NONE = 0,
675
676 // Using adrp/add pair, 4 insns (including alignment) without mem access,
677 // the fastest stub. This has a limited jump distance, which is tested by
678 // aarch64_valid_for_adrp_p.
679 ST_ADRP_BRANCH = 1,
680
681 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
682 // unlimited in jump distance.
683 ST_LONG_BRANCH_ABS = 2,
684
685 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
686 // mem access, slowest one. Only used in position independent executables.
687 ST_LONG_BRANCH_PCREL = 3,
688
689 // Stub for erratum 843419 handling.
690 ST_E_843419 = 4,
691
692 // Stub for erratum 835769 handling.
693 ST_E_835769 = 5,
694
695 // Number of total stub types.
696 ST_NUMBER = 6
697 };
698
699
700 // Struct that wraps insns for a particular stub. All stub templates are
701 // created/initialized as constants by Stub_template_repertoire.
702
703 template<bool big_endian>
704 struct Stub_template
705 {
706 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
707 const int insn_num;
708 };
709
710
711 // Simple singleton class that creates/initializes/stores all types of stub
712 // templates.
713
714 template<bool big_endian>
715 class Stub_template_repertoire
716 {
717 public:
718 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
719
720 // Single static method to get stub template for a given stub type.
721 static const Stub_template<big_endian>*
722 get_stub_template(int type)
723 {
724 static Stub_template_repertoire<big_endian> singleton;
725 return singleton.stub_templates_[type];
726 }
727
728 private:
729 // Constructor - creates/initializes all stub templates.
730 Stub_template_repertoire();
731 ~Stub_template_repertoire()
732 { }
733
734 // Disallowing copy ctor and copy assignment operator.
735 Stub_template_repertoire(Stub_template_repertoire&);
736 Stub_template_repertoire& operator=(Stub_template_repertoire&);
737
738 // Data that stores all insn templates.
739 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
740 }; // End of "class Stub_template_repertoire".
741
742
743 // Constructor - creates/initializes all stub templates.
744
745 template<bool big_endian>
746 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
747 {
748 // Insn array definitions.
749 const static Insntype ST_NONE_INSNS[] = {};
750
751 const static Insntype ST_ADRP_BRANCH_INSNS[] =
752 {
753 0x90000010, /* adrp ip0, X */
754 /* ADR_PREL_PG_HI21(X) */
755 0x91000210, /* add ip0, ip0, :lo12:X */
756 /* ADD_ABS_LO12_NC(X) */
757 0xd61f0200, /* br ip0 */
758 0x00000000, /* alignment padding */
759 };
760
761 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
762 {
763 0x58000050, /* ldr ip0, 0x8 */
764 0xd61f0200, /* br ip0 */
765 0x00000000, /* address field */
766 0x00000000, /* address field */
767 };
768
769 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
770 {
771 0x58000090, /* ldr ip0, 0x10 */
772 0x10000011, /* adr ip1, #0 */
773 0x8b110210, /* add ip0, ip0, ip1 */
774 0xd61f0200, /* br ip0 */
775 0x00000000, /* address field */
776 0x00000000, /* address field */
777 0x00000000, /* alignment padding */
778 0x00000000, /* alignment padding */
779 };
780
781 const static Insntype ST_E_843419_INSNS[] =
782 {
783 0x00000000, /* Placeholder for erratum insn. */
784 0x14000000, /* b <label> */
785 };
786
787 // ST_E_835769 has the same stub template as ST_E_843419
788 // but we reproduce the array here so that the sizeof
789 // expressions in install_insn_template will work.
790 const static Insntype ST_E_835769_INSNS[] =
791 {
792 0x00000000, /* Placeholder for erratum insn. */
793 0x14000000, /* b <label> */
794 };
795
796 #define install_insn_template(T) \
797 const static Stub_template<big_endian> template_##T = { \
798 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
799 this->stub_templates_[T] = &template_##T
800
801 install_insn_template(ST_NONE);
802 install_insn_template(ST_ADRP_BRANCH);
803 install_insn_template(ST_LONG_BRANCH_ABS);
804 install_insn_template(ST_LONG_BRANCH_PCREL);
805 install_insn_template(ST_E_843419);
806 install_insn_template(ST_E_835769);
807
808 #undef install_insn_template
809 }
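// Usage sketch (illustrative only):
//
//   const Stub_template<big_endian>* t =
//     Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // t->insn_num == 4 and t->insns[2] == 0xd61f0200 ("br ip0"),
//   // per ST_ADRP_BRANCH_INSNS above.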
810
811
812 // Base class for stubs.
813
814 template<int size, bool big_endian>
815 class Stub_base
816 {
817 public:
818 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
819 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
820
821 static const AArch64_address invalid_address =
822 static_cast<AArch64_address>(-1);
823
824 static const section_offset_type invalid_offset =
825 static_cast<section_offset_type>(-1);
826
827 Stub_base(int type)
828 : destination_address_(invalid_address),
829 offset_(invalid_offset),
830 type_(type)
831 {}
832
833 ~Stub_base()
834 {}
835
836 // Get stub type.
837 int
838 type() const
839 { return this->type_; }
840
841 // Get stub template that provides stub insn information.
842 const Stub_template<big_endian>*
843 stub_template() const
844 {
845 return Stub_template_repertoire<big_endian>::
846 get_stub_template(this->type());
847 }
848
849 // Get destination address.
850 AArch64_address
851 destination_address() const
852 {
853 gold_assert(this->destination_address_ != this->invalid_address);
854 return this->destination_address_;
855 }
856
857 // Set destination address.
858 void
859 set_destination_address(AArch64_address address)
860 {
861 gold_assert(address != this->invalid_address);
862 this->destination_address_ = address;
863 }
864
865 // Reset the destination address.
866 void
867 reset_destination_address()
868 { this->destination_address_ = this->invalid_address; }
869
870 // Get offset of code stub. For Reloc_stub, it is the offset from the
871 // beginning of its containing stub table; for Erratum_stub, it is the offset
872 // from the end of reloc_stubs.
873 section_offset_type
874 offset() const
875 {
876 gold_assert(this->offset_ != this->invalid_offset);
877 return this->offset_;
878 }
879
880 // Set stub offset.
881 void
882 set_offset(section_offset_type offset)
883 { this->offset_ = offset; }
884
885 // Return the stub insn.
886 const Insntype*
887 insns() const
888 { return this->stub_template()->insns; }
889
890 // Return num of stub insns.
891 unsigned int
892 insn_num() const
893 { return this->stub_template()->insn_num; }
894
895 // Get size of the stub.
896 int
897 stub_size() const
898 {
899 return this->insn_num() *
900 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
901 }
902
903 // Write stub to output file.
904 void
905 write(unsigned char* view, section_size_type view_size)
906 { this->do_write(view, view_size); }
907
908 protected:
909 // Abstract method to be implemented by sub-classes.
910 virtual void
911 do_write(unsigned char*, section_size_type) = 0;
912
913 private:
914 // The last insn of a stub is a jump to destination insn. This field records
915 // the destination address.
916 AArch64_address destination_address_;
917 // The stub offset. Note this has different interpretations for a
918 // Reloc_stub and an Erratum_stub. For a Reloc_stub this is the offset from the
919 // beginning of the containing stub_table, whereas for an Erratum_stub, this is
920 // the offset from the end of reloc_stubs.
921 section_offset_type offset_;
922 // Stub type.
923 const int type_;
924 }; // End of "Stub_base".
925
926
927 // Erratum stub class. An erratum stub differs from a reloc stub in that for
928 // each erratum occurrence, we generate an erratum stub. We never share erratum
929 // stubs, whereas for reloc stubs, different branch insns share a single reloc
930 // stub as long as the branch targets are the same. (More to the point, reloc
931 // stubs can be shared because they're used to reach a specific target, whereas
932 // erratum stubs branch back to the original control flow.)
933
934 template<int size, bool big_endian>
935 class Erratum_stub : public Stub_base<size, big_endian>
936 {
937 public:
938 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
939 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
940 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
941 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
942
943 static const int STUB_ADDR_ALIGN;
944
945 static const Insntype invalid_insn = static_cast<Insntype>(-1);
946
947 Erratum_stub(The_aarch64_relobj* relobj, int type,
948 unsigned shndx, unsigned int sh_offset)
949 : Stub_base<size, big_endian>(type), relobj_(relobj),
950 shndx_(shndx), sh_offset_(sh_offset),
951 erratum_insn_(invalid_insn),
952 erratum_address_(this->invalid_address)
953 {}
954
955 ~Erratum_stub() {}
956
957 // Return the object that contains the erratum.
958 The_aarch64_relobj*
959 relobj()
960 { return this->relobj_; }
961
962 // Get section index of the erratum.
963 unsigned int
964 shndx() const
965 { return this->shndx_; }
966
967 // Get section offset of the erratum.
968 unsigned int
969 sh_offset() const
970 { return this->sh_offset_; }
971
972 // Get the erratum insn. This is the insn located at erratum_insn_address.
973 Insntype
974 erratum_insn() const
975 {
976 gold_assert(this->erratum_insn_ != this->invalid_insn);
977 return this->erratum_insn_;
978 }
979
980 // Set the insn that the erratum happens to.
981 void
982 set_erratum_insn(Insntype insn)
983 { this->erratum_insn_ = insn; }
984
985 // For 843419, the erratum insn is "ld/st xt, [xn, #uimm]", which may be a
986 // relocation spot. In that case the erratum_insn_ recorded at the scanning
987 // phase is no longer the one we want to write out to the stub, so update
988 // erratum_insn_ with the relocated version. Also note that in this case xn
989 // cannot be the PC, so it is safe to move the erratum insn from its original
990 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
991 // insn, which cannot be a relocation spot (an assertion checks this).
992 void
993 update_erratum_insn(Insntype insn)
994 {
995 gold_assert(this->erratum_insn_ != this->invalid_insn);
996 switch (this->type())
997 {
998 case ST_E_843419:
999 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
1000 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
1001 gold_assert(Insn_utilities::aarch64_rd(insn) ==
1002 Insn_utilities::aarch64_rd(this->erratum_insn()));
1003 gold_assert(Insn_utilities::aarch64_rn(insn) ==
1004 Insn_utilities::aarch64_rn(this->erratum_insn()));
1005 // Update plain ld/st insn with relocated insn.
1006 this->erratum_insn_ = insn;
1007 break;
1008 case ST_E_835769:
1009 gold_assert(insn == this->erratum_insn());
1010 break;
1011 default:
1012 gold_unreachable();
1013 }
1014 }
1015
1016
1017 // Return the address where the erratum fix must be applied.
1018 AArch64_address
1019 erratum_address() const
1020 {
1021 gold_assert(this->erratum_address_ != this->invalid_address);
1022 return this->erratum_address_;
1023 }
1024
1025 // Set the address where the erratum fix must be applied.
1026 void
1027 set_erratum_address(AArch64_address addr)
1028 { this->erratum_address_ = addr; }
1029
1030 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1031 // sh_offset). We do not include 'type' in the calculation, because there is
1032 // at most one stub type at (obj, shndx, sh_offset).
1033 bool
1034 operator<(const Erratum_stub<size, big_endian>& k) const
1035 {
1036 if (this == &k)
1037 return false;
1038 // We group stubs by relobj.
1039 if (this->relobj_ != k.relobj_)
1040 return this->relobj_ < k.relobj_;
1041 // Then by section index.
1042 if (this->shndx_ != k.shndx_)
1043 return this->shndx_ < k.shndx_;
1044 // Lastly by section offset.
1045 return this->sh_offset_ < k.sh_offset_;
1046 }
1047
1048 protected:
1049 virtual void
1050 do_write(unsigned char*, section_size_type);
1051
1052 private:
1053 // The object that needs to be fixed.
1054 The_aarch64_relobj* relobj_;
1055 // The shndx in the object that needs to be fixed.
1056 const unsigned int shndx_;
1057 // The section offset in the object that needs to be fixed.
1058 const unsigned int sh_offset_;
1059 // The insn to be fixed.
1060 Insntype erratum_insn_;
1061 // The address of the above insn.
1062 AArch64_address erratum_address_;
1063 }; // End of "Erratum_stub".
1064
1065
1066 // Erratum subclass that wraps additional info needed by 843419. In fixing this
1067 // erratum, we may choose to replace 'adrp' with 'adr'; in that case, we need
1068 // the adrp's code position (two or three insns before the erratum insn itself).
1069
1070 template<int size, bool big_endian>
1071 class E843419_stub : public Erratum_stub<size, big_endian>
1072 {
1073 public:
1074 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1075
1076 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1077 unsigned int shndx, unsigned int sh_offset,
1078 unsigned int adrp_sh_offset)
1079 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1080 adrp_sh_offset_(adrp_sh_offset)
1081 {}
1082
1083 unsigned int
1084 adrp_sh_offset() const
1085 { return this->adrp_sh_offset_; }
1086
1087 private:
1088 // Section offset of "adrp". (We do not need a "adrp_shndx_" field, because we
1089 // can can obtain it from its parent.)
1090 const unsigned int adrp_sh_offset_;
1091 };
1092
1093
1094 template<int size, bool big_endian>
1095 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1096
1097 // Comparator used in set definition.
1098 template<int size, bool big_endian>
1099 struct Erratum_stub_less
1100 {
1101 bool
1102 operator()(const Erratum_stub<size, big_endian>* s1,
1103 const Erratum_stub<size, big_endian>* s2) const
1104 { return *s1 < *s2; }
1105 };
1106
1107 // Erratum_stub implementation for writing stub to output file.
1108
1109 template<int size, bool big_endian>
1110 void
1111 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1112 {
1113 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1114 const Insntype* insns = this->insns();
1115 uint32_t num_insns = this->insn_num();
1116 Insntype* ip = reinterpret_cast<Insntype*>(view);
1117 // For the currently implemented errata 843419 and 835769, the first insn in the
1118 // stub is always a copy of the problematic insn (in 843419, the mem access
1119 // insn, in 835769, the mac insn), followed by a jump-back.
1120 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1121 for (uint32_t i = 1; i < num_insns; ++i)
1122 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1123 }
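// For instance, an ST_E_843419 stub written here occupies two words: the
// (possibly relocated) erratum ld/st insn, then the 0x14000000 "b" placeholder
// from ST_E_843419_INSNS; the branch displacement back to the insn following
// the erratum spot is filled in later by Stub_table::relocate_stubs().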
1124
1125
1126 // Reloc stub class.
1127
1128 template<int size, bool big_endian>
1129 class Reloc_stub : public Stub_base<size, big_endian>
1130 {
1131 public:
1132 typedef Reloc_stub<size, big_endian> This;
1133 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1134
1135 // Branch range. This is used to calculate the section group size, as well as
1136 // determine whether a stub is needed.
1137 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1138 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1139
1140 // Constant used to determine if an offset fits in the adrp instruction
1141 // encoding.
1142 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1143 static const int MIN_ADRP_IMM = -(1 << 20);
1144
1145 static const int BYTES_PER_INSN = 4;
1146 static const int STUB_ADDR_ALIGN;
1147
1148 // Determine whether the offset fits in the jump/branch instruction.
1149 static bool
1150 aarch64_valid_branch_offset_p(int64_t offset)
1151 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1152
1153 // Determine whether the offset fits in the adrp immediate field.
1154 static bool
1155 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1156 {
1157 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1158 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1159 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1160 }
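// A worked example (addresses invented for illustration): for a stub at
// 0x00400000 branching to 0x80401234, Page() keeps bits [63:12], so
// adrp_imm = (0x80401000 - 0x00400000) >> 12 = 0x80001, which lies within
// [MIN_ADRP_IMM, MAX_ADRP_IMM], so ST_ADRP_BRANCH is usable; a destination
// 4GB or more away would fail this test.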
1161
1162 // Determine the stub type for a certain relocation, or ST_NONE if no stub
1163 // is needed.
1164 static int
1165 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1166 AArch64_address target);
1167
1168 Reloc_stub(int type)
1169 : Stub_base<size, big_endian>(type)
1170 { }
1171
1172 ~Reloc_stub()
1173 { }
1174
1175 // The key class used to index the stub instance in the stub table's stub map.
1176 class Key
1177 {
1178 public:
1179 Key(int type, const Symbol* symbol, const Relobj* relobj,
1180 unsigned int r_sym, int32_t addend)
1181 : type_(type), addend_(addend)
1182 {
1183 if (symbol != NULL)
1184 {
1185 this->r_sym_ = Reloc_stub::invalid_index;
1186 this->u_.symbol = symbol;
1187 }
1188 else
1189 {
1190 gold_assert(relobj != NULL && r_sym != invalid_index);
1191 this->r_sym_ = r_sym;
1192 this->u_.relobj = relobj;
1193 }
1194 }
1195
1196 ~Key()
1197 { }
1198
1199 // Return stub type.
1200 int
1201 type() const
1202 { return this->type_; }
1203
1204 // Return the local symbol index or invalid_index.
1205 unsigned int
1206 r_sym() const
1207 { return this->r_sym_; }
1208
1209 // Return the symbol if there is one.
1210 const Symbol*
1211 symbol() const
1212 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1213
1214 // Return the relobj if there is one.
1215 const Relobj*
1216 relobj() const
1217 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1218
1219 // Whether this equals to another key k.
1220 bool
1221 eq(const Key& k) const
1222 {
1223 return ((this->type_ == k.type_)
1224 && (this->r_sym_ == k.r_sym_)
1225 && ((this->r_sym_ != Reloc_stub::invalid_index)
1226 ? (this->u_.relobj == k.u_.relobj)
1227 : (this->u_.symbol == k.u_.symbol))
1228 && (this->addend_ == k.addend_));
1229 }
1230
1231 // Return a hash value.
1232 size_t
1233 hash_value() const
1234 {
1235 size_t name_hash_value = gold::string_hash<char>(
1236 (this->r_sym_ != Reloc_stub::invalid_index)
1237 ? this->u_.relobj->name().c_str()
1238 : this->u_.symbol->name());
1239 // We only have 4 stub types.
1240 size_t stub_type_hash_value = 0x03 & this->type_;
1241 return (name_hash_value
1242 ^ stub_type_hash_value
1243 ^ ((this->r_sym_ & 0x3fff) << 2)
1244 ^ ((this->addend_ & 0xffff) << 16));
1245 }
1246
1247 // Functors for STL associative containers.
1248 struct hash
1249 {
1250 size_t
1251 operator()(const Key& k) const
1252 { return k.hash_value(); }
1253 };
1254
1255 struct equal_to
1256 {
1257 bool
1258 operator()(const Key& k1, const Key& k2) const
1259 { return k1.eq(k2); }
1260 };
1261
1262 private:
1263 // Stub type.
1264 const int type_;
1265 // If this is a local symbol, this is the index in the defining object.
1266 // Otherwise, it is invalid_index for a global symbol.
1267 unsigned int r_sym_;
1268 // If r_sym_ is an invalid index, this points to a global symbol.
1269 // Otherwise, it points to a relobj. We use the unsized and target
1270 // independent Symbol and Relobj classes instead of Sized_symbol<size> and
1271 // AArch64_relobj, in order to avoid making the stub class a template
1272 // as most of the stub machinery is endianness-neutral. However, it
1273 // may require a bit of casting done by users of this class.
1274 union
1275 {
1276 const Symbol* symbol;
1277 const Relobj* relobj;
1278 } u_;
1279 // Addend associated with a reloc.
1280 int32_t addend_;
1281 }; // End of inner class Reloc_stub::Key
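// Sketch of how the key is used (names invented): two R_AARCH64_CALL26 relocs
// in different input sections that both target global "foo" with addend 0 and
// both need an ST_ADRP_BRANCH stub produce Keys that compare equal under
// eq(), hash alike, and therefore share one Reloc_stub in the stub table's
// Reloc_stub_map.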
1282
1283 protected:
1284 // This may be overridden in the child class.
1285 virtual void
1286 do_write(unsigned char*, section_size_type);
1287
1288 private:
1289 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1290 }; // End of Reloc_stub
1291
1292 template<int size, bool big_endian>
1293 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1294
1295 // Write data to output file.
1296
1297 template<int size, bool big_endian>
1298 void
1299 Reloc_stub<size, big_endian>::
1300 do_write(unsigned char* view, section_size_type)
1301 {
1302 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1303 const uint32_t* insns = this->insns();
1304 uint32_t num_insns = this->insn_num();
1305 Insntype* ip = reinterpret_cast<Insntype*>(view);
1306 for (uint32_t i = 0; i < num_insns; ++i)
1307 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1308 }
1309
1310
1311 // Determine the stub type for a certain relocation, or ST_NONE if no stub
1312 // is needed.
1313
1314 template<int size, bool big_endian>
1315 inline int
1316 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1317 unsigned int r_type, AArch64_address location, AArch64_address dest)
1318 {
1319 int64_t branch_offset = 0;
1320 switch(r_type)
1321 {
1322 case elfcpp::R_AARCH64_CALL26:
1323 case elfcpp::R_AARCH64_JUMP26:
1324 branch_offset = dest - location;
1325 break;
1326 default:
1327 gold_unreachable();
1328 }
1329
1330 if (aarch64_valid_branch_offset_p(branch_offset))
1331 return ST_NONE;
1332
1333 if (aarch64_valid_for_adrp_p(location, dest))
1334 return ST_ADRP_BRANCH;
1335
1336 // Always use PC-relative addressing in case of -shared or -pie.
1337 if (parameters->options().output_is_position_independent())
1338 return ST_LONG_BRANCH_PCREL;
1339
1340 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
1341 // But it is only applicable to non-shared, non-pie links.
1342 return ST_LONG_BRANCH_ABS;
1343 }
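// To illustrate the decision above (offsets invented): a BL whose target is
// 0x0C000000 (192MB) away exceeds MAX_BRANCH_OFFSET (about 128MB), so a stub
// is required; the page delta still fits the adrp immediate, so
// ST_ADRP_BRANCH is chosen.  Only a destination 4GB or more away falls
// through to ST_LONG_BRANCH_PCREL (for PIC/PIE) or ST_LONG_BRANCH_ABS.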
1344
1345 // A class to hold stubs for the AArch64 target.
1346
1347 template<int size, bool big_endian>
1348 class Stub_table : public Output_data
1349 {
1350 public:
1351 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1352 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1353 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1354 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1355 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1356 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1357 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1358 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1359 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1360 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1361 typedef Stub_table<size, big_endian> The_stub_table;
1362 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1363 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1364 Reloc_stub_map;
1365 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1366 typedef Relocate_info<size, big_endian> The_relocate_info;
1367
1368 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1369 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1370
1371 Stub_table(The_aarch64_input_section* owner)
1372 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1373 erratum_stubs_size_(0), prev_data_size_(0)
1374 { }
1375
1376 ~Stub_table()
1377 { }
1378
1379 The_aarch64_input_section*
1380 owner() const
1381 { return owner_; }
1382
1383 // Whether this stub table is empty.
1384 bool
1385 empty() const
1386 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1387
1388 // Return the current data size.
1389 off_t
1390 current_data_size() const
1391 { return this->current_data_size_for_child(); }
1392
1393 // Add a STUB using KEY. The caller is responsible for avoiding addition
1394 // if a STUB with the same key has already been added.
1395 void
1396 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1397
1398 // Add an erratum stub into the erratum stub set. The set is ordered by
1399 // (relobj, shndx, sh_offset).
1400 void
1401 add_erratum_stub(The_erratum_stub* stub);
1402
1403 // Find whether an erratum stub exists for a given (obj, shndx, sh_offset).
1404 The_erratum_stub*
1405 find_erratum_stub(The_aarch64_relobj* a64relobj,
1406 unsigned int shndx, unsigned int sh_offset);
1407
1408 // Find all the erratum stubs for a given input section. The return value is a pair
1409 // of iterators [begin, end).
1410 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1411 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1412 unsigned int shndx);
1413
1414 // Compute the erratum stub address.
1415 AArch64_address
1416 erratum_stub_address(The_erratum_stub* stub) const
1417 {
1418 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1419 The_erratum_stub::STUB_ADDR_ALIGN);
1420 r += stub->offset();
1421 return r;
1422 }
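// Layout sketch (sizes invented): reloc stubs occupy the start of the table
// and erratum stubs follow at align(reloc_stubs_size_, STUB_ADDR_ALIGN).  With
// 20 bytes of reloc stubs and an erratum stub whose offset() is 8, the stub is
// written at this->address() + 20 + 8, since 20 is already 4-byte aligned.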
1423
1424 // Finalize stubs. No-op here, just for completeness.
1425 void
1426 finalize_stubs()
1427 { }
1428
1429 // Look up a relocation stub using KEY. Return NULL if there is none.
1430 The_reloc_stub*
1431 find_reloc_stub(The_reloc_stub_key& key)
1432 {
1433 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1434 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1435 }
1436
1437 // Relocate stubs in this stub table.
1438 void
1439 relocate_stubs(const The_relocate_info*,
1440 The_target_aarch64*,
1441 Output_section*,
1442 unsigned char*,
1443 AArch64_address,
1444 section_size_type);
1445
1446 // Update data size at the end of a relaxation pass. Return true if data size
1447 // is different from that of the previous relaxation pass.
1448 bool
1449 update_data_size_changed_p()
1450 {
1451 // The addralign does not change here.
1452 off_t s = align_address(this->reloc_stubs_size_,
1453 The_erratum_stub::STUB_ADDR_ALIGN)
1454 + this->erratum_stubs_size_;
1455 bool changed = (s != this->prev_data_size_);
1456 this->prev_data_size_ = s;
1457 return changed;
1458 }
1459
1460 protected:
1461 // Write out section contents.
1462 void
1463 do_write(Output_file*);
1464
1465 // Return the required alignment.
1466 uint64_t
1467 do_addralign() const
1468 {
1469 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1470 The_erratum_stub::STUB_ADDR_ALIGN);
1471 }
1472
1473 // Reset address and file offset.
1474 void
1475 do_reset_address_and_file_offset()
1476 { this->set_current_data_size_for_child(this->prev_data_size_); }
1477
1478 // Set final data size.
1479 void
1480 set_final_data_size()
1481 { this->set_data_size(this->current_data_size()); }
1482
1483 private:
1484 // Relocate one stub.
1485 void
1486 relocate_stub(The_reloc_stub*,
1487 const The_relocate_info*,
1488 The_target_aarch64*,
1489 Output_section*,
1490 unsigned char*,
1491 AArch64_address,
1492 section_size_type);
1493
1494 private:
1495 // Owner of this stub table.
1496 The_aarch64_input_section* owner_;
1497 // The relocation stubs.
1498 Reloc_stub_map reloc_stubs_;
1499 // The erratum stubs.
1500 Erratum_stub_set erratum_stubs_;
1501 // Size of reloc stubs.
1502 off_t reloc_stubs_size_;
1503 // Size of erratum stubs.
1504 off_t erratum_stubs_size_;
1505 // Data size of this stub table in the previous relaxation pass.
1506 off_t prev_data_size_;
1507 }; // End of Stub_table
1508
1509
1510 // Add an erratum stub into the erratum stub set. The set is ordered by
1511 // (relobj, shndx, sh_offset).
1512
1513 template<int size, bool big_endian>
1514 void
1515 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1516 {
1517 std::pair<Erratum_stub_set_iter, bool> ret =
1518 this->erratum_stubs_.insert(stub);
1519 gold_assert(ret.second);
1520 this->erratum_stubs_size_ = align_address(
1521 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1522 stub->set_offset(this->erratum_stubs_size_);
1523 this->erratum_stubs_size_ += stub->stub_size();
1524 }
1525
1526
1527 // Find whether an erratum stub exists for a given (obj, shndx, sh_offset).
1528
1529 template<int size, bool big_endian>
1530 Erratum_stub<size, big_endian>*
1531 Stub_table<size, big_endian>::find_erratum_stub(
1532 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1533 {
1534 // A dummy object used as key to search in the set.
1535 The_erratum_stub key(a64relobj, ST_NONE,
1536 shndx, sh_offset);
1537 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1538 if (i != this->erratum_stubs_.end())
1539 {
1540 The_erratum_stub* stub(*i);
1541 gold_assert(stub->erratum_insn() != 0);
1542 return stub;
1543 }
1544 return NULL;
1545 }
1546
1547
1548 // Find all the errata for a given input section. The return value is a pair of
1549 // iterators [begin, end).
1550
1551 template<int size, bool big_endian>
1552 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1553 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1554 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1555 The_aarch64_relobj* a64relobj, unsigned int shndx)
1556 {
1557 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1558 Erratum_stub_set_iter start, end;
1559 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1560 start = this->erratum_stubs_.lower_bound(&low_key);
1561 if (start == this->erratum_stubs_.end())
1562 return Result_pair(this->erratum_stubs_.end(),
1563 this->erratum_stubs_.end());
1564 end = start;
1565 while (end != this->erratum_stubs_.end() &&
1566 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1567 ++end;
1568 return Result_pair(start, end);
1569 }
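// Typical use of the returned half-open range (sketch; variable names are
// illustrative):
//
//   std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> range =
//     stub_table->find_erratum_stubs_for_input_section(a64relobj, shndx);
//   for (Erratum_stub_set_iter p = range.first; p != range.second; ++p)
//     { /* examine (*p)->sh_offset(), (*p)->stub_size(), ... */ }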
1570
1571
1572 // Add a STUB using KEY. The caller is responsible for avoiding addition
1573 // if a STUB with the same key has already been added.
1574
1575 template<int size, bool big_endian>
1576 void
1577 Stub_table<size, big_endian>::add_reloc_stub(
1578 The_reloc_stub* stub, const The_reloc_stub_key& key)
1579 {
1580 gold_assert(stub->type() == key.type());
1581 this->reloc_stubs_[key] = stub;
1582
1583 // Assign stub offset early. We can do this because we never remove
1584 // reloc stubs and they are in the beginning of the stub table.
1585 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1586 The_reloc_stub::STUB_ADDR_ALIGN);
1587 stub->set_offset(this->reloc_stubs_size_);
1588 this->reloc_stubs_size_ += stub->stub_size();
1589 }
1590
1591
1592 // Relocate all stubs in this stub table.
1593
1594 template<int size, bool big_endian>
1595 void
1596 Stub_table<size, big_endian>::
1597 relocate_stubs(const The_relocate_info* relinfo,
1598 The_target_aarch64* target_aarch64,
1599 Output_section* output_section,
1600 unsigned char* view,
1601 AArch64_address address,
1602 section_size_type view_size)
1603 {
1604 // "view_size" is the total size of the stub_table.
1605 gold_assert(address == this->address() &&
1606 view_size == static_cast<section_size_type>(this->data_size()));
1607 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1608 p != this->reloc_stubs_.end(); ++p)
1609 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1610 view, address, view_size);
1611
1612 // Just for convenience.
1613 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1614
1615 // Now 'relocate' erratum stubs.
1616 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1617 i != this->erratum_stubs_.end(); ++i)
1618 {
1619 AArch64_address stub_address = this->erratum_stub_address(*i);
1620 // The address of "b" in the stub that is to be "relocated".
1621 AArch64_address stub_b_insn_address;
1622 // Branch offset that is to be filled in "b" insn.
1623 int b_offset = 0;
1624 switch ((*i)->type())
1625 {
1626 case ST_E_843419:
1627 case ST_E_835769:
1628 // The 1st insn of the erratum could be a relocation spot,
1629 // in this case we need to fix it with
1630 // "(*i)->erratum_insn()".
1631 elfcpp::Swap<32, big_endian>::writeval(
1632 view + (stub_address - this->address()),
1633 (*i)->erratum_insn());
1634 // For the erratum, the 2nd insn is a b-insn to be patched
1635 // (relocated).
1636 stub_b_insn_address = stub_address + 1 * BPI;
1637 b_offset = (*i)->destination_address() - stub_b_insn_address;
1638 AArch64_relocate_functions<size, big_endian>::construct_b(
1639 view + (stub_b_insn_address - this->address()),
1640 ((unsigned int)(b_offset)) & 0xfffffff);
1641 break;
1642 default:
1643 gold_unreachable();
1644 break;
1645 }
1646 }
1647 }
1648
1649
1650 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1651
1652 template<int size, bool big_endian>
1653 void
1654 Stub_table<size, big_endian>::
1655 relocate_stub(The_reloc_stub* stub,
1656 const The_relocate_info* relinfo,
1657 The_target_aarch64* target_aarch64,
1658 Output_section* output_section,
1659 unsigned char* view,
1660 AArch64_address address,
1661 section_size_type view_size)
1662 {
1663 // "offset" is the offset from the beginning of the stub_table.
1664 section_size_type offset = stub->offset();
1665 section_size_type stub_size = stub->stub_size();
1666 // "view_size" is the total size of the stub_table.
1667 gold_assert(offset + stub_size <= view_size);
1668
1669 target_aarch64->relocate_stub(stub, relinfo, output_section,
1670 view + offset, address + offset, view_size);
1671 }
1672
1673
1674 // Write out the stubs to file.
1675
1676 template<int size, bool big_endian>
1677 void
1678 Stub_table<size, big_endian>::do_write(Output_file* of)
1679 {
1680 off_t offset = this->offset();
1681 const section_size_type oview_size =
1682 convert_to_section_size_type(this->data_size());
1683 unsigned char* const oview = of->get_output_view(offset, oview_size);
1684
1685 // Write relocation stubs.
1686 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1687 p != this->reloc_stubs_.end(); ++p)
1688 {
1689 The_reloc_stub* stub = p->second;
1690 AArch64_address address = this->address() + stub->offset();
1691 gold_assert(address ==
1692 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1693 stub->write(oview + stub->offset(), stub->stub_size());
1694 }
1695
1696 // Write erratum stubs.
1697 unsigned int erratum_stub_start_offset =
1698 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1699 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1700 p != this->erratum_stubs_.end(); ++p)
1701 {
1702 The_erratum_stub* stub(*p);
1703 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1704 stub->stub_size());
1705 }
1706
1707 of->write_output_view(this->offset(), oview_size, oview);
1708 }
1709
1710
1711 // AArch64_relobj class.
1712
1713 template<int size, bool big_endian>
1714 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1715 {
1716 public:
1717 typedef AArch64_relobj<size, big_endian> This;
1718 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1719 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1720 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1721 typedef Stub_table<size, big_endian> The_stub_table;
1722 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1723 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1724 typedef std::vector<The_stub_table*> Stub_table_list;
1725 static const AArch64_address invalid_address =
1726 static_cast<AArch64_address>(-1);
1727
1728 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1729 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1730 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1731 stub_tables_()
1732 { }
1733
1734 ~AArch64_relobj()
1735 { }
1736
1737 // Return the stub table of the SHNDX-th section if there is one.
1738 The_stub_table*
1739 stub_table(unsigned int shndx) const
1740 {
1741 gold_assert(shndx < this->stub_tables_.size());
1742 return this->stub_tables_[shndx];
1743 }
1744
1745 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1746 void
1747 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1748 {
1749 gold_assert(shndx < this->stub_tables_.size());
1750 this->stub_tables_[shndx] = stub_table;
1751 }
1752
1753 // Entrance to errata scanning.
1754 void
1755 scan_errata(unsigned int shndx,
1756 const elfcpp::Shdr<size, big_endian>&,
1757 Output_section*, const Symbol_table*,
1758 The_target_aarch64*);
1759
1760 // Scan all relocation sections for stub generation.
1761 void
1762 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1763 const Layout*);
1764
1765 // Whether a section is a scannable text section.
1766 bool
1767 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1768 const Output_section*, const Symbol_table*);
1769
1770 // Convert regular input section with index SHNDX to a relaxed section.
1771 void
1772 convert_input_section_to_relaxed_section(unsigned /* shndx */)
1773 {
1774 // The stubs have relocations and we need to process them after writing
1775 // out the stubs. So relocation now must follow section writes.
1776 this->set_relocs_must_follow_section_writes();
1777 }
1778
1779 // Structure for mapping symbol position.
1780 struct Mapping_symbol_position
1781 {
1782 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1783 shndx_(shndx), offset_(offset)
1784 {}
1785
1786 // "<" comparator used in ordered_map container.
1787 bool
1788 operator<(const Mapping_symbol_position& p) const
1789 {
1790 return (this->shndx_ < p.shndx_
1791 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1792 }
1793
1794 // Section index.
1795 unsigned int shndx_;
1796
1797 // Section offset.
1798 AArch64_address offset_;
1799 };
1800
1801 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1802
1803 protected:
1804 // Post constructor setup.
1805 void
1806 do_setup()
1807 {
1808 // Call parent's setup method.
1809 Sized_relobj_file<size, big_endian>::do_setup();
1810
1811 // Initialize look-up tables.
1812 this->stub_tables_.resize(this->shnum());
1813 }
1814
1815 virtual void
1816 do_relocate_sections(
1817 const Symbol_table* symtab, const Layout* layout,
1818 const unsigned char* pshdrs, Output_file* of,
1819 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1820
1821 // Count local symbols and (optionally) record mapping info.
1822 virtual void
1823 do_count_local_symbols(Stringpool_template<char>*,
1824 Stringpool_template<char>*);
1825
1826 private:
1827 // Fix all errata in the object.
1828 void
1829 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1830
1831 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1832 // applied.
1833 bool
1834 try_fix_erratum_843419_optimized(
1835 The_erratum_stub*,
1836 typename Sized_relobj_file<size, big_endian>::View_size&);
1837
1838 // Whether a section needs to be scanned for relocation stubs.
1839 bool
1840 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1841 const Relobj::Output_sections&,
1842 const Symbol_table*, const unsigned char*);
1843
1844 // List of stub tables.
1845 Stub_table_list stub_tables_;
1846
1847 // Mapping symbol information sorted by (section index, section_offset).
1848 Mapping_symbol_info mapping_symbol_info_;
1849 }; // End of AArch64_relobj
1850
1851
1852 // Override to record mapping symbol information.
1853 template<int size, bool big_endian>
1854 void
1855 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1856 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1857 {
1858 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1859
1860   // Only erratum-fixing work needs mapping symbols, so skip this time-consuming
1861   // processing if we are not fixing errata.
1862 if (!parameters->options().fix_cortex_a53_843419()
1863 && !parameters->options().fix_cortex_a53_835769())
1864 return;
1865
1866 const unsigned int loccount = this->local_symbol_count();
1867 if (loccount == 0)
1868 return;
1869
1870 // Read the symbol table section header.
1871 const unsigned int symtab_shndx = this->symtab_shndx();
1872 elfcpp::Shdr<size, big_endian>
1873 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1874 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1875
1876 // Read the local symbols.
1877   const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1878 gold_assert(loccount == symtabshdr.get_sh_info());
1879 off_t locsize = loccount * sym_size;
1880 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1881 locsize, true, true);
1882
1883 // For mapping symbol processing, we need to read the symbol names.
1884 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1885 if (strtab_shndx >= this->shnum())
1886 {
1887 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1888 return;
1889 }
1890
1891 elfcpp::Shdr<size, big_endian>
1892 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1893 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1894 {
1895 this->error(_("symbol table name section has wrong type: %u"),
1896 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1897 return;
1898 }
1899
1900 const char* pnames =
1901 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1902 strtabshdr.get_sh_size(),
1903 false, false));
1904
1905 // Skip the first dummy symbol.
1906 psyms += sym_size;
1907 typename Sized_relobj_file<size, big_endian>::Local_values*
1908 plocal_values = this->local_values();
1909 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1910 {
1911 elfcpp::Sym<size, big_endian> sym(psyms);
1912 Symbol_value<size>& lv((*plocal_values)[i]);
1913 AArch64_address input_value = lv.input_value();
1914
1915 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1916 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1917 // symbols.
1918       // Mapping symbols can take one of the following four forms:
1919 // a) $x
1920 // b) $x.<any...>
1921 // c) $d
1922 // d) $d.<any...>
1923 const char* sym_name = pnames + sym.get_st_name();
1924 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1925 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1926 {
1927 bool is_ordinary;
1928 unsigned int input_shndx =
1929 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1930 gold_assert(is_ordinary);
1931
1932 Mapping_symbol_position msp(input_shndx, input_value);
1933 // Insert mapping_symbol_info into map whose ordering is defined by
1934 // (shndx, offset_within_section).
1935 this->mapping_symbol_info_[msp] = sym_name[1];
1936 }
1937 }
1938 }
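
// A minimal, hypothetical sketch (not part of gold) of the mapping-symbol
// name test performed in do_count_local_symbols() above: accept "$x", "$d",
// "$x.<anything>" and "$d.<anything>".  The helper name is illustrative only.
static inline bool
example_is_aarch64_mapping_symbol(const char* name)
{
  return (name[0] == '$'
          && (name[1] == 'x' || name[1] == 'd')
          && (name[2] == '\0' || name[2] == '.'));
}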
1939
1940
1941 // Fix all errata in the object.
1942
1943 template<int size, bool big_endian>
1944 void
1945 AArch64_relobj<size, big_endian>::fix_errata(
1946 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1947 {
1948 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1949 unsigned int shnum = this->shnum();
1950 for (unsigned int i = 1; i < shnum; ++i)
1951 {
1952 The_stub_table* stub_table = this->stub_table(i);
1953 if (!stub_table)
1954 continue;
1955 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1956 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1957 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1958 while (p != end)
1959 {
1960 The_erratum_stub* stub = *p;
1961 typename Sized_relobj_file<size, big_endian>::View_size&
1962 pview((*pviews)[i]);
1963
1964 	  // Double-check the data before applying the fix.
1965 gold_assert(pview.address + stub->sh_offset()
1966 == stub->erratum_address());
1967
1968 // Update previously recorded erratum insn with relocated
1969 // version.
1970 Insntype* ip =
1971 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1972 Insntype insn_to_fix = ip[0];
1973 stub->update_erratum_insn(insn_to_fix);
1974
1975 // First try to see if erratum is 843419 and if it can be fixed
1976 // without using branch-to-stub.
1977 if (!try_fix_erratum_843419_optimized(stub, pview))
1978 {
1979 // Replace the erratum insn with a branch-to-stub.
1980 AArch64_address stub_address =
1981 stub_table->erratum_stub_address(stub);
1982 unsigned int b_offset = stub_address - stub->erratum_address();
1983 AArch64_relocate_functions<size, big_endian>::construct_b(
1984 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1985 }
1986 ++p;
1987 }
1988 }
1989 }
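
// A minimal, hypothetical sketch (not part of gold) of what the branch
// written by construct_b() above encodes: an unconditional AArch64 B
// instruction whose imm26 field holds the word (4-byte) offset from the
// erratum site to its stub, i.e. (stub_address - erratum_address) >> 2.
// The helper name is illustrative only.
static inline uint32_t
example_encode_branch_to_stub(uint64_t erratum_address, uint64_t stub_address)
{
  uint64_t byte_offset = stub_address - erratum_address;
  return 0x14000000u | ((byte_offset >> 2) & 0x03ffffffu);
}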
1990
1991
1992 // This is an optimization for erratum 843419. The erratum sequence begins with
1993 // an 'adrp'; when the final value calculated by the adrp also fits in an 'adr',
1994 // we can simply replace the 'adrp' with an 'adr', saving two jumps per
1995 // occurrence. (Note, however, that in this case we do not delete the erratum
1996 // stub (too late to do so); it is merely generated without ever being called.)
1997
1998 template<int size, bool big_endian>
1999 bool
2000 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
2001 The_erratum_stub* stub,
2002 typename Sized_relobj_file<size, big_endian>::View_size& pview)
2003 {
2004 if (stub->type() != ST_E_843419)
2005 return false;
2006
2007 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2008 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2009 E843419_stub<size, big_endian>* e843419_stub =
2010 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2011 AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
2012 Insntype* adrp_view = reinterpret_cast<Insntype*>(
2013 pview.view + e843419_stub->adrp_sh_offset());
2014 Insntype adrp_insn = adrp_view[0];
2015 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2016 // Get adrp 33-bit signed imm value.
2017 int64_t adrp_imm = Insn_utilities::
2018 aarch64_adrp_decode_imm(adrp_insn);
2019 // adrp - final value transferred to target register is calculated as:
2020 // PC[11:0] = Zeros(12)
2021 // adrp_dest_value = PC + adrp_imm;
2022 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2023   // adr - final value transferred to the target register is calculated as:
2024 // PC + adr_imm
2025 // So we have:
2026 // PC + adr_imm = adrp_dest_value
2027 // ==>
2028 // adr_imm = adrp_dest_value - PC
2029 int64_t adr_imm = adrp_dest_value - pc;
2030 // Check if imm fits in adr (21-bit signed).
2031 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2032 {
2033 // Convert 'adrp' into 'adr'.
2034 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2035 adr_insn = Insn_utilities::
2036 aarch64_adr_encode_imm(adr_insn, adr_imm);
2037 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2038 return true;
2039 }
2040 return false;
2041 }
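
// A minimal, hypothetical sketch (not gold code) of the range test used
// above: the adrp may be rewritten as an adr only if the delta from PC to
// the page-aligned destination fits in adr's signed 21-bit immediate.  For
// example, with pc == 0x400804 and adrp_imm == 0x1000 the destination is
// 0x401000, the delta is 0x7fc, and the rewrite applies.  The helper name is
// illustrative only.
static inline bool
example_adrp_fits_in_adr(int64_t pc, int64_t adrp_imm)
{
  int64_t adrp_dest_value = (pc & ~static_cast<int64_t>(0xfff)) + adrp_imm;
  int64_t adr_imm = adrp_dest_value - pc;
  return (-(static_cast<int64_t>(1) << 20) <= adr_imm
          && adr_imm < (static_cast<int64_t>(1) << 20));
}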
2042
2043
2044 // Relocate sections.
2045
2046 template<int size, bool big_endian>
2047 void
2048 AArch64_relobj<size, big_endian>::do_relocate_sections(
2049 const Symbol_table* symtab, const Layout* layout,
2050 const unsigned char* pshdrs, Output_file* of,
2051 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2052 {
2053 // Relocate the section data.
2054 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2055 1, this->shnum() - 1);
2056
2057 // We do not generate stubs if doing a relocatable link.
2058 if (parameters->options().relocatable())
2059 return;
2060
2061 if (parameters->options().fix_cortex_a53_843419()
2062 || parameters->options().fix_cortex_a53_835769())
2063 this->fix_errata(pviews);
2064
2065 Relocate_info<size, big_endian> relinfo;
2066 relinfo.symtab = symtab;
2067 relinfo.layout = layout;
2068 relinfo.object = this;
2069
2070 // Relocate stub tables.
2071 unsigned int shnum = this->shnum();
2072 The_target_aarch64* target = The_target_aarch64::current_target();
2073
2074 for (unsigned int i = 1; i < shnum; ++i)
2075 {
2076 The_aarch64_input_section* aarch64_input_section =
2077 target->find_aarch64_input_section(this, i);
2078 if (aarch64_input_section != NULL
2079 && aarch64_input_section->is_stub_table_owner()
2080 && !aarch64_input_section->stub_table()->empty())
2081 {
2082 Output_section* os = this->output_section(i);
2083 gold_assert(os != NULL);
2084
2085 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2086 relinfo.reloc_shdr = NULL;
2087 relinfo.data_shndx = i;
2088 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2089
2090 typename Sized_relobj_file<size, big_endian>::View_size&
2091 view_struct = (*pviews)[i];
2092 gold_assert(view_struct.view != NULL);
2093
2094 The_stub_table* stub_table = aarch64_input_section->stub_table();
2095 off_t offset = stub_table->address() - view_struct.address;
2096 unsigned char* view = view_struct.view + offset;
2097 AArch64_address address = stub_table->address();
2098 section_size_type view_size = stub_table->data_size();
2099 stub_table->relocate_stubs(&relinfo, target, os, view, address,
2100 view_size);
2101 }
2102 }
2103 }
2104
2105
2106 // Determine if an input section is scannable for stub processing. SHDR is
2107 // the header of the section and SHNDX is the section index. OS is the output
2108 // section for the input section and SYMTAB is the global symbol table used to
2109 // look up ICF information.
2110
2111 template<int size, bool big_endian>
2112 bool
2113 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2114 const elfcpp::Shdr<size, big_endian>& text_shdr,
2115 unsigned int text_shndx,
2116 const Output_section* os,
2117 const Symbol_table* symtab)
2118 {
2119   // Skip any empty sections, unallocated sections, or sections whose
2120   // type is not SHT_PROGBITS.
2121 if (text_shdr.get_sh_size() == 0
2122 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2123 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2124 return false;
2125
2126 // Skip any discarded or ICF'ed sections.
2127 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2128 return false;
2129
2130 // Skip exception frame.
2131 if (strcmp(os->name(), ".eh_frame") == 0)
2132     return false;
2133
2134 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2135 os->find_relaxed_input_section(this, text_shndx) != NULL);
2136
2137 return true;
2138 }
2139
2140
2141 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2142 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2143
2144 template<int size, bool big_endian>
2145 bool
2146 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2147 const elfcpp::Shdr<size, big_endian>& shdr,
2148 const Relobj::Output_sections& out_sections,
2149 const Symbol_table* symtab,
2150 const unsigned char* pshdrs)
2151 {
2152 unsigned int sh_type = shdr.get_sh_type();
2153 if (sh_type != elfcpp::SHT_RELA)
2154 return false;
2155
2156 // Ignore empty section.
2157 off_t sh_size = shdr.get_sh_size();
2158 if (sh_size == 0)
2159 return false;
2160
2161 // Ignore reloc section with unexpected symbol table. The
2162 // error will be reported in the final link.
2163 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2164 return false;
2165
2166 gold_assert(sh_type == elfcpp::SHT_RELA);
2167 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2168
2169 // Ignore reloc section with unexpected entsize or uneven size.
2170 // The error will be reported in the final link.
2171 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2172 return false;
2173
2174 // Ignore reloc section with bad info. This error will be
2175 // reported in the final link.
2176 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2177 if (text_shndx >= this->shnum())
2178 return false;
2179
2180 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2181 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2182 text_shndx * shdr_size);
2183 return this->text_section_is_scannable(text_shdr, text_shndx,
2184 out_sections[text_shndx], symtab);
2185 }
2186
2187
2188 // Scan section SHNDX for erratum 843419 and 835769.
2189
2190 template<int size, bool big_endian>
2191 void
2192 AArch64_relobj<size, big_endian>::scan_errata(
2193 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2194 Output_section* os, const Symbol_table* symtab,
2195 The_target_aarch64* target)
2196 {
2197 if (shdr.get_sh_size() == 0
2198 || (shdr.get_sh_flags() &
2199 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2200 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2201 return;
2202
2203 if (!os || symtab->is_section_folded(this, shndx)) return;
2204
2205 AArch64_address output_offset = this->get_output_section_offset(shndx);
2206 AArch64_address output_address;
2207 if (output_offset != invalid_address)
2208 output_address = os->address() + output_offset;
2209 else
2210 {
2211 const Output_relaxed_input_section* poris =
2212 os->find_relaxed_input_section(this, shndx);
2213 if (!poris) return;
2214 output_address = poris->address();
2215 }
2216
2217 section_size_type input_view_size = 0;
2218 const unsigned char* input_view =
2219 this->section_contents(shndx, &input_view_size, false);
2220
2221 Mapping_symbol_position section_start(shndx, 0);
2222 // Find the first mapping symbol record within section shndx.
2223 typename Mapping_symbol_info::const_iterator p =
2224 this->mapping_symbol_info_.lower_bound(section_start);
2225 while (p != this->mapping_symbol_info_.end() &&
2226 p->first.shndx_ == shndx)
2227 {
2228 typename Mapping_symbol_info::const_iterator prev = p;
2229 ++p;
2230 if (prev->second == 'x')
2231 {
2232 section_size_type span_start =
2233 convert_to_section_size_type(prev->first.offset_);
2234 section_size_type span_end;
2235 if (p != this->mapping_symbol_info_.end()
2236 && p->first.shndx_ == shndx)
2237 span_end = convert_to_section_size_type(p->first.offset_);
2238 else
2239 span_end = convert_to_section_size_type(shdr.get_sh_size());
2240
2241 	  // Here we do not share the scanning code between the two errata. For 843419,
2242 	  // only the last few insns of each page are examined, which is fast,
2243 	  // whereas for 835769 every insn pair needs to be checked.
2244
2245 if (parameters->options().fix_cortex_a53_843419())
2246 target->scan_erratum_843419_span(
2247 this, shndx, span_start, span_end,
2248 const_cast<unsigned char*>(input_view), output_address);
2249
2250 if (parameters->options().fix_cortex_a53_835769())
2251 target->scan_erratum_835769_span(
2252 this, shndx, span_start, span_end,
2253 const_cast<unsigned char*>(input_view), output_address);
2254 }
2255 }
2256 }
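
// A minimal, self-contained sketch (not gold code, names hypothetical) of how
// the sorted (shndx, offset) -> mapping-symbol map is turned into the
// executable spans handed to the erratum scanners above: each '$x' record
// opens a span that ends at the next mapping symbol in the same section, or
// at the end of the section.  Assumes <map>, <vector> and <utility> are
// available.
static void
example_collect_exec_spans(
    const std::map<std::pair<unsigned int, uint64_t>, char>& mapping_symbols,
    unsigned int shndx, uint64_t section_size,
    std::vector<std::pair<uint64_t, uint64_t> >* spans)
{
  typedef std::map<std::pair<unsigned int, uint64_t>, char>::const_iterator
      Iter;
  Iter p = mapping_symbols.lower_bound(
      std::make_pair(shndx, static_cast<uint64_t>(0)));
  while (p != mapping_symbols.end() && p->first.first == shndx)
    {
      Iter prev = p;
      ++p;
      if (prev->second != 'x')
        continue;
      uint64_t span_start = prev->first.second;
      uint64_t span_end = (p != mapping_symbols.end()
                           && p->first.first == shndx
                           ? p->first.second
                           : section_size);
      spans->push_back(std::make_pair(span_start, span_end));
    }
}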
2257
2258
2259 // Scan relocations for stub generation.
2260
2261 template<int size, bool big_endian>
2262 void
2263 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2264 The_target_aarch64* target,
2265 const Symbol_table* symtab,
2266 const Layout* layout)
2267 {
2268 unsigned int shnum = this->shnum();
2269 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2270
2271 // Read the section headers.
2272 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2273 shnum * shdr_size,
2274 true, true);
2275
2276 // To speed up processing, we set up hash tables for fast lookup of
2277 // input offsets to output addresses.
2278 this->initialize_input_to_output_maps();
2279
2280 const Relobj::Output_sections& out_sections(this->output_sections());
2281
2282 Relocate_info<size, big_endian> relinfo;
2283 relinfo.symtab = symtab;
2284 relinfo.layout = layout;
2285 relinfo.object = this;
2286
2287 // Do relocation stubs scanning.
2288 const unsigned char* p = pshdrs + shdr_size;
2289 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2290 {
2291 const elfcpp::Shdr<size, big_endian> shdr(p);
2292 if (parameters->options().fix_cortex_a53_843419()
2293 || parameters->options().fix_cortex_a53_835769())
2294 scan_errata(i, shdr, out_sections[i], symtab, target);
2295 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2296 pshdrs))
2297 {
2298 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2299 AArch64_address output_offset =
2300 this->get_output_section_offset(index);
2301 AArch64_address output_address;
2302 if (output_offset != invalid_address)
2303 {
2304 output_address = out_sections[index]->address() + output_offset;
2305 }
2306 else
2307 {
2308 // Currently this only happens for a relaxed section.
2309 const Output_relaxed_input_section* poris =
2310 out_sections[index]->find_relaxed_input_section(this, index);
2311 gold_assert(poris != NULL);
2312 output_address = poris->address();
2313 }
2314
2315 // Get the relocations.
2316 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2317 shdr.get_sh_size(),
2318 true, false);
2319
2320 // Get the section contents.
2321 section_size_type input_view_size = 0;
2322 const unsigned char* input_view =
2323 this->section_contents(index, &input_view_size, false);
2324
2325 relinfo.reloc_shndx = i;
2326 relinfo.data_shndx = index;
2327 unsigned int sh_type = shdr.get_sh_type();
2328 unsigned int reloc_size;
2329 	  gold_assert(sh_type == elfcpp::SHT_RELA);
2330 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2331
2332 Output_section* os = out_sections[index];
2333 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2334 shdr.get_sh_size() / reloc_size,
2335 os,
2336 output_offset == invalid_address,
2337 input_view, output_address,
2338 input_view_size);
2339 }
2340 }
2341 }
2342
2343
2344 // A class to wrap an ordinary input section containing executable code.
2345
2346 template<int size, bool big_endian>
2347 class AArch64_input_section : public Output_relaxed_input_section
2348 {
2349 public:
2350 typedef Stub_table<size, big_endian> The_stub_table;
2351
2352 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2353 : Output_relaxed_input_section(relobj, shndx, 1),
2354 stub_table_(NULL),
2355 original_contents_(NULL), original_size_(0),
2356 original_addralign_(1)
2357 { }
2358
2359 ~AArch64_input_section()
2360 { delete[] this->original_contents_; }
2361
2362 // Initialize.
2363 void
2364 init();
2365
2366 // Set the stub_table.
2367 void
2368 set_stub_table(The_stub_table* st)
2369 { this->stub_table_ = st; }
2370
2371 // Whether this is a stub table owner.
2372 bool
2373 is_stub_table_owner() const
2374 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2375
2376 // Return the original size of the section.
2377 uint32_t
2378 original_size() const
2379 { return this->original_size_; }
2380
2381 // Return the stub table.
2382 The_stub_table*
2383 stub_table()
2384 { return stub_table_; }
2385
2386 protected:
2387 // Write out this input section.
2388 void
2389 do_write(Output_file*);
2390
2391 // Return required alignment of this.
2392 uint64_t
2393 do_addralign() const
2394 {
2395 if (this->is_stub_table_owner())
2396 return std::max(this->stub_table_->addralign(),
2397 static_cast<uint64_t>(this->original_addralign_));
2398 else
2399 return this->original_addralign_;
2400 }
2401
2402 // Finalize data size.
2403 void
2404 set_final_data_size();
2405
2406 // Reset address and file offset.
2407 void
2408 do_reset_address_and_file_offset();
2409
2410 // Output offset.
2411 bool
2412 do_output_offset(const Relobj* object, unsigned int shndx,
2413 section_offset_type offset,
2414 section_offset_type* poutput) const
2415 {
2416 if ((object == this->relobj())
2417 && (shndx == this->shndx())
2418 && (offset >= 0)
2419 && (offset <=
2420 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2421 {
2422 *poutput = offset;
2423 return true;
2424 }
2425 else
2426 return false;
2427 }
2428
2429 private:
2430 // Copying is not allowed.
2431 AArch64_input_section(const AArch64_input_section&);
2432 AArch64_input_section& operator=(const AArch64_input_section&);
2433
2434 // The relocation stubs.
2435 The_stub_table* stub_table_;
2436 // Original section contents. We have to make a copy here since the file
2437 // containing the original section may not be locked when we need to access
2438 // the contents.
2439 unsigned char* original_contents_;
2440 // Section size of the original input section.
2441 uint32_t original_size_;
2442 // Address alignment of the original input section.
2443 uint32_t original_addralign_;
2444 }; // End of AArch64_input_section
2445
2446
2447 // Finalize data size.
2448
2449 template<int size, bool big_endian>
2450 void
2451 AArch64_input_section<size, big_endian>::set_final_data_size()
2452 {
2453 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2454
2455 if (this->is_stub_table_owner())
2456 {
2457 this->stub_table_->finalize_data_size();
2458 off = align_address(off, this->stub_table_->addralign());
2459 off += this->stub_table_->data_size();
2460 }
2461 this->set_data_size(off);
2462 }
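
// Worked example (illustrative): with an original section of 0x1234 bytes and
// an owned stub table aligned to 8 bytes holding 0x40 bytes of stubs, the
// final data size is align_address(0x1234, 8) + 0x40 == 0x1238 + 0x40 ==
// 0x1278.  A minimal sketch of that alignment step, assuming a power-of-two
// alignment (the helper name is hypothetical):
static inline uint64_t
example_align_up(uint64_t off, uint64_t align)
{
  return (off + align - 1) & ~(align - 1);
}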
2463
2464
2465 // Reset address and file offset.
2466
2467 template<int size, bool big_endian>
2468 void
2469 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2470 {
2471 // Size of the original input section contents.
2472 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2473
2474 // If this is a stub table owner, account for the stub table size.
2475 if (this->is_stub_table_owner())
2476 {
2477 The_stub_table* stub_table = this->stub_table_;
2478
2479 // Reset the stub table's address and file offset. The
2480 // current data size for child will be updated after that.
2481 stub_table_->reset_address_and_file_offset();
2482 off = align_address(off, stub_table_->addralign());
2483 off += stub_table->current_data_size();
2484 }
2485
2486 this->set_current_data_size(off);
2487 }
2488
2489
2490 // Initialize an AArch64_input_section.
2491
2492 template<int size, bool big_endian>
2493 void
2494 AArch64_input_section<size, big_endian>::init()
2495 {
2496 Relobj* relobj = this->relobj();
2497 unsigned int shndx = this->shndx();
2498
2499 // We have to cache original size, alignment and contents to avoid locking
2500 // the original file.
2501 this->original_addralign_ =
2502 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2503
2504 // This is not efficient but we expect only a small number of relaxed
2505 // input sections for stubs.
2506 section_size_type section_size;
2507 const unsigned char* section_contents =
2508 relobj->section_contents(shndx, &section_size, false);
2509 this->original_size_ =
2510 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2511
2512 gold_assert(this->original_contents_ == NULL);
2513 this->original_contents_ = new unsigned char[section_size];
2514 memcpy(this->original_contents_, section_contents, section_size);
2515
2516 // We want to make this look like the original input section after
2517 // output sections are finalized.
2518 Output_section* os = relobj->output_section(shndx);
2519 off_t offset = relobj->output_section_offset(shndx);
2520 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2521 this->set_address(os->address() + offset);
2522 this->set_file_offset(os->offset() + offset);
2523 this->set_current_data_size(this->original_size_);
2524 this->finalize_data_size();
2525 }
2526
2527
2528 // Write data to output file.
2529
2530 template<int size, bool big_endian>
2531 void
2532 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2533 {
2534 // We have to write out the original section content.
2535 gold_assert(this->original_contents_ != NULL);
2536 of->write(this->offset(), this->original_contents_,
2537 this->original_size_);
2538
2539 // If this owns a stub table and it is not empty, write it.
2540 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2541 this->stub_table_->write(of);
2542 }
2543
2544
2545 // AArch64 output section class. This is defined mainly to add a number of
2546 // stub generation methods.
2547
2548 template<int size, bool big_endian>
2549 class AArch64_output_section : public Output_section
2550 {
2551 public:
2552 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2553 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2554 typedef Stub_table<size, big_endian> The_stub_table;
2555 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2556
2557 public:
2558 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2559 elfcpp::Elf_Xword flags)
2560 : Output_section(name, type, flags)
2561 { }
2562
2563 ~AArch64_output_section() {}
2564
2565 // Group input sections for stub generation.
2566 void
2567 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2568 const Task*);
2569
2570 private:
2571 typedef Output_section::Input_section Input_section;
2572 typedef Output_section::Input_section_list Input_section_list;
2573
2574 // Create a stub group.
2575 void
2576 create_stub_group(Input_section_list::const_iterator,
2577 Input_section_list::const_iterator,
2578 Input_section_list::const_iterator,
2579 The_target_aarch64*,
2580 std::vector<Output_relaxed_input_section*>&,
2581 const Task*);
2582 }; // End of AArch64_output_section
2583
2584
2585 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2586 // the input section that will be the owner of the stub table.
2587
2588 template<int size, bool big_endian> void
2589 AArch64_output_section<size, big_endian>::create_stub_group(
2590 Input_section_list::const_iterator first,
2591 Input_section_list::const_iterator last,
2592 Input_section_list::const_iterator owner,
2593 The_target_aarch64* target,
2594 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2595 const Task* task)
2596 {
2597 // Currently we convert ordinary input sections into relaxed sections only
2598 // at this point.
2599 The_aarch64_input_section* input_section;
2600 if (owner->is_relaxed_input_section())
2601 gold_unreachable();
2602 else
2603 {
2604 gold_assert(owner->is_input_section());
2605 // Create a new relaxed input section. We need to lock the original
2606 // file.
2607 Task_lock_obj<Object> tl(task, owner->relobj());
2608 input_section =
2609 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2610 new_relaxed_sections.push_back(input_section);
2611 }
2612
2613 // Create a stub table.
2614 The_stub_table* stub_table =
2615 target->new_stub_table(input_section);
2616
2617 input_section->set_stub_table(stub_table);
2618
2619 Input_section_list::const_iterator p = first;
2620 // Look for input sections or relaxed input sections in [first ... last].
2621 do
2622 {
2623 if (p->is_input_section() || p->is_relaxed_input_section())
2624 {
2625 	  // The stub table information for input sections lives
2626 // in their objects.
2627 The_aarch64_relobj* aarch64_relobj =
2628 static_cast<The_aarch64_relobj*>(p->relobj());
2629 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2630 }
2631 }
2632 while (p++ != last);
2633 }
2634
2635
2636 // Group input sections for stub generation. GROUP_SIZE is roughly the size
2637 // limit of each stub group. We grow a stub group by adding input sections
2638 // until the size is just below GROUP_SIZE. The last input section will be
2639 // converted into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we
2640 // also add input sections after the stub table, effectively doubling the group
2640 // size.
2641 //
2642 // This is similar to the group_sections() function in elf32-arm.c but is
2643 // implemented differently.
2644
2645 template<int size, bool big_endian>
2646 void AArch64_output_section<size, big_endian>::group_sections(
2647 section_size_type group_size,
2648 bool stubs_always_after_branch,
2649 Target_aarch64<size, big_endian>* target,
2650 const Task* task)
2651 {
2652 typedef enum
2653 {
2654 NO_GROUP,
2655 FINDING_STUB_SECTION,
2656 HAS_STUB_SECTION
2657 } State;
2658
2659 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2660
2661 State state = NO_GROUP;
2662 section_size_type off = 0;
2663 section_size_type group_begin_offset = 0;
2664 section_size_type group_end_offset = 0;
2665 section_size_type stub_table_end_offset = 0;
2666 Input_section_list::const_iterator group_begin =
2667 this->input_sections().end();
2668 Input_section_list::const_iterator stub_table =
2669 this->input_sections().end();
2670 Input_section_list::const_iterator group_end = this->input_sections().end();
2671 for (Input_section_list::const_iterator p = this->input_sections().begin();
2672 p != this->input_sections().end();
2673 ++p)
2674 {
2675 section_size_type section_begin_offset =
2676 align_address(off, p->addralign());
2677 section_size_type section_end_offset =
2678 section_begin_offset + p->data_size();
2679
2680 // Check to see if we should group the previously seen sections.
2681 switch (state)
2682 {
2683 case NO_GROUP:
2684 break;
2685
2686 case FINDING_STUB_SECTION:
2687 // Adding this section makes the group larger than GROUP_SIZE.
2688 if (section_end_offset - group_begin_offset >= group_size)
2689 {
2690 if (stubs_always_after_branch)
2691 {
2692 gold_assert(group_end != this->input_sections().end());
2693 this->create_stub_group(group_begin, group_end, group_end,
2694 target, new_relaxed_sections,
2695 task);
2696 state = NO_GROUP;
2697 }
2698 else
2699 {
2700 // Input sections up to stub_group_size bytes after the stub
2701 // table can be handled by it too.
2702 state = HAS_STUB_SECTION;
2703 stub_table = group_end;
2704 stub_table_end_offset = group_end_offset;
2705 }
2706 }
2707 break;
2708
2709 case HAS_STUB_SECTION:
2710 // Adding this section makes the post stub-section group larger
2711 // than GROUP_SIZE.
2712 gold_unreachable();
2713 // NOT SUPPORTED YET. For completeness only.
2714 if (section_end_offset - stub_table_end_offset >= group_size)
2715 {
2716 gold_assert(group_end != this->input_sections().end());
2717 this->create_stub_group(group_begin, group_end, stub_table,
2718 target, new_relaxed_sections, task);
2719 state = NO_GROUP;
2720 }
2721 break;
2722
2723 default:
2724 gold_unreachable();
2725 }
2726
2727 // If we see an input section and currently there is no group, start
2728 // a new one. Skip any empty sections. We look at the data size
2729 // instead of calling p->relobj()->section_size() to avoid locking.
2730 if ((p->is_input_section() || p->is_relaxed_input_section())
2731 && (p->data_size() != 0))
2732 {
2733 if (state == NO_GROUP)
2734 {
2735 state = FINDING_STUB_SECTION;
2736 group_begin = p;
2737 group_begin_offset = section_begin_offset;
2738 }
2739
2740 // Keep track of the last input section seen.
2741 group_end = p;
2742 group_end_offset = section_end_offset;
2743 }
2744
2745 off = section_end_offset;
2746 }
2747
2748 // Create a stub group for any ungrouped sections.
2749 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2750 {
2751 gold_assert(group_end != this->input_sections().end());
2752 this->create_stub_group(group_begin, group_end,
2753 (state == FINDING_STUB_SECTION
2754 ? group_end
2755 : stub_table),
2756 target, new_relaxed_sections, task);
2757 }
2758
2759 if (!new_relaxed_sections.empty())
2760 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2761
2762   // Notify the relobjs about their converted input sections.
2763 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2764 {
2765 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2766 new_relaxed_sections[i]->relobj());
2767 unsigned int shndx = new_relaxed_sections[i]->shndx();
2768 // Tell AArch64_relobj that this input section is converted.
2769 relobj->convert_input_section_to_relaxed_section(shndx);
2770 }
2771 } // End of AArch64_output_section::group_sections
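
// A minimal, self-contained sketch (not gold code, names hypothetical) of the
// grouping policy implemented above for the stubs_always_after_branch case:
// accumulate aligned input-section sizes until adding the next section would
// push the group to GROUP_SIZE or beyond, then close the group at the last
// section added.  The real code additionally supports the (unfinished)
// HAS_STUB_SECTION state and converts the group-ending sections into relaxed
// sections.  Assumes <vector> and <utility> are available and power-of-two
// alignments.
static std::vector<size_t>            // indices of sections that end a group
example_group_boundaries(
    const std::vector<std::pair<uint64_t, uint64_t> >& sections, // (size, align)
    uint64_t group_size)
{
  std::vector<size_t> boundaries;
  uint64_t off = 0;
  uint64_t group_begin_offset = 0;
  bool in_group = false;
  size_t last_nonempty = 0;
  for (size_t i = 0; i < sections.size(); ++i)
    {
      uint64_t align = sections[i].second;
      uint64_t section_begin = (off + align - 1) & ~(align - 1);
      uint64_t section_end = section_begin + sections[i].first;
      // Close the current group if this section would overflow it.
      if (in_group && section_end - group_begin_offset >= group_size)
        {
          boundaries.push_back(last_nonempty);
          in_group = false;
        }
      // Non-empty sections either start a new group or extend the current one.
      if (sections[i].first != 0)
        {
          if (!in_group)
            {
              in_group = true;
              group_begin_offset = section_begin;
            }
          last_nonempty = i;
        }
      off = section_end;
    }
  if (in_group)
    boundaries.push_back(last_nonempty);
  return boundaries;
}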
2772
2773
2774 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2775
2776
2777 // The aarch64 target class.
2778 // See the ABI at
2779 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2780 template<int size, bool big_endian>
2781 class Target_aarch64 : public Sized_target<size, big_endian>
2782 {
2783 public:
2784 typedef Target_aarch64<size, big_endian> This;
2785 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2786 Reloc_section;
2787 typedef Relocate_info<size, big_endian> The_relocate_info;
2788 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2789 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2790 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2791 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2792 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2793 typedef Stub_table<size, big_endian> The_stub_table;
2794 typedef std::vector<The_stub_table*> Stub_table_list;
2795 typedef typename Stub_table_list::iterator Stub_table_iterator;
2796 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2797 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2798 typedef Unordered_map<Section_id,
2799 AArch64_input_section<size, big_endian>*,
2800 Section_id_hash> AArch64_input_section_map;
2801 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2802 const static int TCB_SIZE = size / 8 * 2;
2803
2804 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2805 : Sized_target<size, big_endian>(info),
2806 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2807 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2808 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2809 got_mod_index_offset_(-1U),
2810 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2811 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2812 { }
2813
2814 // Scan the relocations to determine unreferenced sections for
2815 // garbage collection.
2816 void
2817 gc_process_relocs(Symbol_table* symtab,
2818 Layout* layout,
2819 Sized_relobj_file<size, big_endian>* object,
2820 unsigned int data_shndx,
2821 unsigned int sh_type,
2822 const unsigned char* prelocs,
2823 size_t reloc_count,
2824 Output_section* output_section,
2825 bool needs_special_offset_handling,
2826 size_t local_symbol_count,
2827 const unsigned char* plocal_symbols);
2828
2829 // Scan the relocations to look for symbol adjustments.
2830 void
2831 scan_relocs(Symbol_table* symtab,
2832 Layout* layout,
2833 Sized_relobj_file<size, big_endian>* object,
2834 unsigned int data_shndx,
2835 unsigned int sh_type,
2836 const unsigned char* prelocs,
2837 size_t reloc_count,
2838 Output_section* output_section,
2839 bool needs_special_offset_handling,
2840 size_t local_symbol_count,
2841 const unsigned char* plocal_symbols);
2842
2843 // Finalize the sections.
2844 void
2845 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2846
2847   // Return the value to use for a dynamic symbol which requires special
2848 // treatment.
2849 uint64_t
2850 do_dynsym_value(const Symbol*) const;
2851
2852 // Relocate a section.
2853 void
2854 relocate_section(const Relocate_info<size, big_endian>*,
2855 unsigned int sh_type,
2856 const unsigned char* prelocs,
2857 size_t reloc_count,
2858 Output_section* output_section,
2859 bool needs_special_offset_handling,
2860 unsigned char* view,
2861 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2862 section_size_type view_size,
2863 const Reloc_symbol_changes*);
2864
2865 // Scan the relocs during a relocatable link.
2866 void
2867 scan_relocatable_relocs(Symbol_table* symtab,
2868 Layout* layout,
2869 Sized_relobj_file<size, big_endian>* object,
2870 unsigned int data_shndx,
2871 unsigned int sh_type,
2872 const unsigned char* prelocs,
2873 size_t reloc_count,
2874 Output_section* output_section,
2875 bool needs_special_offset_handling,
2876 size_t local_symbol_count,
2877 const unsigned char* plocal_symbols,
2878 Relocatable_relocs*);
2879
2880 // Scan the relocs for --emit-relocs.
2881 void
2882 emit_relocs_scan(Symbol_table* symtab,
2883 Layout* layout,
2884 Sized_relobj_file<size, big_endian>* object,
2885 unsigned int data_shndx,
2886 unsigned int sh_type,
2887 const unsigned char* prelocs,
2888 size_t reloc_count,
2889 Output_section* output_section,
2890 bool needs_special_offset_handling,
2891 size_t local_symbol_count,
2892 const unsigned char* plocal_syms,
2893 Relocatable_relocs* rr);
2894
2895 // Relocate a section during a relocatable link.
2896 void
2897 relocate_relocs(
2898 const Relocate_info<size, big_endian>*,
2899 unsigned int sh_type,
2900 const unsigned char* prelocs,
2901 size_t reloc_count,
2902 Output_section* output_section,
2903 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2904 unsigned char* view,
2905 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2906 section_size_type view_size,
2907 unsigned char* reloc_view,
2908 section_size_type reloc_view_size);
2909
2910 // Return the symbol index to use for a target specific relocation.
2911 // The only target specific relocation is R_AARCH64_TLSDESC for a
2912 // local symbol, which is an absolute reloc.
2913 unsigned int
2914 do_reloc_symbol_index(void*, unsigned int r_type) const
2915 {
2916 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2917 return 0;
2918 }
2919
2920 // Return the addend to use for a target specific relocation.
2921 uint64_t
2922 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2923
2924   // Return the PLT address to use for a global or local symbol.
2925 uint64_t
2926 do_plt_address_for_global(const Symbol* gsym) const
2927 { return this->plt_section()->address_for_global(gsym); }
2928
2929 uint64_t
2930 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2931 { return this->plt_section()->address_for_local(relobj, symndx); }
2932
2933   // This function should be defined in targets that can use relocation
2934   // types to determine whether a function's pointer is taken (implemented in
2935   // local_reloc_may_be_function_pointer and
2936   // global_reloc_may_be_function_pointer). ICF uses this in safe mode to only
2937   // fold those functions whose pointer is definitely not taken.
2938 bool
2939 do_can_check_for_function_pointers() const
2940 { return true; }
2941
2942 // Return the number of entries in the PLT.
2943 unsigned int
2944 plt_entry_count() const;
2945
2946   // Return the offset of the first non-reserved PLT entry.
2947 unsigned int
2948 first_plt_entry_offset() const;
2949
2950 // Return the size of each PLT entry.
2951 unsigned int
2952 plt_entry_size() const;
2953
2954 // Create a stub table.
2955 The_stub_table*
2956 new_stub_table(The_aarch64_input_section*);
2957
2958 // Create an aarch64 input section.
2959 The_aarch64_input_section*
2960 new_aarch64_input_section(Relobj*, unsigned int);
2961
2962 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2963 The_aarch64_input_section*
2964 find_aarch64_input_section(Relobj*, unsigned int) const;
2965
2966 // Return the thread control block size.
2967 unsigned int
2968 tcb_size() const { return This::TCB_SIZE; }
2969
2970 // Scan a section for stub generation.
2971 void
2972 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
2973 const unsigned char*, size_t, Output_section*,
2974 bool, const unsigned char*,
2975 Address,
2976 section_size_type);
2977
2978 // Scan a relocation section for stub.
2979 template<int sh_type>
2980 void
2981 scan_reloc_section_for_stubs(
2982 const The_relocate_info* relinfo,
2983 const unsigned char* prelocs,
2984 size_t reloc_count,
2985 Output_section* output_section,
2986 bool needs_special_offset_handling,
2987 const unsigned char* view,
2988 Address view_address,
2989 section_size_type);
2990
2991 // Relocate a single stub.
2992 void
2993 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
2994 Output_section*, unsigned char*, Address,
2995 section_size_type);
2996
2997 // Get the default AArch64 target.
2998 static This*
2999 current_target()
3000 {
3001 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
3002 && parameters->target().get_size() == size
3003 && parameters->target().is_big_endian() == big_endian);
3004 return static_cast<This*>(parameters->sized_target<size, big_endian>());
3005 }
3006
3007
3008 // Scan erratum 843419 for a part of a section.
3009 void
3010 scan_erratum_843419_span(
3011 AArch64_relobj<size, big_endian>*,
3012 unsigned int,
3013 const section_size_type,
3014 const section_size_type,
3015 unsigned char*,
3016 Address);
3017
3018 // Scan erratum 835769 for a part of a section.
3019 void
3020 scan_erratum_835769_span(
3021 AArch64_relobj<size, big_endian>*,
3022 unsigned int,
3023 const section_size_type,
3024 const section_size_type,
3025 unsigned char*,
3026 Address);
3027
3028 protected:
3029 void
3030 do_select_as_default_target()
3031 {
3032 gold_assert(aarch64_reloc_property_table == NULL);
3033 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3034 }
3035
3036 // Add a new reloc argument, returning the index in the vector.
3037 size_t
3038 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3039 unsigned int r_sym)
3040 {
3041 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3042 return this->tlsdesc_reloc_info_.size() - 1;
3043 }
3044
3045 virtual Output_data_plt_aarch64<size, big_endian>*
3046 do_make_data_plt(Layout* layout,
3047 Output_data_got_aarch64<size, big_endian>* got,
3048 Output_data_space* got_plt,
3049 Output_data_space* got_irelative)
3050 {
3051 return new Output_data_plt_aarch64_standard<size, big_endian>(
3052 layout, got, got_plt, got_irelative);
3053 }
3054
3055
3056 // do_make_elf_object to override the same function in the base class.
3057 Object*
3058 do_make_elf_object(const std::string&, Input_file*, off_t,
3059 const elfcpp::Ehdr<size, big_endian>&);
3060
3061 Output_data_plt_aarch64<size, big_endian>*
3062 make_data_plt(Layout* layout,
3063 Output_data_got_aarch64<size, big_endian>* got,
3064 Output_data_space* got_plt,
3065 Output_data_space* got_irelative)
3066 {
3067 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3068 }
3069
3070   // We only need to generate stubs, and hence perform relaxation, if we are
3071   // not doing a relocatable link.
3072 virtual bool
3073 do_may_relax() const
3074 { return !parameters->options().relocatable(); }
3075
3076 // Relaxation hook. This is where we do stub generation.
3077 virtual bool
3078 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3079
3080 void
3081 group_sections(Layout* layout,
3082 section_size_type group_size,
3083 bool stubs_always_after_branch,
3084 const Task* task);
3085
3086 void
3087 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3088 const Sized_symbol<size>*, unsigned int,
3089 const Symbol_value<size>*,
3090 typename elfcpp::Elf_types<size>::Elf_Swxword,
3091 Address Elf_Addr);
3092
3093 // Make an output section.
3094 Output_section*
3095 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3096 elfcpp::Elf_Xword flags)
3097 { return new The_aarch64_output_section(name, type, flags); }
3098
3099 private:
3100 // The class which scans relocations.
3101 class Scan
3102 {
3103 public:
3104 Scan()
3105 : issued_non_pic_error_(false)
3106 { }
3107
3108 inline void
3109 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3110 Sized_relobj_file<size, big_endian>* object,
3111 unsigned int data_shndx,
3112 Output_section* output_section,
3113 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3114 const elfcpp::Sym<size, big_endian>& lsym,
3115 bool is_discarded);
3116
3117 inline void
3118 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3119 Sized_relobj_file<size, big_endian>* object,
3120 unsigned int data_shndx,
3121 Output_section* output_section,
3122 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3123 Symbol* gsym);
3124
3125 inline bool
3126 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3127 Target_aarch64<size, big_endian>* ,
3128 Sized_relobj_file<size, big_endian>* ,
3129 unsigned int ,
3130 Output_section* ,
3131 const elfcpp::Rela<size, big_endian>& ,
3132 unsigned int r_type,
3133 const elfcpp::Sym<size, big_endian>&);
3134
3135 inline bool
3136 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3137 Target_aarch64<size, big_endian>* ,
3138 Sized_relobj_file<size, big_endian>* ,
3139 unsigned int ,
3140 Output_section* ,
3141 const elfcpp::Rela<size, big_endian>& ,
3142 unsigned int r_type,
3143 Symbol* gsym);
3144
3145 private:
3146 static void
3147 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3148 unsigned int r_type);
3149
3150 static void
3151 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3152 unsigned int r_type, Symbol*);
3153
3154 inline bool
3155 possible_function_pointer_reloc(unsigned int r_type);
3156
3157 void
3158 check_non_pic(Relobj*, unsigned int r_type);
3159
3160 bool
3161 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3162 unsigned int r_type);
3163
3164 // Whether we have issued an error about a non-PIC compilation.
3165 bool issued_non_pic_error_;
3166 };
3167
3168 // The class which implements relocation.
3169 class Relocate
3170 {
3171 public:
3172 Relocate()
3173 : skip_call_tls_get_addr_(false)
3174 { }
3175
3176 ~Relocate()
3177 { }
3178
3179 // Do a relocation. Return false if the caller should not issue
3180 // any warnings about this relocation.
3181 inline bool
3182 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3183 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3184 const Sized_symbol<size>*, const Symbol_value<size>*,
3185 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3186 section_size_type);
3187
3188 private:
3189 inline typename AArch64_relocate_functions<size, big_endian>::Status
3190 relocate_tls(const Relocate_info<size, big_endian>*,
3191 Target_aarch64<size, big_endian>*,
3192 size_t,
3193 const elfcpp::Rela<size, big_endian>&,
3194 unsigned int r_type, const Sized_symbol<size>*,
3195 const Symbol_value<size>*,
3196 unsigned char*,
3197 typename elfcpp::Elf_types<size>::Elf_Addr);
3198
3199 inline typename AArch64_relocate_functions<size, big_endian>::Status
3200 tls_gd_to_le(
3201 const Relocate_info<size, big_endian>*,
3202 Target_aarch64<size, big_endian>*,
3203 const elfcpp::Rela<size, big_endian>&,
3204 unsigned int,
3205 unsigned char*,
3206 const Symbol_value<size>*);
3207
3208 inline typename AArch64_relocate_functions<size, big_endian>::Status
3209 tls_ld_to_le(
3210 const Relocate_info<size, big_endian>*,
3211 Target_aarch64<size, big_endian>*,
3212 const elfcpp::Rela<size, big_endian>&,
3213 unsigned int,
3214 unsigned char*,
3215 const Symbol_value<size>*);
3216
3217 inline typename AArch64_relocate_functions<size, big_endian>::Status
3218 tls_ie_to_le(
3219 const Relocate_info<size, big_endian>*,
3220 Target_aarch64<size, big_endian>*,
3221 const elfcpp::Rela<size, big_endian>&,
3222 unsigned int,
3223 unsigned char*,
3224 const Symbol_value<size>*);
3225
3226 inline typename AArch64_relocate_functions<size, big_endian>::Status
3227 tls_desc_gd_to_le(
3228 const Relocate_info<size, big_endian>*,
3229 Target_aarch64<size, big_endian>*,
3230 const elfcpp::Rela<size, big_endian>&,
3231 unsigned int,
3232 unsigned char*,
3233 const Symbol_value<size>*);
3234
3235 inline typename AArch64_relocate_functions<size, big_endian>::Status
3236 tls_desc_gd_to_ie(
3237 const Relocate_info<size, big_endian>*,
3238 Target_aarch64<size, big_endian>*,
3239 const elfcpp::Rela<size, big_endian>&,
3240 unsigned int,
3241 unsigned char*,
3242 const Symbol_value<size>*,
3243 typename elfcpp::Elf_types<size>::Elf_Addr,
3244 typename elfcpp::Elf_types<size>::Elf_Addr);
3245
3246 bool skip_call_tls_get_addr_;
3247
3248 }; // End of class Relocate
3249
3250 // Adjust TLS relocation type based on the options and whether this
3251 // is a local symbol.
3252 static tls::Tls_optimization
3253 optimize_tls_reloc(bool is_final, int r_type);
3254
3255 // Get the GOT section, creating it if necessary.
3256 Output_data_got_aarch64<size, big_endian>*
3257 got_section(Symbol_table*, Layout*);
3258
3259 // Get the GOT PLT section.
3260 Output_data_space*
3261 got_plt_section() const
3262 {
3263 gold_assert(this->got_plt_ != NULL);
3264 return this->got_plt_;
3265 }
3266
3267 // Get the GOT section for TLSDESC entries.
3268 Output_data_got<size, big_endian>*
3269 got_tlsdesc_section() const
3270 {
3271 gold_assert(this->got_tlsdesc_ != NULL);
3272 return this->got_tlsdesc_;
3273 }
3274
3275 // Create the PLT section.
3276 void
3277 make_plt_section(Symbol_table* symtab, Layout* layout);
3278
3279 // Create a PLT entry for a global symbol.
3280 void
3281 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3282
3283 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3284 void
3285 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3286 Sized_relobj_file<size, big_endian>* relobj,
3287 unsigned int local_sym_index);
3288
3289 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3290 void
3291 define_tls_base_symbol(Symbol_table*, Layout*);
3292
3293 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3294 void
3295 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3296
3297 // Create a GOT entry for the TLS module index.
3298 unsigned int
3299 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3300 Sized_relobj_file<size, big_endian>* object);
3301
3302 // Get the PLT section.
3303 Output_data_plt_aarch64<size, big_endian>*
3304 plt_section() const
3305 {
3306 gold_assert(this->plt_ != NULL);
3307 return this->plt_;
3308 }
3309
3310 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3311 // ST_E_843419, we need an additional field for adrp offset.
3312 void create_erratum_stub(
3313 AArch64_relobj<size, big_endian>* relobj,
3314 unsigned int shndx,
3315 section_size_type erratum_insn_offset,
3316 Address erratum_address,
3317 typename Insn_utilities::Insntype erratum_insn,
3318 int erratum_type,
3319 unsigned int e843419_adrp_offset=0);
3320
3321 // Return whether this is a 3-insn erratum sequence.
3322 bool is_erratum_843419_sequence(
3323 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3324 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3325 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3326
3327   // Return whether this is an 835769 sequence.
3328   // (Implemented similarly to elfnn-aarch64.c.)
3329 bool is_erratum_835769_sequence(
3330 typename elfcpp::Swap<32,big_endian>::Valtype,
3331 typename elfcpp::Swap<32,big_endian>::Valtype);
3332
3333 // Get the dynamic reloc section, creating it if necessary.
3334 Reloc_section*
3335 rela_dyn_section(Layout*);
3336
3337 // Get the section to use for TLSDESC relocations.
3338 Reloc_section*
3339 rela_tlsdesc_section(Layout*) const;
3340
3341 // Get the section to use for IRELATIVE relocations.
3342 Reloc_section*
3343 rela_irelative_section(Layout*);
3344
3345 // Add a potential copy relocation.
3346 void
3347 copy_reloc(Symbol_table* symtab, Layout* layout,
3348 Sized_relobj_file<size, big_endian>* object,
3349 unsigned int shndx, Output_section* output_section,
3350 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3351 {
3352 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3353 this->copy_relocs_.copy_reloc(symtab, layout,
3354 symtab->get_sized_symbol<size>(sym),
3355 object, shndx, output_section,
3356 r_type, reloc.get_r_offset(),
3357 reloc.get_r_addend(),
3358 this->rela_dyn_section(layout));
3359 }
3360
3361 // Information about this specific target which we pass to the
3362 // general Target structure.
3363 static const Target::Target_info aarch64_info;
3364
3365 // The types of GOT entries needed for this platform.
3366 // These values are exposed to the ABI in an incremental link.
3367 // Do not renumber existing values without changing the version
3368 // number of the .gnu_incremental_inputs section.
3369 enum Got_type
3370 {
3371 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3372 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3373 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3374 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3375 };
3376
3377 // This type is used as the argument to the target specific
3378 // relocation routines. The only target specific reloc is
3379   // R_AARCH64_TLSDESC against a local symbol.
3380 struct Tlsdesc_info
3381 {
3382 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3383 unsigned int a_r_sym)
3384 : object(a_object), r_sym(a_r_sym)
3385 { }
3386
3387 // The object in which the local symbol is defined.
3388 Sized_relobj_file<size, big_endian>* object;
3389 // The local symbol index in the object.
3390 unsigned int r_sym;
3391 };
3392
3393 // The GOT section.
3394 Output_data_got_aarch64<size, big_endian>* got_;
3395 // The PLT section.
3396 Output_data_plt_aarch64<size, big_endian>* plt_;
3397 // The GOT PLT section.
3398 Output_data_space* got_plt_;
3399 // The GOT section for IRELATIVE relocations.
3400 Output_data_space* got_irelative_;
3401 // The GOT section for TLSDESC relocations.
3402 Output_data_got<size, big_endian>* got_tlsdesc_;
3403 // The _GLOBAL_OFFSET_TABLE_ symbol.
3404 Symbol* global_offset_table_;
3405 // The dynamic reloc section.
3406 Reloc_section* rela_dyn_;
3407 // The section to use for IRELATIVE relocs.
3408 Reloc_section* rela_irelative_;
3409 // Relocs saved to avoid a COPY reloc.
3410 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3411 // Offset of the GOT entry for the TLS module index.
3412 unsigned int got_mod_index_offset_;
3413 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3414 // specific relocation. Here we store the object and local symbol
3415 // index for the relocation.
3416 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3417 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3418 bool tls_base_symbol_defined_;
3419   // List of stub tables.
3420   Stub_table_list stub_tables_;
3421   // Actual stub group size.
3422 section_size_type stub_group_size_;
3423 AArch64_input_section_map aarch64_input_section_map_;
3424 }; // End of Target_aarch64
3425
3426
3427 template<>
3428 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3429 {
3430 64, // size
3431 false, // is_big_endian
3432 elfcpp::EM_AARCH64, // machine_code
3433 false, // has_make_symbol
3434 false, // has_resolve
3435 false, // has_code_fill
3436 true, // is_default_stack_executable
3437 true, // can_icf_inline_merge_sections
3438 '\0', // wrap_char
3439 "/lib/ld.so.1", // program interpreter
3440 0x400000, // default_text_segment_address
3441 0x10000, // abi_pagesize (overridable by -z max-page-size)
3442 0x1000, // common_pagesize (overridable by -z common-page-size)
3443 false, // isolate_execinstr
3444 0, // rosegment_gap
3445 elfcpp::SHN_UNDEF, // small_common_shndx
3446 elfcpp::SHN_UNDEF, // large_common_shndx
3447 0, // small_common_section_flags
3448 0, // large_common_section_flags
3449 NULL, // attributes_section
3450 NULL, // attributes_vendor
3451 "_start", // entry_symbol_name
3452 32, // hash_entry_size
3453 };
3454
3455 template<>
3456 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3457 {
3458 32, // size
3459 false, // is_big_endian
3460 elfcpp::EM_AARCH64, // machine_code
3461 false, // has_make_symbol
3462 false, // has_resolve
3463 false, // has_code_fill
3464 true, // is_default_stack_executable
3465 false, // can_icf_inline_merge_sections
3466 '\0', // wrap_char
3467 "/lib/ld.so.1", // program interpreter
3468 0x400000, // default_text_segment_address
3469 0x10000, // abi_pagesize (overridable by -z max-page-size)
3470 0x1000, // common_pagesize (overridable by -z common-page-size)
3471 false, // isolate_execinstr
3472 0, // rosegment_gap
3473 elfcpp::SHN_UNDEF, // small_common_shndx
3474 elfcpp::SHN_UNDEF, // large_common_shndx
3475 0, // small_common_section_flags
3476 0, // large_common_section_flags
3477 NULL, // attributes_section
3478 NULL, // attributes_vendor
3479 "_start", // entry_symbol_name
3480 32, // hash_entry_size
3481 };
3482
3483 template<>
3484 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3485 {
3486 64, // size
3487 true, // is_big_endian
3488 elfcpp::EM_AARCH64, // machine_code
3489 false, // has_make_symbol
3490 false, // has_resolve
3491 false, // has_code_fill
3492 true, // is_default_stack_executable
3493 true, // can_icf_inline_merge_sections
3494 '\0', // wrap_char
3495 "/lib/ld.so.1", // program interpreter
3496 0x400000, // default_text_segment_address
3497 0x10000, // abi_pagesize (overridable by -z max-page-size)
3498 0x1000, // common_pagesize (overridable by -z common-page-size)
3499 false, // isolate_execinstr
3500 0, // rosegment_gap
3501 elfcpp::SHN_UNDEF, // small_common_shndx
3502 elfcpp::SHN_UNDEF, // large_common_shndx
3503 0, // small_common_section_flags
3504 0, // large_common_section_flags
3505 NULL, // attributes_section
3506 NULL, // attributes_vendor
3507 "_start", // entry_symbol_name
3508 32, // hash_entry_size
3509 };
3510
3511 template<>
3512 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3513 {
3514 32, // size
3515 true, // is_big_endian
3516 elfcpp::EM_AARCH64, // machine_code
3517 false, // has_make_symbol
3518 false, // has_resolve
3519 false, // has_code_fill
3520 true, // is_default_stack_executable
3521 false, // can_icf_inline_merge_sections
3522 '\0', // wrap_char
3523 "/lib/ld.so.1", // program interpreter
3524 0x400000, // default_text_segment_address
3525 0x10000, // abi_pagesize (overridable by -z max-page-size)
3526 0x1000, // common_pagesize (overridable by -z common-page-size)
3527 false, // isolate_execinstr
3528 0, // rosegment_gap
3529 elfcpp::SHN_UNDEF, // small_common_shndx
3530 elfcpp::SHN_UNDEF, // large_common_shndx
3531 0, // small_common_section_flags
3532 0, // large_common_section_flags
3533 NULL, // attributes_section
3534 NULL, // attributes_vendor
3535 "_start", // entry_symbol_name
3536 32, // hash_entry_size
3537 };
3538
3539 // Get the GOT section, creating it if necessary.
3540
3541 template<int size, bool big_endian>
3542 Output_data_got_aarch64<size, big_endian>*
3543 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3544 Layout* layout)
3545 {
3546 if (this->got_ == NULL)
3547 {
3548 gold_assert(symtab != NULL && layout != NULL);
3549
3550 // When using -z now, we can treat .got.plt as a relro section.
3551 // Without -z now, it is modified after program startup by lazy
3552 // PLT relocations.
3553 bool is_got_plt_relro = parameters->options().now();
3554 Output_section_order got_order = (is_got_plt_relro
3555 ? ORDER_RELRO
3556 : ORDER_RELRO_LAST);
3557 Output_section_order got_plt_order = (is_got_plt_relro
3558 ? ORDER_RELRO
3559 : ORDER_NON_RELRO_FIRST);
3560
3561 // Layout of .got and .got.plt sections.
3562 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3563 // ...
3564 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3565 // .gotplt[1] reserved for ld.so (resolver)
3566 // .gotplt[2] reserved
3567
3568 // Generate .got section.
3569 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3570 layout);
3571 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3572 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3573 this->got_, got_order, true);
3574 // The first word of GOT is reserved for the address of .dynamic.
3575 // We put 0 here now. The value will be replaced later in
3576 // Output_data_got_aarch64::do_write.
3577 this->got_->add_constant(0);
3578
3579 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3580 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section,
3581 // even if there is a .got.plt section.
3582 this->global_offset_table_ =
3583 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3584 Symbol_table::PREDEFINED,
3585 this->got_,
3586 0, 0, elfcpp::STT_OBJECT,
3587 elfcpp::STB_LOCAL,
3588 elfcpp::STV_HIDDEN, 0,
3589 false, false);
3590
3591 // Generate .got.plt section.
3592 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3593 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3594 (elfcpp::SHF_ALLOC
3595 | elfcpp::SHF_WRITE),
3596 this->got_plt_, got_plt_order,
3597 is_got_plt_relro);
3598
3599 // The first three entries are reserved.
3600 this->got_plt_->set_current_data_size(
3601 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3602
3603 // If there are any IRELATIVE relocations, they get GOT entries
3604 // in .got.plt after the jump slot entries.
3605 this->got_irelative_ = new Output_data_space(size / 8,
3606 "** GOT IRELATIVE PLT");
3607 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3608 (elfcpp::SHF_ALLOC
3609 | elfcpp::SHF_WRITE),
3610 this->got_irelative_,
3611 got_plt_order,
3612 is_got_plt_relro);
3613
3614 // If there are any TLSDESC relocations, they get GOT entries in
3615 // .got.plt after the jump slot and IRELATIVE entries.
3616 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3617 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3618 (elfcpp::SHF_ALLOC
3619 | elfcpp::SHF_WRITE),
3620 this->got_tlsdesc_,
3621 got_plt_order,
3622 is_got_plt_relro);
3623
3624 if (!is_got_plt_relro)
3625 {
3626 // Those bytes can go into the relro segment.
3627 layout->increase_relro(
3628 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3629 }
3630
3631 }
3632 return this->got_;
3633 }
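// An illustrative sketch only (not used by gold): the byte size of the
// reserved .got.plt header laid out above, and hence the offset of the
// first real jump-slot entry.  The name example_reserved_gotplt_bytes is
// hypothetical.  For ELF64 this is 3 * 8 = 24 bytes; for ELF32 it would
// be 3 * 4 = 12.
inline unsigned int
example_reserved_gotplt_bytes(int elf_word_size_in_bits)
{ return AARCH64_GOTPLT_RESERVE_COUNT * (elf_word_size_in_bits / 8); }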
3634
3635 // Get the dynamic reloc section, creating it if necessary.
3636
3637 template<int size, bool big_endian>
3638 typename Target_aarch64<size, big_endian>::Reloc_section*
3639 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3640 {
3641 if (this->rela_dyn_ == NULL)
3642 {
3643 gold_assert(layout != NULL);
3644 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3645 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3646 elfcpp::SHF_ALLOC, this->rela_dyn_,
3647 ORDER_DYNAMIC_RELOCS, false);
3648 }
3649 return this->rela_dyn_;
3650 }
3651
3652 // Get the section to use for IRELATIVE relocs, creating it if
3653 // necessary. These go in .rela.dyn, but only after all other dynamic
3654 // relocations. They need to follow the other dynamic relocations so
3655 // that they can refer to global variables initialized by those
3656 // relocs.
3657
3658 template<int size, bool big_endian>
3659 typename Target_aarch64<size, big_endian>::Reloc_section*
3660 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3661 {
3662 if (this->rela_irelative_ == NULL)
3663 {
3664 // Make sure we have already created the dynamic reloc section.
3665 this->rela_dyn_section(layout);
3666 this->rela_irelative_ = new Reloc_section(false);
3667 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3668 elfcpp::SHF_ALLOC, this->rela_irelative_,
3669 ORDER_DYNAMIC_RELOCS, false);
3670 gold_assert(this->rela_dyn_->output_section()
3671 == this->rela_irelative_->output_section());
3672 }
3673 return this->rela_irelative_;
3674 }
3675
3676
3677 // do_make_elf_object overrides the same function in the base class. We need
3678 // a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3679 // store backend-specific information, so we do our own ELF object
3680 // creation here.
3681
3682 template<int size, bool big_endian>
3683 Object*
3684 Target_aarch64<size, big_endian>::do_make_elf_object(
3685 const std::string& name,
3686 Input_file* input_file,
3687 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3688 {
3689 int et = ehdr.get_e_type();
3690 // ET_EXEC files are valid input for --just-symbols/-R,
3691 // and we treat them as relocatable objects.
3692 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3693 return Sized_target<size, big_endian>::do_make_elf_object(
3694 name, input_file, offset, ehdr);
3695 else if (et == elfcpp::ET_REL)
3696 {
3697 AArch64_relobj<size, big_endian>* obj =
3698 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3699 obj->setup();
3700 return obj;
3701 }
3702 else if (et == elfcpp::ET_DYN)
3703 {
3704 // Keep base implementation.
3705 Sized_dynobj<size, big_endian>* obj =
3706 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3707 obj->setup();
3708 return obj;
3709 }
3710 else
3711 {
3712 gold_error(_("%s: unsupported ELF file type %d"),
3713 name.c_str(), et);
3714 return NULL;
3715 }
3716 }
3717
3718
3719 // Scan a relocation for stub generation.
3720
3721 template<int size, bool big_endian>
3722 void
3723 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3724 const Relocate_info<size, big_endian>* relinfo,
3725 unsigned int r_type,
3726 const Sized_symbol<size>* gsym,
3727 unsigned int r_sym,
3728 const Symbol_value<size>* psymval,
3729 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3730 Address address)
3731 {
3732 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3733 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3734
3735 Symbol_value<size> symval;
3736 if (gsym != NULL)
3737 {
3738 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3739 get_reloc_property(r_type);
3740 if (gsym->use_plt_offset(arp->reference_flags()))
3741 {
3742 // This uses a PLT, change the symbol value.
3743 symval.set_output_value(this->plt_section()->address()
3744 + gsym->plt_offset());
3745 psymval = &symval;
3746 }
3747 else if (gsym->is_undefined())
3748 // There is no need to generate a stub if the symbol is undefined.
3749 return;
3750 }
3751
3752 // Get the symbol value.
3753 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3754
3755 // Compute the branch destination. For R_AARCH64_CALL26 and
3756 // R_AARCH64_JUMP26 this is simply the symbol value plus the addend.
3757 Address destination = static_cast<Address>(-1);
3758 switch (r_type)
3759 {
3760 case elfcpp::R_AARCH64_CALL26:
3761 case elfcpp::R_AARCH64_JUMP26:
3762 destination = value + addend;
3763 break;
3764 default:
3765 gold_unreachable();
3766 }
3767
3768 int stub_type = The_reloc_stub::
3769 stub_type_for_reloc(r_type, address, destination);
3770 if (stub_type == ST_NONE)
3771 return;
3772
3773 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3774 gold_assert(stub_table != NULL);
3775
3776 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3777 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3778 if (stub == NULL)
3779 {
3780 stub = new The_reloc_stub(stub_type);
3781 stub_table->add_reloc_stub(stub, key);
3782 }
3783 stub->set_destination_address(destination);
3784 } // End of Target_aarch64::scan_reloc_for_stub
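// An illustrative sketch only, not gold's implementation: the range check
// that underlies stub_type_for_reloc above.  B and BL encode a signed
// 26-bit word offset, so the reachable range from the branch instruction
// is [-2^27, 2^27 - 4] bytes; a destination outside that range needs a
// stub.  The name example_branch_needs_stub is hypothetical.
inline bool
example_branch_needs_stub(uint64_t branch_address, uint64_t destination)
{
  const int64_t offset = static_cast<int64_t>(destination - branch_address);
  const int64_t lo = -(static_cast<int64_t>(1) << 27);     // -128 MiB
  const int64_t hi = (static_cast<int64_t>(1) << 27) - 4;  // +128 MiB - 4
  return offset < lo || offset > hi;
}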
3785
3786
3787 // This function scans a relocation section for stub generation.
3788 // For each R_AARCH64_CALL26 or R_AARCH64_JUMP26 relocation it computes
3789 // the would-be symbol value and, via scan_reloc_for_stub, records any
3790 // branch stub that will be needed.
3791
3792 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3793 // SHT_REL or SHT_RELA.
3794
3795 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3796 // of relocs. OUTPUT_SECTION is the output section.
3797 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3798 // mapped to output offsets.
3799
3800 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3801 // VIEW_SIZE is the size. These refer to the input section, unless
3802 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3803 // the output section.
3804
3805 template<int size, bool big_endian>
3806 template<int sh_type>
3807 void inline
3808 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3809 const Relocate_info<size, big_endian>* relinfo,
3810 const unsigned char* prelocs,
3811 size_t reloc_count,
3812 Output_section* /*output_section*/,
3813 bool /*needs_special_offset_handling*/,
3814 const unsigned char* /*view*/,
3815 Address view_address,
3816 section_size_type)
3817 {
3818 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3819
3820 const int reloc_size =
3821 Reloc_types<sh_type,size,big_endian>::reloc_size;
3822 AArch64_relobj<size, big_endian>* object =
3823 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3824 unsigned int local_count = object->local_symbol_count();
3825
3826 gold::Default_comdat_behavior default_comdat_behavior;
3827 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3828
3829 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3830 {
3831 Reltype reloc(prelocs);
3832 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3833 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3834 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3835 if (r_type != elfcpp::R_AARCH64_CALL26
3836 && r_type != elfcpp::R_AARCH64_JUMP26)
3837 continue;
3838
3839 section_offset_type offset =
3840 convert_to_section_size_type(reloc.get_r_offset());
3841
3842 // Get the addend.
3843 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3844 reloc.get_r_addend();
3845
3846 const Sized_symbol<size>* sym;
3847 Symbol_value<size> symval;
3848 const Symbol_value<size> *psymval;
3849 bool is_defined_in_discarded_section;
3850 unsigned int shndx;
3851 if (r_sym < local_count)
3852 {
3853 sym = NULL;
3854 psymval = object->local_symbol(r_sym);
3855
3856 // If the local symbol belongs to a section we are discarding,
3857 // and that section is a debug section, try to find the
3858 // corresponding kept section and map this symbol to its
3859 // counterpart in the kept section. The symbol must not
3860 // correspond to a section we are folding.
3861 bool is_ordinary;
3862 shndx = psymval->input_shndx(&is_ordinary);
3863 is_defined_in_discarded_section =
3864 (is_ordinary
3865 && shndx != elfcpp::SHN_UNDEF
3866 && !object->is_section_included(shndx)
3867 && !relinfo->symtab->is_section_folded(object, shndx));
3868
3869 // We need to compute the would-be final value of this local
3870 // symbol.
3871 if (!is_defined_in_discarded_section)
3872 {
3873 typedef Sized_relobj_file<size, big_endian> ObjType;
3874 if (psymval->is_section_symbol())
3875 symval.set_is_section_symbol();
3876 typename ObjType::Compute_final_local_value_status status =
3877 object->compute_final_local_value(r_sym, psymval, &symval,
3878 relinfo->symtab);
3879 if (status == ObjType::CFLV_OK)
3880 {
3881 // Currently we cannot handle a branch to a target in
3882 // a merged section. If this is the case, issue an error
3883 // and also free the merge symbol value.
3884 if (!symval.has_output_value())
3885 {
3886 const std::string& section_name =
3887 object->section_name(shndx);
3888 object->error(_("cannot handle branch to local %u "
3889 "in a merged section %s"),
3890 r_sym, section_name.c_str());
3891 }
3892 psymval = &symval;
3893 }
3894 else
3895 {
3896 // We cannot determine the final value.
3897 continue;
3898 }
3899 }
3900 }
3901 else
3902 {
3903 const Symbol* gsym;
3904 gsym = object->global_symbol(r_sym);
3905 gold_assert(gsym != NULL);
3906 if (gsym->is_forwarder())
3907 gsym = relinfo->symtab->resolve_forwards(gsym);
3908
3909 sym = static_cast<const Sized_symbol<size>*>(gsym);
3910 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3911 symval.set_output_symtab_index(sym->symtab_index());
3912 else
3913 symval.set_no_output_symtab_entry();
3914
3915 // We need to compute the would-be final value of this global
3916 // symbol.
3917 const Symbol_table* symtab = relinfo->symtab;
3918 const Sized_symbol<size>* sized_symbol =
3919 symtab->get_sized_symbol<size>(gsym);
3920 Symbol_table::Compute_final_value_status status;
3921 typename elfcpp::Elf_types<size>::Elf_Addr value =
3922 symtab->compute_final_value<size>(sized_symbol, &status);
3923
3924 // Skip this if the symbol has no output section.
3925 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3926 continue;
3927 symval.set_output_value(value);
3928
3929 if (gsym->type() == elfcpp::STT_TLS)
3930 symval.set_is_tls_symbol();
3931 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3932 symval.set_is_ifunc_symbol();
3933 psymval = &symval;
3934
3935 is_defined_in_discarded_section =
3936 (gsym->is_defined_in_discarded_section()
3937 && gsym->is_undefined());
3938 shndx = 0;
3939 }
3940
3941 Symbol_value<size> symval2;
3942 if (is_defined_in_discarded_section)
3943 {
3944 if (comdat_behavior == CB_UNDETERMINED)
3945 {
3946 std::string name = object->section_name(relinfo->data_shndx);
3947 comdat_behavior = default_comdat_behavior.get(name.c_str());
3948 }
3949 if (comdat_behavior == CB_PRETEND)
3950 {
3951 bool found;
3952 typename elfcpp::Elf_types<size>::Elf_Addr value =
3953 object->map_to_kept_section(shndx, &found);
3954 if (found)
3955 symval2.set_output_value(value + psymval->input_value());
3956 else
3957 symval2.set_output_value(0);
3958 }
3959 else
3960 {
3961 if (comdat_behavior == CB_WARNING)
3962 gold_warning_at_location(relinfo, i, offset,
3963 _("relocation refers to discarded "
3964 "section"));
3965 symval2.set_output_value(0);
3966 }
3967 symval2.set_no_output_symtab_entry();
3968 psymval = &symval2;
3969 }
3970
3971 // If symbol is a section symbol, we don't know the actual type of
3972 // destination. Give up.
3973 if (psymval->is_section_symbol())
3974 continue;
3975
3976 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
3977 addend, view_address + offset);
3978 } // End of iterating relocs in a section
3979 } // End of Target_aarch64::scan_reloc_section_for_stubs
3980
3981
3982 // Scan an input section for stub generation.
3983
3984 template<int size, bool big_endian>
3985 void
3986 Target_aarch64<size, big_endian>::scan_section_for_stubs(
3987 const Relocate_info<size, big_endian>* relinfo,
3988 unsigned int sh_type,
3989 const unsigned char* prelocs,
3990 size_t reloc_count,
3991 Output_section* output_section,
3992 bool needs_special_offset_handling,
3993 const unsigned char* view,
3994 Address view_address,
3995 section_size_type view_size)
3996 {
3997 gold_assert(sh_type == elfcpp::SHT_RELA);
3998 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
3999 relinfo,
4000 prelocs,
4001 reloc_count,
4002 output_section,
4003 needs_special_offset_handling,
4004 view,
4005 view_address,
4006 view_size);
4007 }
4008
4009
4010 // Relocate a single stub.
4011
4012 template<int size, bool big_endian>
4013 void Target_aarch64<size, big_endian>::
4014 relocate_stub(The_reloc_stub* stub,
4015 const The_relocate_info*,
4016 Output_section*,
4017 unsigned char* view,
4018 Address address,
4019 section_size_type)
4020 {
4021 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4022 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4023 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4024
4025 Insntype* ip = reinterpret_cast<Insntype*>(view);
4026 int insn_number = stub->insn_num();
4027 const uint32_t* insns = stub->insns();
4028 // Check that the insns in the view really are the stub insns.
4029 for (int i = 0; i < insn_number; ++i)
4030 {
4031 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4032 gold_assert(((uint32_t)insn == insns[i]));
4033 }
4034
4035 Address dest = stub->destination_address();
4036
4037 switch(stub->type())
4038 {
4039 case ST_ADRP_BRANCH:
4040 {
4041 // 1st reloc is ADR_PREL_PG_HI21
4042 The_reloc_functions_status status =
4043 The_reloc_functions::adrp(view, dest, address);
4044 // An error should never arise in the above step; if one does,
4045 // check 'aarch64_valid_for_adrp_p'.
4046 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4047
4048 // 2nd reloc is ADD_ABS_LO12_NC
4049 const AArch64_reloc_property* arp =
4050 aarch64_reloc_property_table->get_reloc_property(
4051 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4052 gold_assert(arp != NULL);
4053 status = The_reloc_functions::template
4054 rela_general<32>(view + 4, dest, 0, arp);
4055 // An error should never arise here, since this is an "_NC" relocation.
4056 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4057 }
4058 break;
4059
4060 case ST_LONG_BRANCH_ABS:
4061 // 1st reloc is R_AARCH64_PREL64, at offset 8
4062 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4063 break;
4064
4065 case ST_LONG_BRANCH_PCREL:
4066 {
4067 // "PC" calculation is the 2nd insn in the stub.
4068 uint64_t offset = dest - (address + 4);
4069 // The offset occupies insn slots 4 and 5, i.e. byte offset 16.
4070 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4071 }
4072 break;
4073
4074 default:
4075 gold_unreachable();
4076 }
4077 }
4078
4079
4080 // A class to handle the PLT data.
4081 // This is an abstract base class that handles most of the linker details
4082 // but does not know the actual contents of PLT entries. The derived
4083 // classes below fill in those details.
4084
4085 template<int size, bool big_endian>
4086 class Output_data_plt_aarch64 : public Output_section_data
4087 {
4088 public:
4089 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4090 Reloc_section;
4091 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4092
4093 Output_data_plt_aarch64(Layout* layout,
4094 uint64_t addralign,
4095 Output_data_got_aarch64<size, big_endian>* got,
4096 Output_data_space* got_plt,
4097 Output_data_space* got_irelative)
4098 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4099 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4100 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4101 { this->init(layout); }
4102
4103 // Initialize the PLT section.
4104 void
4105 init(Layout* layout);
4106
4107 // Add an entry to the PLT.
4108 void
4109 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4110
4111 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4112 unsigned int
4113 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4114 Sized_relobj_file<size, big_endian>* relobj,
4115 unsigned int local_sym_index);
4116
4117 // Add the relocation for a PLT entry.
4118 void
4119 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4120 unsigned int got_offset);
4121
4122 // Add the reserved TLSDESC_PLT entry to the PLT.
4123 void
4124 reserve_tlsdesc_entry(unsigned int got_offset)
4125 { this->tlsdesc_got_offset_ = got_offset; }
4126
4127 // Return true if a TLSDESC_PLT entry has been reserved.
4128 bool
4129 has_tlsdesc_entry() const
4130 { return this->tlsdesc_got_offset_ != -1U; }
4131
4132 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4133 unsigned int
4134 get_tlsdesc_got_offset() const
4135 { return this->tlsdesc_got_offset_; }
4136
4137 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4138 unsigned int
4139 get_tlsdesc_plt_offset() const
4140 {
4141 return (this->first_plt_entry_offset() +
4142 (this->count_ + this->irelative_count_)
4143 * this->get_plt_entry_size());
4144 }
4145
4146 // Return the .rela.plt section data.
4147 Reloc_section*
4148 rela_plt()
4149 { return this->rel_; }
4150
4151 // Return where the TLSDESC relocations should go.
4152 Reloc_section*
4153 rela_tlsdesc(Layout*);
4154
4155 // Return where the IRELATIVE relocations should go in the PLT
4156 // relocations.
4157 Reloc_section*
4158 rela_irelative(Symbol_table*, Layout*);
4159
4160 // Return whether we created a section for IRELATIVE relocations.
4161 bool
4162 has_irelative_section() const
4163 { return this->irelative_rel_ != NULL; }
4164
4165 // Return the number of PLT entries.
4166 unsigned int
4167 entry_count() const
4168 { return this->count_ + this->irelative_count_; }
4169
4170 // Return the offset of the first non-reserved PLT entry.
4171 unsigned int
4172 first_plt_entry_offset() const
4173 { return this->do_first_plt_entry_offset(); }
4174
4175 // Return the size of a PLT entry.
4176 unsigned int
4177 get_plt_entry_size() const
4178 { return this->do_get_plt_entry_size(); }
4179
4180 // Return the reserved tlsdesc entry size.
4181 unsigned int
4182 get_plt_tlsdesc_entry_size() const
4183 { return this->do_get_plt_tlsdesc_entry_size(); }
4184
4185 // Return the PLT address to use for a global symbol.
4186 uint64_t
4187 address_for_global(const Symbol*);
4188
4189 // Return the PLT address to use for a local symbol.
4190 uint64_t
4191 address_for_local(const Relobj*, unsigned int symndx);
4192
4193 protected:
4194 // Fill in the first PLT entry.
4195 void
4196 fill_first_plt_entry(unsigned char* pov,
4197 Address got_address,
4198 Address plt_address)
4199 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4200
4201 // Fill in a normal PLT entry.
4202 void
4203 fill_plt_entry(unsigned char* pov,
4204 Address got_address,
4205 Address plt_address,
4206 unsigned int got_offset,
4207 unsigned int plt_offset)
4208 {
4209 this->do_fill_plt_entry(pov, got_address, plt_address,
4210 got_offset, plt_offset);
4211 }
4212
4213 // Fill in the reserved TLSDESC PLT entry.
4214 void
4215 fill_tlsdesc_entry(unsigned char* pov,
4216 Address gotplt_address,
4217 Address plt_address,
4218 Address got_base,
4219 unsigned int tlsdesc_got_offset,
4220 unsigned int plt_offset)
4221 {
4222 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4223 tlsdesc_got_offset, plt_offset);
4224 }
4225
4226 virtual unsigned int
4227 do_first_plt_entry_offset() const = 0;
4228
4229 virtual unsigned int
4230 do_get_plt_entry_size() const = 0;
4231
4232 virtual unsigned int
4233 do_get_plt_tlsdesc_entry_size() const = 0;
4234
4235 virtual void
4236 do_fill_first_plt_entry(unsigned char* pov,
4237 Address got_addr,
4238 Address plt_addr) = 0;
4239
4240 virtual void
4241 do_fill_plt_entry(unsigned char* pov,
4242 Address got_address,
4243 Address plt_address,
4244 unsigned int got_offset,
4245 unsigned int plt_offset) = 0;
4246
4247 virtual void
4248 do_fill_tlsdesc_entry(unsigned char* pov,
4249 Address gotplt_address,
4250 Address plt_address,
4251 Address got_base,
4252 unsigned int tlsdesc_got_offset,
4253 unsigned int plt_offset) = 0;
4254
4255 void
4256 do_adjust_output_section(Output_section* os);
4257
4258 // Write to a map file.
4259 void
4260 do_print_to_mapfile(Mapfile* mapfile) const
4261 { mapfile->print_output_data(this, _("** PLT")); }
4262
4263 private:
4264 // Set the final size.
4265 void
4266 set_final_data_size();
4267
4268 // Write out the PLT data.
4269 void
4270 do_write(Output_file*);
4271
4272 // The reloc section.
4273 Reloc_section* rel_;
4274
4275 // The TLSDESC relocs, if necessary. These must follow the regular
4276 // PLT relocs.
4277 Reloc_section* tlsdesc_rel_;
4278
4279 // The IRELATIVE relocs, if necessary. These must follow the
4280 // regular PLT relocations.
4281 Reloc_section* irelative_rel_;
4282
4283 // The .got section.
4284 Output_data_got_aarch64<size, big_endian>* got_;
4285
4286 // The .got.plt section.
4287 Output_data_space* got_plt_;
4288
4289 // The part of the .got.plt section used for IRELATIVE relocs.
4290 Output_data_space* got_irelative_;
4291
4292 // The number of PLT entries.
4293 unsigned int count_;
4294
4295 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4296 // follow the regular PLT entries.
4297 unsigned int irelative_count_;
4298
4299 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4300 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4301 // indicates an offset is not allocated.
4302 unsigned int tlsdesc_got_offset_;
4303 };
4304
4305 // Initialize the PLT section.
4306
4307 template<int size, bool big_endian>
4308 void
4309 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4310 {
4311 this->rel_ = new Reloc_section(false);
4312 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4313 elfcpp::SHF_ALLOC, this->rel_,
4314 ORDER_DYNAMIC_PLT_RELOCS, false);
4315 }
4316
4317 template<int size, bool big_endian>
4318 void
4319 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4320 Output_section* os)
4321 {
4322 os->set_entsize(this->get_plt_entry_size());
4323 }
4324
4325 // Add an entry to the PLT.
4326
4327 template<int size, bool big_endian>
4328 void
4329 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4330 Layout* layout, Symbol* gsym)
4331 {
4332 gold_assert(!gsym->has_plt_offset());
4333
4334 unsigned int* pcount;
4335 unsigned int plt_reserved;
4336 Output_section_data_build* got;
4337
4338 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4339 && gsym->can_use_relative_reloc(false))
4340 {
4341 pcount = &this->irelative_count_;
4342 plt_reserved = 0;
4343 got = this->got_irelative_;
4344 }
4345 else
4346 {
4347 pcount = &this->count_;
4348 plt_reserved = this->first_plt_entry_offset();
4349 got = this->got_plt_;
4350 }
4351
4352 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4353 + plt_reserved);
4354
4355 ++*pcount;
4356
4357 section_offset_type got_offset = got->current_data_size();
4358
4359 // Every PLT entry needs a GOT entry which points back to the PLT
4360 // entry (this will be changed by the dynamic linker, normally
4361 // lazily when the function is called).
4362 got->set_current_data_size(got_offset + size / 8);
4363
4364 // Every PLT entry needs a reloc.
4365 this->add_relocation(symtab, layout, gsym, got_offset);
4366
4367 // Note that we don't need to save the symbol. The contents of the
4368 // PLT are independent of which symbols are used. The symbols only
4369 // appear in the relocations.
4370 }
4371
4372 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4373 // the PLT offset.
4374
4375 template<int size, bool big_endian>
4376 unsigned int
4377 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4378 Symbol_table* symtab,
4379 Layout* layout,
4380 Sized_relobj_file<size, big_endian>* relobj,
4381 unsigned int local_sym_index)
4382 {
4383 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4384 ++this->irelative_count_;
4385
4386 section_offset_type got_offset = this->got_irelative_->current_data_size();
4387
4388 // Every PLT entry needs a GOT entry which points back to the PLT
4389 // entry.
4390 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4391
4392 // Every PLT entry needs a reloc.
4393 Reloc_section* rela = this->rela_irelative(symtab, layout);
4394 rela->add_symbolless_local_addend(relobj, local_sym_index,
4395 elfcpp::R_AARCH64_IRELATIVE,
4396 this->got_irelative_, got_offset, 0);
4397
4398 return plt_offset;
4399 }
4400
4401 // Add the relocation for a PLT entry.
4402
4403 template<int size, bool big_endian>
4404 void
4405 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4406 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4407 {
4408 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4409 && gsym->can_use_relative_reloc(false))
4410 {
4411 Reloc_section* rela = this->rela_irelative(symtab, layout);
4412 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4413 this->got_irelative_, got_offset, 0);
4414 }
4415 else
4416 {
4417 gsym->set_needs_dynsym_entry();
4418 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4419 got_offset, 0);
4420 }
4421 }
4422
4423 // Return where the TLSDESC relocations should go, creating it if
4424 // necessary. These follow the JUMP_SLOT relocations.
4425
4426 template<int size, bool big_endian>
4427 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4428 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4429 {
4430 if (this->tlsdesc_rel_ == NULL)
4431 {
4432 this->tlsdesc_rel_ = new Reloc_section(false);
4433 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4434 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4435 ORDER_DYNAMIC_PLT_RELOCS, false);
4436 gold_assert(this->tlsdesc_rel_->output_section()
4437 == this->rel_->output_section());
4438 }
4439 return this->tlsdesc_rel_;
4440 }
4441
4442 // Return where the IRELATIVE relocations should go in the PLT. These
4443 // follow the JUMP_SLOT and the TLSDESC relocations.
4444
4445 template<int size, bool big_endian>
4446 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4447 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4448 Layout* layout)
4449 {
4450 if (this->irelative_rel_ == NULL)
4451 {
4452 // Make sure we have a place for the TLSDESC relocations, in
4453 // case we see any later on.
4454 this->rela_tlsdesc(layout);
4455 this->irelative_rel_ = new Reloc_section(false);
4456 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4457 elfcpp::SHF_ALLOC, this->irelative_rel_,
4458 ORDER_DYNAMIC_PLT_RELOCS, false);
4459 gold_assert(this->irelative_rel_->output_section()
4460 == this->rel_->output_section());
4461
4462 if (parameters->doing_static_link())
4463 {
4464 // A statically linked executable will only have a .rela.plt
4465 // section to hold R_AARCH64_IRELATIVE relocs for
4466 // STT_GNU_IFUNC symbols. The library will use these
4467 // symbols to locate the IRELATIVE relocs at program startup
4468 // time.
4469 symtab->define_in_output_data("__rela_iplt_start", NULL,
4470 Symbol_table::PREDEFINED,
4471 this->irelative_rel_, 0, 0,
4472 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4473 elfcpp::STV_HIDDEN, 0, false, true);
4474 symtab->define_in_output_data("__rela_iplt_end", NULL,
4475 Symbol_table::PREDEFINED,
4476 this->irelative_rel_, 0, 0,
4477 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4478 elfcpp::STV_HIDDEN, 0, true, true);
4479 }
4480 }
4481 return this->irelative_rel_;
4482 }
4483
4484 // Return the PLT address to use for a global symbol.
4485
4486 template<int size, bool big_endian>
4487 uint64_t
4488 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4489 const Symbol* gsym)
4490 {
4491 uint64_t offset = 0;
4492 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4493 && gsym->can_use_relative_reloc(false))
4494 offset = (this->first_plt_entry_offset() +
4495 this->count_ * this->get_plt_entry_size());
4496 return this->address() + offset + gsym->plt_offset();
4497 }
4498
4499 // Return the PLT address to use for a local symbol. These are always
4500 // IRELATIVE relocs.
4501
4502 template<int size, bool big_endian>
4503 uint64_t
4504 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4505 const Relobj* object,
4506 unsigned int r_sym)
4507 {
4508 return (this->address()
4509 + this->first_plt_entry_offset()
4510 + this->count_ * this->get_plt_entry_size()
4511 + object->local_plt_offset(r_sym));
4512 }
4513
4514 // Set the final size.
4515
4516 template<int size, bool big_endian>
4517 void
4518 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4519 {
4520 unsigned int count = this->count_ + this->irelative_count_;
4521 unsigned int extra_size = 0;
4522 if (this->has_tlsdesc_entry())
4523 extra_size += this->get_plt_tlsdesc_entry_size();
4524 this->set_data_size(this->first_plt_entry_offset()
4525 + count * this->get_plt_entry_size()
4526 + extra_size);
4527 }
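// An illustrative sketch only (not part of gold): how the PLT offsets
// computed above fit together.  With the standard sizes defined below
// (a 32-byte header and 16-byte entries), regular entry I starts at
// header + I * entry_size, IRELATIVE entries are numbered after all
// regular entries, and the reserved TLSDESC entry (if any) comes last.
// The name example_plt_entry_offset is hypothetical.
inline uint64_t
example_plt_entry_offset(unsigned int header_size,   // e.g. 32
                         unsigned int entry_size,    // e.g. 16
                         unsigned int regular_count,
                         unsigned int index,
                         bool is_irelative)
{
  // IRELATIVE entries follow the regular PLT entries.
  unsigned int slot = is_irelative ? regular_count + index : index;
  return header_size + static_cast<uint64_t>(slot) * entry_size;
}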
4528
4529 template<int size, bool big_endian>
4530 class Output_data_plt_aarch64_standard :
4531 public Output_data_plt_aarch64<size, big_endian>
4532 {
4533 public:
4534 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4535 Output_data_plt_aarch64_standard(
4536 Layout* layout,
4537 Output_data_got_aarch64<size, big_endian>* got,
4538 Output_data_space* got_plt,
4539 Output_data_space* got_irelative)
4540 : Output_data_plt_aarch64<size, big_endian>(layout,
4541 size == 32 ? 4 : 8,
4542 got, got_plt,
4543 got_irelative)
4544 { }
4545
4546 protected:
4547 // Return the offset of the first non-reserved PLT entry.
4548 virtual unsigned int
4549 do_first_plt_entry_offset() const
4550 { return this->first_plt_entry_size; }
4551
4552 // Return the size of a PLT entry
4553 virtual unsigned int
4554 do_get_plt_entry_size() const
4555 { return this->plt_entry_size; }
4556
4557 // Return the size of a tlsdesc entry
4558 virtual unsigned int
4559 do_get_plt_tlsdesc_entry_size() const
4560 { return this->plt_tlsdesc_entry_size; }
4561
4562 virtual void
4563 do_fill_first_plt_entry(unsigned char* pov,
4564 Address got_address,
4565 Address plt_address);
4566
4567 virtual void
4568 do_fill_plt_entry(unsigned char* pov,
4569 Address got_address,
4570 Address plt_address,
4571 unsigned int got_offset,
4572 unsigned int plt_offset);
4573
4574 virtual void
4575 do_fill_tlsdesc_entry(unsigned char* pov,
4576 Address gotplt_address,
4577 Address plt_address,
4578 Address got_base,
4579 unsigned int tlsdesc_got_offset,
4580 unsigned int plt_offset);
4581
4582 private:
4583 // The size of the first PLT entry.
4584 static const int first_plt_entry_size = 32;
4585 // The size of a regular PLT entry.
4586 static const int plt_entry_size = 16;
4587 // The size of the reserved TLSDESC PLT entry.
4588 static const int plt_tlsdesc_entry_size = 32;
4589 // Template for the first PLT entry.
4590 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4591 // Template for subsequent PLT entries.
4592 static const uint32_t plt_entry[plt_entry_size / 4];
4593 // The reserved TLSDESC entry in the PLT for an executable.
4594 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4595 };
4596
4597 // The first entry in the PLT for an executable.
4598
4599 template<>
4600 const uint32_t
4601 Output_data_plt_aarch64_standard<32, false>::
4602 first_plt_entry[first_plt_entry_size / 4] =
4603 {
4604 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4605 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4606 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4607 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4608 0xd61f0220, /* br x17 */
4609 0xd503201f, /* nop */
4610 0xd503201f, /* nop */
4611 0xd503201f, /* nop */
4612 };
4613
4614
4615 template<>
4616 const uint32_t
4617 Output_data_plt_aarch64_standard<32, true>::
4618 first_plt_entry[first_plt_entry_size / 4] =
4619 {
4620 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4621 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4622 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4623 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4624 0xd61f0220, /* br x17 */
4625 0xd503201f, /* nop */
4626 0xd503201f, /* nop */
4627 0xd503201f, /* nop */
4628 };
4629
4630
4631 template<>
4632 const uint32_t
4633 Output_data_plt_aarch64_standard<64, false>::
4634 first_plt_entry[first_plt_entry_size / 4] =
4635 {
4636 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4637 0x90000010, /* adrp x16, PLT_GOT+16 */
4638 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4639 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4640 0xd61f0220, /* br x17 */
4641 0xd503201f, /* nop */
4642 0xd503201f, /* nop */
4643 0xd503201f, /* nop */
4644 };
4645
4646
4647 template<>
4648 const uint32_t
4649 Output_data_plt_aarch64_standard<64, true>::
4650 first_plt_entry[first_plt_entry_size / 4] =
4651 {
4652 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4653 0x90000010, /* adrp x16, PLT_GOT+16 */
4654 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4655 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4656 0xd61f0220, /* br x17 */
4657 0xd503201f, /* nop */
4658 0xd503201f, /* nop */
4659 0xd503201f, /* nop */
4660 };
4661
4662
4663 template<>
4664 const uint32_t
4665 Output_data_plt_aarch64_standard<32, false>::
4666 plt_entry[plt_entry_size / 4] =
4667 {
4668 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4669 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4670 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4671 0xd61f0220, /* br x17. */
4672 };
4673
4674
4675 template<>
4676 const uint32_t
4677 Output_data_plt_aarch64_standard<32, true>::
4678 plt_entry[plt_entry_size / 4] =
4679 {
4680 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4681 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4682 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4683 0xd61f0220, /* br x17. */
4684 };
4685
4686
4687 template<>
4688 const uint32_t
4689 Output_data_plt_aarch64_standard<64, false>::
4690 plt_entry[plt_entry_size / 4] =
4691 {
4692 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4693 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4694 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4695 0xd61f0220, /* br x17. */
4696 };
4697
4698
4699 template<>
4700 const uint32_t
4701 Output_data_plt_aarch64_standard<64, true>::
4702 plt_entry[plt_entry_size / 4] =
4703 {
4704 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4705 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4706 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4707 0xd61f0220, /* br x17. */
4708 };
4709
4710
4711 template<int size, bool big_endian>
4712 void
4713 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4714 unsigned char* pov,
4715 Address got_address,
4716 Address plt_address)
4717 {
4718 // PLT0 of the small PLT looks like this in ELF64 -
4719 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4720 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4721 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4722 // symbol resolver
4723 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4724 // GOTPLT entry for this.
4725 // br x17
4726 // PLT0 will be slightly different in ELF32 due to different got entry
4727 // size.
4728 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4729 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4730
4731 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4732 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4733 // FIXME: This only works for 64bit
4734 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4735 gotplt_2nd_ent, plt_address + 4);
4736
4737 // Fill in R_AARCH64_LDST64_ABS_LO12
4738 elfcpp::Swap<32, big_endian>::writeval(
4739 pov + 8,
4740 ((this->first_plt_entry[2] & 0xffc003ff)
4741 | ((gotplt_2nd_ent & 0xff8) << 7)));
4742
4743 // Fill in R_AARCH64_ADD_ABS_LO12
4744 elfcpp::Swap<32, big_endian>::writeval(
4745 pov + 12,
4746 ((this->first_plt_entry[3] & 0xffc003ff)
4747 | ((gotplt_2nd_ent & 0xfff) << 10)));
4748 }
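// An illustrative sketch only (hypothetical helpers, not gold API): the
// page/low-12-bit split used when patching the ADRP/LDR/ADD triple above.
// The ADRP immediate is the 4KiB-page delta between the target and the
// place, in pages; the LDR/ADD immediates carry the low 12 bits of the
// target (the LDR encoding additionally scales its offset by the access
// size, hence the 0xff8 mask above).
inline uint64_t
example_page(uint64_t address)
{ return address & ~static_cast<uint64_t>(0xfff); }

inline int64_t
example_adrp_imm(uint64_t target, uint64_t place)
{
  // Number of 4KiB pages between Page(target) and Page(place).
  return static_cast<int64_t>(example_page(target) - example_page(place)) >> 12;
}

inline uint32_t
example_low12(uint64_t target)
{ return static_cast<uint32_t>(target & 0xfff); }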
4749
4750
4751 // Subsequent entries in the PLT for an executable.
4752 // FIXME: This only works for 64bit
4753
4754 template<int size, bool big_endian>
4755 void
4756 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4757 unsigned char* pov,
4758 Address got_address,
4759 Address plt_address,
4760 unsigned int got_offset,
4761 unsigned int plt_offset)
4762 {
4763 memcpy(pov, this->plt_entry, this->plt_entry_size);
4764
4765 Address gotplt_entry_address = got_address + got_offset;
4766 Address plt_entry_address = plt_address + plt_offset;
4767
4768 // Fill in R_AARCH64_PCREL_ADR_HI21
4769 AArch64_relocate_functions<size, big_endian>::adrp(
4770 pov,
4771 gotplt_entry_address,
4772 plt_entry_address);
4773
4774 // Fill in R_AARCH64_LDST64_ABS_LO12
4775 elfcpp::Swap<32, big_endian>::writeval(
4776 pov + 4,
4777 ((this->plt_entry[1] & 0xffc003ff)
4778 | ((gotplt_entry_address & 0xff8) << 7)));
4779
4780 // Fill in R_AARCH64_ADD_ABS_LO12
4781 elfcpp::Swap<32, big_endian>::writeval(
4782 pov + 8,
4783 ((this->plt_entry[2] & 0xffc003ff)
4784 | ((gotplt_entry_address & 0xfff) <<10)));
4785
4786 }
4787
4788
4789 template<>
4790 const uint32_t
4791 Output_data_plt_aarch64_standard<32, false>::
4792 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4793 {
4794 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4795 0x90000002, /* adrp x2, 0 */
4796 0x90000003, /* adrp x3, 0 */
4797 0xb9400042, /* ldr w2, [w2, #0] */
4798 0x11000063, /* add w3, w3, 0 */
4799 0xd61f0040, /* br x2 */
4800 0xd503201f, /* nop */
4801 0xd503201f, /* nop */
4802 };
4803
4804 template<>
4805 const uint32_t
4806 Output_data_plt_aarch64_standard<32, true>::
4807 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4808 {
4809 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4810 0x90000002, /* adrp x2, 0 */
4811 0x90000003, /* adrp x3, 0 */
4812 0xb9400042, /* ldr w2, [w2, #0] */
4813 0x11000063, /* add w3, w3, 0 */
4814 0xd61f0040, /* br x2 */
4815 0xd503201f, /* nop */
4816 0xd503201f, /* nop */
4817 };
4818
4819 template<>
4820 const uint32_t
4821 Output_data_plt_aarch64_standard<64, false>::
4822 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4823 {
4824 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4825 0x90000002, /* adrp x2, 0 */
4826 0x90000003, /* adrp x3, 0 */
4827 0xf9400042, /* ldr x2, [x2, #0] */
4828 0x91000063, /* add x3, x3, 0 */
4829 0xd61f0040, /* br x2 */
4830 0xd503201f, /* nop */
4831 0xd503201f, /* nop */
4832 };
4833
4834 template<>
4835 const uint32_t
4836 Output_data_plt_aarch64_standard<64, true>::
4837 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4838 {
4839 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4840 0x90000002, /* adrp x2, 0 */
4841 0x90000003, /* adrp x3, 0 */
4842 0xf9400042, /* ldr x2, [x2, #0] */
4843 0x91000063, /* add x3, x3, 0 */
4844 0xd61f0040, /* br x2 */
4845 0xd503201f, /* nop */
4846 0xd503201f, /* nop */
4847 };
4848
4849 template<int size, bool big_endian>
4850 void
4851 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4852 unsigned char* pov,
4853 Address gotplt_address,
4854 Address plt_address,
4855 Address got_base,
4856 unsigned int tlsdesc_got_offset,
4857 unsigned int plt_offset)
4858 {
4859 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4860
4861 // move DT_TLSDESC_GOT address into x2
4862 // move .got.plt address into x3
4863 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4864 Address plt_entry_address = plt_address + plt_offset;
4865
4866 // R_AARCH64_ADR_PREL_PG_HI21
4867 AArch64_relocate_functions<size, big_endian>::adrp(
4868 pov + 4,
4869 tlsdesc_got_entry,
4870 plt_entry_address + 4);
4871
4872 // R_AARCH64_ADR_PREL_PG_HI21
4873 AArch64_relocate_functions<size, big_endian>::adrp(
4874 pov + 8,
4875 gotplt_address,
4876 plt_entry_address + 8);
4877
4878 // R_AARCH64_LDST64_ABS_LO12
4879 elfcpp::Swap<32, big_endian>::writeval(
4880 pov + 12,
4881 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4882 | ((tlsdesc_got_entry & 0xff8) << 7)));
4883
4884 // R_AARCH64_ADD_ABS_LO12
4885 elfcpp::Swap<32, big_endian>::writeval(
4886 pov + 16,
4887 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4888 | ((gotplt_address & 0xfff) << 10)));
4889 }
4890
4891 // Write out the PLT. This uses the hand-coded instructions above,
4892 // and adjusts them as needed, following the AArch64 ELF ABI.
4893
4894 template<int size, bool big_endian>
4895 void
4896 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4897 {
4898 const off_t offset = this->offset();
4899 const section_size_type oview_size =
4900 convert_to_section_size_type(this->data_size());
4901 unsigned char* const oview = of->get_output_view(offset, oview_size);
4902
4903 const off_t got_file_offset = this->got_plt_->offset();
4904 gold_assert(got_file_offset + this->got_plt_->data_size()
4905 == this->got_irelative_->offset());
4906
4907 const section_size_type got_size =
4908 convert_to_section_size_type(this->got_plt_->data_size()
4909 + this->got_irelative_->data_size());
4910 unsigned char* const got_view = of->get_output_view(got_file_offset,
4911 got_size);
4912
4913 unsigned char* pov = oview;
4914
4915 // The base address of the .plt section.
4916 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4917 // The base address of the .got.plt section.
4918 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4919 = this->got_plt_->address();
4920
4921 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4922 pov += this->first_plt_entry_offset();
4923
4924 // The first three entries in .got.plt are reserved.
4925 unsigned char* got_pov = got_view;
4926 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4927 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4928
4929 unsigned int plt_offset = this->first_plt_entry_offset();
4930 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4931 const unsigned int count = this->count_ + this->irelative_count_;
4932 for (unsigned int plt_index = 0;
4933 plt_index < count;
4934 ++plt_index,
4935 pov += this->get_plt_entry_size(),
4936 got_pov += size / 8,
4937 plt_offset += this->get_plt_entry_size(),
4938 got_offset += size / 8)
4939 {
4940 // Set and adjust the PLT entry itself.
4941 this->fill_plt_entry(pov, gotplt_address, plt_address,
4942 got_offset, plt_offset);
4943
4944 // Set the entry in the GOT, which points to plt0.
4945 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4946 }
4947
4948 if (this->has_tlsdesc_entry())
4949 {
4950 // Set and adjust the reserved TLSDESC PLT entry.
4951 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4952 // The base address of the .got section.
4953 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4954 this->got_->address();
4955 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4956 tlsdesc_got_offset, plt_offset);
4957 pov += this->get_plt_tlsdesc_entry_size();
4958 }
4959
4960 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4961 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4962
4963 of->write_output_view(offset, oview_size, oview);
4964 of->write_output_view(got_file_offset, got_size, got_view);
4965 }
4966
4967 // Describes how to update the immediate field of an instruction.
4968 struct AArch64_howto
4969 {
4970 // The immediate field mask.
4971 elfcpp::Elf_Xword dst_mask;
4972
4973 // The bit offset at which the relocation immediate is inserted.
4974 int doffset;
4975
4976 // The second part offset, if the immediate field has two parts.
4977 // -1 if the immediate field has only one part.
4978 int doffset2;
4979 };
4980
4981 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
4982 {
4983 {0, -1, -1}, // DATA
4984 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
4985 {0xffffe0, 5, -1}, // LD [23:5]-imm19
4986 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
4987 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
4988 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
4989 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
4990 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
4991 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
4992 {0x3ffffff, 0, -1}, // B [25:0]-imm26
4993 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
4994 };
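// An illustrative sketch only: how a single-part aarch64_howto entry is
// meant to be applied.  For example, the ADD row above (dst_mask 0x3ffc00,
// doffset 10) places a 12-bit immediate into bits [21:10] of the
// instruction word.  The class below (update_view) does this generically
// with endian-aware reads and writes; example_apply_howto is hypothetical.
inline uint32_t
example_apply_howto(uint32_t insn, uint32_t immed, const AArch64_howto& howto)
{
  return (insn & ~static_cast<uint32_t>(howto.dst_mask))
         | (immed << howto.doffset);
}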
4995
4996 // AArch64 relocate function class
4997
4998 template<int size, bool big_endian>
4999 class AArch64_relocate_functions
5000 {
5001 public:
5002 typedef enum
5003 {
5004 STATUS_OKAY, // No error during relocation.
5005 STATUS_OVERFLOW, // Relocation overflow.
5006 STATUS_BAD_RELOC, // Relocation cannot be applied.
5007 } Status;
5008
5009 typedef AArch64_relocate_functions<size, big_endian> This;
5010 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5011 typedef Relocate_info<size, big_endian> The_relocate_info;
5012 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5013 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5014 typedef Stub_table<size, big_endian> The_stub_table;
5015 typedef elfcpp::Rela<size, big_endian> The_rela;
5016 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5017
5018 // Return the page address of the address.
5019 // Page(address) = address & ~0xFFF
5020
5021 static inline AArch64_valtype
5022 Page(Address address)
5023 {
5024 return (address & (~static_cast<Address>(0xFFF)));
5025 }
5026
5027 private:
5028 // Update the instruction (pointed to by view) with the selected bits (immed).
5029 // val = (val & ~dst_mask) | (immed << doffset)
5030
5031 template<int valsize>
5032 static inline void
5033 update_view(unsigned char* view,
5034 AArch64_valtype immed,
5035 elfcpp::Elf_Xword doffset,
5036 elfcpp::Elf_Xword dst_mask)
5037 {
5038 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5039 Valtype* wv = reinterpret_cast<Valtype*>(view);
5040 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5041
5042 // Clear immediate fields.
5043 val &= ~dst_mask;
5044 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5045 static_cast<Valtype>(val | (immed << doffset)));
5046 }
5047
5048 // Update two parts of an instruction (pointed to by view) with the
5049 // selected bits (immed1 and immed2).
5050 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5051
5052 template<int valsize>
5053 static inline void
5054 update_view_two_parts(
5055 unsigned char* view,
5056 AArch64_valtype immed1,
5057 AArch64_valtype immed2,
5058 elfcpp::Elf_Xword doffset1,
5059 elfcpp::Elf_Xword doffset2,
5060 elfcpp::Elf_Xword dst_mask)
5061 {
5062 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5063 Valtype* wv = reinterpret_cast<Valtype*>(view);
5064 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5065 val &= ~dst_mask;
5066 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5067 static_cast<Valtype>(val | (immed1 << doffset1) |
5068 (immed2 << doffset2)));
5069 }
5070
5071 // Update adr or adrp instruction with immed.
5072 // In adr and adrp: [30:29] immlo [23:5] immhi
5073
5074 static inline void
5075 update_adr(unsigned char* view, AArch64_valtype immed)
5076 {
5077 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5078 This::template update_view_two_parts<32>(
5079 view,
5080 immed & 0x3,
5081 (immed & 0x1ffffc) >> 2,
5082 29,
5083 5,
5084 dst_mask);
5085 }
5086
5087 // Update movz/movn instruction with bits immed.
5088 // Set instruction to movz if is_movz is true, otherwise set instruction
5089 // to movn.
5090
5091 static inline void
5092 update_movnz(unsigned char* view,
5093 AArch64_valtype immed,
5094 bool is_movz)
5095 {
5096 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5097 Valtype* wv = reinterpret_cast<Valtype*>(view);
5098 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5099
5100 const elfcpp::Elf_Xword doffset =
5101 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5102 const elfcpp::Elf_Xword dst_mask =
5103 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5104
5105 // Clear immediate fields and opc code.
5106 val &= ~(dst_mask | (0x3 << 29));
5107
5108 // Set instruction to movz or movn.
5109 // movz: [30:29] is 10 movn: [30:29] is 00
5110 if (is_movz)
5111 val |= (0x2 << 29);
5112
5113 elfcpp::Swap<32, big_endian>::writeval(wv,
5114 static_cast<Valtype>(val | (immed << doffset)));
5115 }
5116
5117 public:
5118
5119 // Update selected bits in text.
5120
5121 template<int valsize>
5122 static inline typename This::Status
5123 reloc_common(unsigned char* view, Address x,
5124 const AArch64_reloc_property* reloc_property)
5125 {
5126 // Select bits from X.
5127 Address immed = reloc_property->select_x_value(x);
5128
5129 // Update view.
5130 const AArch64_reloc_property::Reloc_inst inst =
5131 reloc_property->reloc_inst();
5132 // This must not be used for data relocations or for instructions whose
5133 // immediate field has two parts (such as ADR/ADRP).
5134 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5135 aarch64_howto[inst].doffset != -1);
5136 This::template update_view<valsize>(view, immed,
5137 aarch64_howto[inst].doffset,
5138 aarch64_howto[inst].dst_mask);
5139
5140 // Check for overflow or misalignment if needed.
5141 return (reloc_property->checkup_x_value(x)
5142 ? This::STATUS_OKAY
5143 : This::STATUS_OVERFLOW);
5144 }
5145
5146 // Construct a B insn. Note that although it is grouped here with the other
5147 // relocation operations, no actual 'relocation' is involved.
5148 static inline void
5149 construct_b(unsigned char* view, unsigned int branch_offset)
5150 {
5151 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5152 26, 0, 0xffffffff);
5153 }
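// An illustrative sketch only (not used by gold): the word produced by
// construct_b above.  An unconditional B is opcode 0b000101 in bits
// [31:26] with a signed 26-bit word offset in bits [25:0];
// example_encode_b is a hypothetical name.
static inline uint32_t
example_encode_b(int32_t byte_offset)
{
  return (0x05u << 26)
         | ((static_cast<uint32_t>(byte_offset) >> 2) & 0x03ffffff);
}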
5154
5155 // Do a simple rela relocation at unaligned addresses.
5156
5157 template<int valsize>
5158 static inline typename This::Status
5159 rela_ua(unsigned char* view,
5160 const Sized_relobj_file<size, big_endian>* object,
5161 const Symbol_value<size>* psymval,
5162 AArch64_valtype addend,
5163 const AArch64_reloc_property* reloc_property)
5164 {
5165 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5166 Valtype;
5167 typename elfcpp::Elf_types<size>::Elf_Addr x =
5168 psymval->value(object, addend);
5169 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5170 static_cast<Valtype>(x));
5171 return (reloc_property->checkup_x_value(x)
5172 ? This::STATUS_OKAY
5173 : This::STATUS_OVERFLOW);
5174 }
5175
5176 // Do a simple pc-relative relocation at unaligned addresses.
5177
5178 template<int valsize>
5179 static inline typename This::Status
5180 pcrela_ua(unsigned char* view,
5181 const Sized_relobj_file<size, big_endian>* object,
5182 const Symbol_value<size>* psymval,
5183 AArch64_valtype addend,
5184 Address address,
5185 const AArch64_reloc_property* reloc_property)
5186 {
5187 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5188 Valtype;
5189 Address x = psymval->value(object, addend) - address;
5190 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5191 static_cast<Valtype>(x));
5192 return (reloc_property->checkup_x_value(x)
5193 ? This::STATUS_OKAY
5194 : This::STATUS_OVERFLOW);
5195 }
5196
5197 // Do a simple rela relocation at aligned addresses.
5198
5199 template<int valsize>
5200 static inline typename This::Status
5201 rela(
5202 unsigned char* view,
5203 const Sized_relobj_file<size, big_endian>* object,
5204 const Symbol_value<size>* psymval,
5205 AArch64_valtype addend,
5206 const AArch64_reloc_property* reloc_property)
5207 {
5208 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5209 Valtype* wv = reinterpret_cast<Valtype*>(view);
5210 Address x = psymval->value(object, addend);
5211 elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5212 return (reloc_property->checkup_x_value(x)
5213 ? This::STATUS_OKAY
5214 : This::STATUS_OVERFLOW);
5215 }
5216
5217 // Do the relocation. Update selected bits in text.
5218 // new_val = (val & ~dst_mask) | (immed << doffset)
5219
5220 template<int valsize>
5221 static inline typename This::Status
5222 rela_general(unsigned char* view,
5223 const Sized_relobj_file<size, big_endian>* object,
5224 const Symbol_value<size>* psymval,
5225 AArch64_valtype addend,
5226 const AArch64_reloc_property* reloc_property)
5227 {
5228 // Calculate relocation.
5229 Address x = psymval->value(object, addend);
5230 return This::template reloc_common<valsize>(view, x, reloc_property);
5231 }
5232
5233 // Do the relocation. Update selected bits in text.
5234 // new_val = (val & ~dst_mask) | (immed << doffset)
5235
5236 template<int valsize>
5237 static inline typename This::Status
5238 rela_general(
5239 unsigned char* view,
5240 AArch64_valtype s,
5241 AArch64_valtype addend,
5242 const AArch64_reloc_property* reloc_property)
5243 {
5244 // Calculate relocation.
5245 Address x = s + addend;
5246 return This::template reloc_common<valsize>(view, x, reloc_property);
5247 }
5248
5249 // Do an address-relative relocation. Update selected bits in text.
5250 // new_val = (val & ~dst_mask) | (immed << doffset)
5251
5252 template<int valsize>
5253 static inline typename This::Status
5254 pcrela_general(
5255 unsigned char* view,
5256 const Sized_relobj_file<size, big_endian>* object,
5257 const Symbol_value<size>* psymval,
5258 AArch64_valtype addend,
5259 Address address,
5260 const AArch64_reloc_property* reloc_property)
5261 {
5262 // Calculate relocation.
5263 Address x = psymval->value(object, addend) - address;
5264 return This::template reloc_common<valsize>(view, x, reloc_property);
5265 }
5266
5267
5268 // Calculate (S + A) - address, update adr instruction.
5269
5270 static inline typename This::Status
5271 adr(unsigned char* view,
5272 const Sized_relobj_file<size, big_endian>* object,
5273 const Symbol_value<size>* psymval,
5274 Address addend,
5275 Address address,
5276 const AArch64_reloc_property* /* reloc_property */)
5277 {
5278 AArch64_valtype x = psymval->value(object, addend) - address;
5279 // Pick bits [20:0] of X.
5280 AArch64_valtype immed = x & 0x1fffff;
5281 update_adr(view, immed);
5282 // Check -2^20 <= X < 2^20
5283 return (size == 64 && Bits<21>::has_overflow((x))
5284 ? This::STATUS_OVERFLOW
5285 : This::STATUS_OKAY);
5286 }
5287
5288 // Calculate PG(S+A) - PG(address), update adrp instruction.
5289 // R_AARCH64_ADR_PREL_PG_HI21
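// Page(x) is x with its low 12 bits cleared, so X here is a page delta;
// the Bits<33> check below enforces ADRP's +/-4 GiB range.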
5290
5291 static inline typename This::Status
5292 adrp(
5293 unsigned char* view,
5294 Address sa,
5295 Address address)
5296 {
5297 AArch64_valtype x = This::Page(sa) - This::Page(address);
5298 // Pick [32:12] of X.
5299 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5300 update_adr(view, immed);
5301 // Check -2^32 <= X < 2^32
5302 return (size == 64 && Bits<33>::has_overflow((x))
5303 ? This::STATUS_OVERFLOW
5304 : This::STATUS_OKAY);
5305 }
5306
5307 // Calculate PG(S+A) - PG(address), update adrp instruction.
5308 // R_AARCH64_ADR_PREL_PG_HI21
5309
5310 static inline typename This::Status
5311 adrp(unsigned char* view,
5312 const Sized_relobj_file<size, big_endian>* object,
5313 const Symbol_value<size>* psymval,
5314 Address addend,
5315 Address address,
5316 const AArch64_reloc_property* reloc_property)
5317 {
5318 Address sa = psymval->value(object, addend);
5319 AArch64_valtype x = This::Page(sa) - This::Page(address);
5320 // Pick [32:12] of X.
5321 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5322 update_adr(view, immed);
5323 return (reloc_property->checkup_x_value(x)
5324 ? This::STATUS_OKAY
5325 : This::STATUS_OVERFLOW);
5326 }
5327
5328 // Update mov[n/z] instruction. Check overflow if needed.
5329 // If X >= 0, set the instruction to movz and its immediate value to the
5330 // selected bits of X.
5331 // If X < 0, set the instruction to movn and its immediate value to the
5332 // selected bits of NOT (X).
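// For example (illustrative): for x == -1, ~x == 0, so the instruction
// becomes movn with immediate 0, which materializes all-ones (-1).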
5333
5334 static inline typename This::Status
5335 movnz(unsigned char* view,
5336 AArch64_valtype x,
5337 const AArch64_reloc_property* reloc_property)
5338 {
5339 // Select bits from X.
5340 Address immed;
5341 bool is_movz;
5342 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5343 if (static_cast<SignedW>(x) >= 0)
5344 {
5345 immed = reloc_property->select_x_value(x);
5346 is_movz = true;
5347 }
5348 else
5349 {
5350 immed = reloc_property->select_x_value(~x);
5351 is_movz = false;
5352 }
5353
5354 // Update movnz instruction.
5355 update_movnz(view, immed, is_movz);
5356
5357 // Check for overflow or alignment if needed.
5358 return (reloc_property->checkup_x_value(x)
5359 ? This::STATUS_OKAY
5360 : This::STATUS_OVERFLOW);
5361 }
5362
5363 static inline bool
5364 maybe_apply_stub(unsigned int,
5365 const The_relocate_info*,
5366 const The_rela&,
5367 unsigned char*,
5368 Address,
5369 const Sized_symbol<size>*,
5370 const Symbol_value<size>*,
5371 const Sized_relobj_file<size, big_endian>*,
5372 section_size_type);
5373
5374 }; // End of AArch64_relocate_functions
5375
5376
5377 // For a certain relocation type (usually jump/branch), test whether the
5378 // destination is out of range and needs a stub. If so, re-route the
5379 // destination of the original instruction to the stub; note that at this
5380 // point the stub has already been generated.
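// The re-routing is done by recomputing the branch offset against the stub's
// address (stub_table->address() + stub->offset()) and patching the branch
// immediate with rela_general<32>.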
5381
5382 template<int size, bool big_endian>
5383 bool
5384 AArch64_relocate_functions<size, big_endian>::
5385 maybe_apply_stub(unsigned int r_type,
5386 const The_relocate_info* relinfo,
5387 const The_rela& rela,
5388 unsigned char* view,
5389 Address address,
5390 const Sized_symbol<size>* gsym,
5391 const Symbol_value<size>* psymval,
5392 const Sized_relobj_file<size, big_endian>* object,
5393 section_size_type current_group_size)
5394 {
5395 if (parameters->options().relocatable())
5396 return false;
5397
5398 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5399 Address branch_target = psymval->value(object, 0) + addend;
5400 int stub_type =
5401 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5402 if (stub_type == ST_NONE)
5403 return false;
5404
5405 const The_aarch64_relobj* aarch64_relobj =
5406 static_cast<const The_aarch64_relobj*>(object);
5407 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5408 gold_assert(stub_table != NULL);
5409
5410 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5411 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5412 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5413 gold_assert(stub != NULL);
5414
5415 Address new_branch_target = stub_table->address() + stub->offset();
5416 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5417 new_branch_target - address;
5418 const AArch64_reloc_property* arp =
5419 aarch64_reloc_property_table->get_reloc_property(r_type);
5420 gold_assert(arp != NULL);
5421 typename This::Status status = This::template
5422 rela_general<32>(view, branch_offset, 0, arp);
5423 if (status != This::STATUS_OKAY)
5424 gold_error(_("Stub is too far away, try a smaller value "
5425 "for '--stub-group-size'. The current value is 0x%lx."),
5426 static_cast<unsigned long>(current_group_size));
5427 return true;
5428 }
5429
5430
5431 // Group input sections for stub generation.
5432 //
5433 // We group input sections in an output section so that the total size,
5434 // including any padding due to alignment, is smaller than GROUP_SIZE,
5435 // unless the only input section in a group is already bigger than
5436 // GROUP_SIZE. For each group a stub table is created and placed
5437 // after the last input section in the group. If
5438 // STUB_ALWAYS_AFTER_BRANCH is false, we further extend the group past
5439 // the stub table.
5440
5441 template<int size, bool big_endian>
5442 void
5443 Target_aarch64<size, big_endian>::group_sections(
5444 Layout* layout,
5445 section_size_type group_size,
5446 bool stubs_always_after_branch,
5447 const Task* task)
5448 {
5449 // Group input sections and insert stub table
5450 Layout::Section_list section_list;
5451 layout->get_executable_sections(&section_list);
5452 for (Layout::Section_list::const_iterator p = section_list.begin();
5453 p != section_list.end();
5454 ++p)
5455 {
5456 AArch64_output_section<size, big_endian>* output_section =
5457 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5458 output_section->group_sections(group_size, stubs_always_after_branch,
5459 this, task);
5460 }
5461 }
5462
5463
5464 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5465 // section of RELOBJ.
5466
5467 template<int size, bool big_endian>
5468 AArch64_input_section<size, big_endian>*
5469 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5470 Relobj* relobj, unsigned int shndx) const
5471 {
5472 Section_id sid(relobj, shndx);
5473 typename AArch64_input_section_map::const_iterator p =
5474 this->aarch64_input_section_map_.find(sid);
5475 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5476 }
5477
5478
5479 // Make a new AArch64_input_section object.
5480
5481 template<int size, bool big_endian>
5482 AArch64_input_section<size, big_endian>*
5483 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5484 Relobj* relobj, unsigned int shndx)
5485 {
5486 Section_id sid(relobj, shndx);
5487
5488 AArch64_input_section<size, big_endian>* input_section =
5489 new AArch64_input_section<size, big_endian>(relobj, shndx);
5490 input_section->init();
5491
5492 // Register new AArch64_input_section in map for look-up.
5493 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5494 this->aarch64_input_section_map_.insert(
5495 std::make_pair(sid, input_section));
5496
5497 // Make sure that we have not created another AArch64_input_section
5498 // for this input section already.
5499 gold_assert(ins.second);
5500
5501 return input_section;
5502 }
5503
5504
5505 // Relaxation hook. This is where we do stub generation.
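// Pass 1 groups the executable input sections; later passes re-place the
// stub tables, rescan relocations for needed stubs, and return true while
// any stub table is still changing size.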
5506
5507 template<int size, bool big_endian>
5508 bool
5509 Target_aarch64<size, big_endian>::do_relax(
5510 int pass,
5511 const Input_objects* input_objects,
5512 Symbol_table* symtab,
5513 Layout* layout ,
5514 const Task* task)
5515 {
5516 gold_assert(!parameters->options().relocatable());
5517 if (pass == 1)
5518 {
5519 // We don't handle negative stub_group_size right now.
5520 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5521 if (this->stub_group_size_ == 1)
5522 {
5523 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5524 // will fail to link. The user will have to relink with an explicit
5525 // group size option.
5526 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5527 4096 * 4;
5528 }
5529 group_sections(layout, this->stub_group_size_, true, task);
5530 }
5531 else
5532 {
5533 // If this is not the first pass, addresses and file offsets have
5534 // been reset at this point; set them here.
5535 for (Stub_table_iterator sp = this->stub_tables_.begin();
5536 sp != this->stub_tables_.end(); ++sp)
5537 {
5538 The_stub_table* stt = *sp;
5539 The_aarch64_input_section* owner = stt->owner();
5540 off_t off = align_address(owner->original_size(),
5541 stt->addralign());
5542 stt->set_address_and_file_offset(owner->address() + off,
5543 owner->offset() + off);
5544 }
5545 }
5546
5547 // Scan relocs for relocation stubs
5548 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5549 op != input_objects->relobj_end();
5550 ++op)
5551 {
5552 The_aarch64_relobj* aarch64_relobj =
5553 static_cast<The_aarch64_relobj*>(*op);
5554 // Lock the object so we can read from it. This is only called
5555 // single-threaded from Layout::finalize, so it is OK to lock.
5556 Task_lock_obj<Object> tl(task, aarch64_relobj);
5557 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5558 }
5559
5560 bool any_stub_table_changed = false;
5561 for (Stub_table_iterator siter = this->stub_tables_.begin();
5562 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5563 {
5564 The_stub_table* stub_table = *siter;
5565 if (stub_table->update_data_size_changed_p())
5566 {
5567 The_aarch64_input_section* owner = stub_table->owner();
5568 uint64_t address = owner->address();
5569 off_t offset = owner->offset();
5570 owner->reset_address_and_file_offset();
5571 owner->set_address_and_file_offset(address, offset);
5572
5573 any_stub_table_changed = true;
5574 }
5575 }
5576
5577 // Continue relaxation only if a stub table changed; otherwise finalize stubs.
5578 bool continue_relaxation = any_stub_table_changed;
5579 if (!continue_relaxation)
5580 for (Stub_table_iterator sp = this->stub_tables_.begin();
5581 (sp != this->stub_tables_.end());
5582 ++sp)
5583 (*sp)->finalize_stubs();
5584
5585 return continue_relaxation;
5586 }
5587
5588
5589 // Make a new Stub_table.
5590
5591 template<int size, bool big_endian>
5592 Stub_table<size, big_endian>*
5593 Target_aarch64<size, big_endian>::new_stub_table(
5594 AArch64_input_section<size, big_endian>* owner)
5595 {
5596 Stub_table<size, big_endian>* stub_table =
5597 new Stub_table<size, big_endian>(owner);
5598 stub_table->set_address(align_address(
5599 owner->address() + owner->data_size(), 8));
5600 stub_table->set_file_offset(owner->offset() + owner->data_size());
5601 stub_table->finalize_data_size();
5602
5603 this->stub_tables_.push_back(stub_table);
5604
5605 return stub_table;
5606 }
5607
5608
5609 template<int size, bool big_endian>
5610 uint64_t
5611 Target_aarch64<size, big_endian>::do_reloc_addend(
5612 void* arg, unsigned int r_type, uint64_t) const
5613 {
5614 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5615 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5616 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5617 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5618 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5619 gold_assert(psymval->is_tls_symbol());
5620 // The value of a TLS symbol is the offset in the TLS segment.
5621 return psymval->value(ti.object, 0);
5622 }
5623
5624 // Return the number of entries in the PLT.
5625
5626 template<int size, bool big_endian>
5627 unsigned int
5628 Target_aarch64<size, big_endian>::plt_entry_count() const
5629 {
5630 if (this->plt_ == NULL)
5631 return 0;
5632 return this->plt_->entry_count();
5633 }
5634
5635 // Return the offset of the first non-reserved PLT entry.
5636
5637 template<int size, bool big_endian>
5638 unsigned int
5639 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5640 {
5641 return this->plt_->first_plt_entry_offset();
5642 }
5643
5644 // Return the size of each PLT entry.
5645
5646 template<int size, bool big_endian>
5647 unsigned int
5648 Target_aarch64<size, big_endian>::plt_entry_size() const
5649 {
5650 return this->plt_->get_plt_entry_size();
5651 }
5652
5653 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5654
5655 template<int size, bool big_endian>
5656 void
5657 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5658 Symbol_table* symtab, Layout* layout)
5659 {
5660 if (this->tls_base_symbol_defined_)
5661 return;
5662
5663 Output_segment* tls_segment = layout->tls_segment();
5664 if (tls_segment != NULL)
5665 {
5666 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5667 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5668 Symbol_table::PREDEFINED,
5669 tls_segment, 0, 0,
5670 elfcpp::STT_TLS,
5671 elfcpp::STB_LOCAL,
5672 elfcpp::STV_HIDDEN, 0,
5673 Symbol::SEGMENT_START,
5674 true);
5675 }
5676 this->tls_base_symbol_defined_ = true;
5677 }
5678
5679 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5680
5681 template<int size, bool big_endian>
5682 void
5683 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5684 Symbol_table* symtab, Layout* layout)
5685 {
5686 if (this->plt_ == NULL)
5687 this->make_plt_section(symtab, layout);
5688
5689 if (!this->plt_->has_tlsdesc_entry())
5690 {
5691 // Allocate the TLSDESC_GOT entry.
5692 Output_data_got_aarch64<size, big_endian>* got =
5693 this->got_section(symtab, layout);
5694 unsigned int got_offset = got->add_constant(0);
5695
5696 // Allocate the TLSDESC_PLT entry.
5697 this->plt_->reserve_tlsdesc_entry(got_offset);
5698 }
5699 }
5700
5701 // Create a GOT entry for the TLS module index.
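// Two consecutive GOT slots are allocated: a DTPMOD64 dynamic reloc fills the
// first with the module index at run time, while the second (the offset word)
// is left as zero for Local-Dynamic accesses.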
5702
5703 template<int size, bool big_endian>
5704 unsigned int
5705 Target_aarch64<size, big_endian>::got_mod_index_entry(
5706 Symbol_table* symtab, Layout* layout,
5707 Sized_relobj_file<size, big_endian>* object)
5708 {
5709 if (this->got_mod_index_offset_ == -1U)
5710 {
5711 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5712 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5713 Output_data_got_aarch64<size, big_endian>* got =
5714 this->got_section(symtab, layout);
5715 unsigned int got_offset = got->add_constant(0);
5716 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5717 got_offset, 0);
5718 got->add_constant(0);
5719 this->got_mod_index_offset_ = got_offset;
5720 }
5721 return this->got_mod_index_offset_;
5722 }
5723
5724 // Optimize the TLS relocation type based on what we know about the
5725 // symbol. IS_FINAL is true if the final address of this symbol is
5726 // known at link time.
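// In summary (see the switch below): General-Dynamic/TLSDESC can become
// Initial-Exec, or Local-Exec when IS_FINAL; Local-Dynamic becomes Local-Exec;
// Initial-Exec becomes Local-Exec when IS_FINAL; Local-Exec stays as is.
// No optimization is done when building a shared library.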
5727
5728 template<int size, bool big_endian>
5729 tls::Tls_optimization
5730 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5731 int r_type)
5732 {
5733 // If we are generating a shared library, then we can't do anything
5734 // in the linker.
5735 if (parameters->options().shared())
5736 return tls::TLSOPT_NONE;
5737
5738 switch (r_type)
5739 {
5740 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5741 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5742 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5743 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5744 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5745 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5746 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5747 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5748 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5749 case elfcpp::R_AARCH64_TLSDESC_LDR:
5750 case elfcpp::R_AARCH64_TLSDESC_ADD:
5751 case elfcpp::R_AARCH64_TLSDESC_CALL:
5752 // These are General-Dynamic relocations, which permit fully general TLS
5753 // access. Since we know that we are generating an executable,
5754 // we can convert this to Initial-Exec. If we also know that
5755 // this is a local symbol, we can further switch to Local-Exec.
5756 if (is_final)
5757 return tls::TLSOPT_TO_LE;
5758 return tls::TLSOPT_TO_IE;
5759
5760 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5761 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5762 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5763 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5764 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5765 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5766 // These are Local-Dynamic, which refer to local symbols in the
5767 // dynamic TLS block. Since we know that we are generating an
5768 // executable, we can switch to Local-Exec.
5769 return tls::TLSOPT_TO_LE;
5770
5771 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5772 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5773 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5774 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5775 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5776 // These are Initial-Exec relocs which get the thread offset
5777 // from the GOT. If we know that we are linking against the
5778 // local symbol, we can switch to Local-Exec, which links the
5779 // thread offset into the instruction.
5780 if (is_final)
5781 return tls::TLSOPT_TO_LE;
5782 return tls::TLSOPT_NONE;
5783
5784 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5785 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5786 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5787 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5788 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5789 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5790 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5791 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5792 // When we already have Local-Exec, there is nothing further we
5793 // can do.
5794 return tls::TLSOPT_NONE;
5795
5796 default:
5797 gold_unreachable();
5798 }
5799 }
5800
5801 // Returns true if this relocation type could be that of a function pointer.
5802
5803 template<int size, bool big_endian>
5804 inline bool
5805 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5806 unsigned int r_type)
5807 {
5808 switch (r_type)
5809 {
5810 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5811 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5812 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5813 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5814 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5815 {
5816 return true;
5817 }
5818 }
5819 return false;
5820 }
5821
5822 // For safe ICF, scan a relocation for a local symbol to check if it
5823 // corresponds to a function pointer being taken. In that case mark
5824 // the function whose pointer was taken as not foldable.
5825
5826 template<int size, bool big_endian>
5827 inline bool
5828 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5829 Symbol_table* ,
5830 Layout* ,
5831 Target_aarch64<size, big_endian>* ,
5832 Sized_relobj_file<size, big_endian>* ,
5833 unsigned int ,
5834 Output_section* ,
5835 const elfcpp::Rela<size, big_endian>& ,
5836 unsigned int r_type,
5837 const elfcpp::Sym<size, big_endian>&)
5838 {
5839 // When building a shared library, do not fold any local symbols.
5840 return (parameters->options().shared()
5841 || possible_function_pointer_reloc(r_type));
5842 }
5843
5844 // For safe ICF, scan a relocation for a global symbol to check if it
5845 // corresponds to a function pointer being taken. In that case mark
5846 // the function whose pointer was taken as not foldable.
5847
5848 template<int size, bool big_endian>
5849 inline bool
5850 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5851 Symbol_table* ,
5852 Layout* ,
5853 Target_aarch64<size, big_endian>* ,
5854 Sized_relobj_file<size, big_endian>* ,
5855 unsigned int ,
5856 Output_section* ,
5857 const elfcpp::Rela<size, big_endian>& ,
5858 unsigned int r_type,
5859 Symbol* gsym)
5860 {
5861 // When building a shared library, do not fold symbols whose visibility
5862 // is hidden, internal or protected.
5863 return ((parameters->options().shared()
5864 && (gsym->visibility() == elfcpp::STV_INTERNAL
5865 || gsym->visibility() == elfcpp::STV_PROTECTED
5866 || gsym->visibility() == elfcpp::STV_HIDDEN))
5867 || possible_function_pointer_reloc(r_type));
5868 }
5869
5870 // Report an unsupported relocation against a local symbol.
5871
5872 template<int size, bool big_endian>
5873 void
5874 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5875 Sized_relobj_file<size, big_endian>* object,
5876 unsigned int r_type)
5877 {
5878 gold_error(_("%s: unsupported reloc %u against local symbol"),
5879 object->name().c_str(), r_type);
5880 }
5881
5882 // We are about to emit a dynamic relocation of type R_TYPE. If the
5883 // dynamic linker does not support it, issue an error.
5884
5885 template<int size, bool big_endian>
5886 void
5887 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5888 unsigned int r_type)
5889 {
5890 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5891
5892 switch (r_type)
5893 {
5894 // These are the relocation types supported by glibc for AARCH64.
5895 case elfcpp::R_AARCH64_NONE:
5896 case elfcpp::R_AARCH64_COPY:
5897 case elfcpp::R_AARCH64_GLOB_DAT:
5898 case elfcpp::R_AARCH64_JUMP_SLOT:
5899 case elfcpp::R_AARCH64_RELATIVE:
5900 case elfcpp::R_AARCH64_TLS_DTPREL64:
5901 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5902 case elfcpp::R_AARCH64_TLS_TPREL64:
5903 case elfcpp::R_AARCH64_TLSDESC:
5904 case elfcpp::R_AARCH64_IRELATIVE:
5905 case elfcpp::R_AARCH64_ABS32:
5906 case elfcpp::R_AARCH64_ABS64:
5907 return;
5908
5909 default:
5910 break;
5911 }
5912
5913 // This prevents us from issuing more than one error per reloc
5914 // section. But we can still wind up issuing more than one
5915 // error per object file.
5916 if (this->issued_non_pic_error_)
5917 return;
5918 gold_assert(parameters->options().output_is_position_independent());
5919 object->error(_("requires unsupported dynamic reloc; "
5920 "recompile with -fPIC"));
5921 this->issued_non_pic_error_ = true;
5922 return;
5923 }
5924
5925 // Return whether we need to make a PLT entry for a relocation of the
5926 // given type against a STT_GNU_IFUNC symbol.
5927
5928 template<int size, bool big_endian>
5929 bool
5930 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5931 Sized_relobj_file<size, big_endian>* object,
5932 unsigned int r_type)
5933 {
5934 const AArch64_reloc_property* arp =
5935 aarch64_reloc_property_table->get_reloc_property(r_type);
5936 gold_assert(arp != NULL);
5937
5938 int flags = arp->reference_flags();
5939 if (flags & Symbol::TLS_REF)
5940 {
5941 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5942 object->name().c_str(), arp->name().c_str());
5943 return false;
5944 }
5945 return flags != 0;
5946 }
5947
5948 // Scan a relocation for a local symbol.
5949
5950 template<int size, bool big_endian>
5951 inline void
5952 Target_aarch64<size, big_endian>::Scan::local(
5953 Symbol_table* symtab,
5954 Layout* layout,
5955 Target_aarch64<size, big_endian>* target,
5956 Sized_relobj_file<size, big_endian>* object,
5957 unsigned int data_shndx,
5958 Output_section* output_section,
5959 const elfcpp::Rela<size, big_endian>& rela,
5960 unsigned int r_type,
5961 const elfcpp::Sym<size, big_endian>& lsym,
5962 bool is_discarded)
5963 {
5964 if (is_discarded)
5965 return;
5966
5967 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5968 Reloc_section;
5969 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5970
5971 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5972 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5973 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5974 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5975
5976 switch (r_type)
5977 {
5978 case elfcpp::R_AARCH64_NONE:
5979 break;
5980
5981 case elfcpp::R_AARCH64_ABS32:
5982 case elfcpp::R_AARCH64_ABS16:
5983 if (parameters->options().output_is_position_independent())
5984 {
5985 gold_error(_("%s: unsupported reloc %u in pos independent link."),
5986 object->name().c_str(), r_type);
5987 }
5988 break;
5989
5990 case elfcpp::R_AARCH64_ABS64:
5991 // If building a shared library or PIE, we need to mark this as a dynamic
5992 // relocation, so that the dynamic loader can relocate it.
5993 if (parameters->options().output_is_position_independent())
5994 {
5995 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5996 rela_dyn->add_local_relative(object, r_sym,
5997 elfcpp::R_AARCH64_RELATIVE,
5998 output_section,
5999 data_shndx,
6000 rela.get_r_offset(),
6001 rela.get_r_addend(),
6002 is_ifunc);
6003 }
6004 break;
6005
6006 case elfcpp::R_AARCH64_PREL64:
6007 case elfcpp::R_AARCH64_PREL32:
6008 case elfcpp::R_AARCH64_PREL16:
6009 break;
6010
6011 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6012 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6013 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6014 // The above relocations are used to access GOT entries.
6015 {
6016 Output_data_got_aarch64<size, big_endian>* got =
6017 target->got_section(symtab, layout);
6018 bool is_new = false;
6019 // This symbol requires a GOT entry.
6020 if (is_ifunc)
6021 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6022 else
6023 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6024 if (is_new && parameters->options().output_is_position_independent())
6025 target->rela_dyn_section(layout)->
6026 add_local_relative(object,
6027 r_sym,
6028 elfcpp::R_AARCH64_RELATIVE,
6029 got,
6030 object->local_got_offset(r_sym,
6031 GOT_TYPE_STANDARD),
6032 0,
6033 false);
6034 }
6035 break;
6036
6037 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6038 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6039 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6040 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6041 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6042 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6043 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6044 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6045 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6046 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6047 if (parameters->options().output_is_position_independent())
6048 {
6049 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6050 object->name().c_str(), r_type);
6051 }
6052 break;
6053
6054 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6055 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6056 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6057 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6058 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6059 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6060 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6061 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6062 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6063 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6064 break;
6065
6066 // Control flow, pc-relative. We don't need to do anything for a relative
6067 // addressing relocation against a local symbol if it does not reference
6068 // the GOT.
6069 case elfcpp::R_AARCH64_TSTBR14:
6070 case elfcpp::R_AARCH64_CONDBR19:
6071 case elfcpp::R_AARCH64_JUMP26:
6072 case elfcpp::R_AARCH64_CALL26:
6073 break;
6074
6075 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6076 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6077 {
6078 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6079 optimize_tls_reloc(!parameters->options().shared(), r_type);
6080 if (tlsopt == tls::TLSOPT_TO_LE)
6081 break;
6082
6083 layout->set_has_static_tls();
6084 // Create a GOT entry for the tp-relative offset.
6085 if (!parameters->doing_static_link())
6086 {
6087 Output_data_got_aarch64<size, big_endian>* got =
6088 target->got_section(symtab, layout);
6089 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6090 target->rela_dyn_section(layout),
6091 elfcpp::R_AARCH64_TLS_TPREL64);
6092 }
6093 else if (!object->local_has_got_offset(r_sym,
6094 GOT_TYPE_TLS_OFFSET))
6095 {
6096 Output_data_got_aarch64<size, big_endian>* got =
6097 target->got_section(symtab, layout);
6098 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6099 unsigned int got_offset =
6100 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6101 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6102 gold_assert(addend == 0);
6103 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6104 object, r_sym);
6105 }
6106 }
6107 break;
6108
6109 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6110 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6111 {
6112 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6113 optimize_tls_reloc(!parameters->options().shared(), r_type);
6114 if (tlsopt == tls::TLSOPT_TO_LE)
6115 {
6116 layout->set_has_static_tls();
6117 break;
6118 }
6119 gold_assert(tlsopt == tls::TLSOPT_NONE);
6120
6121 Output_data_got_aarch64<size, big_endian>* got =
6122 target->got_section(symtab, layout);
6123 got->add_local_pair_with_rel(object, r_sym, data_shndx,
6124 GOT_TYPE_TLS_PAIR,
6125 target->rela_dyn_section(layout),
6126 elfcpp::R_AARCH64_TLS_DTPMOD64);
6127 }
6128 break;
6129
6130 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6131 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6132 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6133 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6134 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6135 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6136 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6137 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6138 {
6139 layout->set_has_static_tls();
6140 bool output_is_shared = parameters->options().shared();
6141 if (output_is_shared)
6142 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6143 object->name().c_str(), r_type);
6144 }
6145 break;
6146
6147 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6148 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6149 {
6150 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6151 optimize_tls_reloc(!parameters->options().shared(), r_type);
6152 if (tlsopt == tls::TLSOPT_NONE)
6153 {
6154 // Create a GOT entry for the module index.
6155 target->got_mod_index_entry(symtab, layout, object);
6156 }
6157 else if (tlsopt != tls::TLSOPT_TO_LE)
6158 unsupported_reloc_local(object, r_type);
6159 }
6160 break;
6161
6162 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6163 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6164 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6165 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6166 break;
6167
6168 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6169 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6170 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6171 {
6172 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6173 optimize_tls_reloc(!parameters->options().shared(), r_type);
6174 target->define_tls_base_symbol(symtab, layout);
6175 if (tlsopt == tls::TLSOPT_NONE)
6176 {
6177 // Create reserved PLT and GOT entries for the resolver.
6178 target->reserve_tlsdesc_entries(symtab, layout);
6179
6180 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6181 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6182 // entry needs to be in an area in .got.plt, not .got. Call
6183 // got_section to make sure the section has been created.
6184 target->got_section(symtab, layout);
6185 Output_data_got<size, big_endian>* got =
6186 target->got_tlsdesc_section();
6187 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6188 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6189 {
6190 unsigned int got_offset = got->add_constant(0);
6191 got->add_constant(0);
6192 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6193 got_offset);
6194 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6195 // We store the arguments we need in a vector, and use
6196 // the index into the vector as the parameter to pass
6197 // to the target specific routines.
6198 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6199 void* arg = reinterpret_cast<void*>(intarg);
6200 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6201 got, got_offset, 0);
6202 }
6203 }
6204 else if (tlsopt != tls::TLSOPT_TO_LE)
6205 unsupported_reloc_local(object, r_type);
6206 }
6207 break;
6208
6209 case elfcpp::R_AARCH64_TLSDESC_CALL:
6210 break;
6211
6212 default:
6213 unsupported_reloc_local(object, r_type);
6214 }
6215 }
6216
6217
6218 // Report an unsupported relocation against a global symbol.
6219
6220 template<int size, bool big_endian>
6221 void
6222 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6223 Sized_relobj_file<size, big_endian>* object,
6224 unsigned int r_type,
6225 Symbol* gsym)
6226 {
6227 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6228 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6229 }
6230
6231 template<int size, bool big_endian>
6232 inline void
6233 Target_aarch64<size, big_endian>::Scan::global(
6234 Symbol_table* symtab,
6235 Layout* layout,
6236 Target_aarch64<size, big_endian>* target,
6237 Sized_relobj_file<size, big_endian> * object,
6238 unsigned int data_shndx,
6239 Output_section* output_section,
6240 const elfcpp::Rela<size, big_endian>& rela,
6241 unsigned int r_type,
6242 Symbol* gsym)
6243 {
6244 // A STT_GNU_IFUNC symbol may require a PLT entry.
6245 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6246 && this->reloc_needs_plt_for_ifunc(object, r_type))
6247 target->make_plt_entry(symtab, layout, gsym);
6248
6249 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6250 Reloc_section;
6251 const AArch64_reloc_property* arp =
6252 aarch64_reloc_property_table->get_reloc_property(r_type);
6253 gold_assert(arp != NULL);
6254
6255 switch (r_type)
6256 {
6257 case elfcpp::R_AARCH64_NONE:
6258 break;
6259
6260 case elfcpp::R_AARCH64_ABS16:
6261 case elfcpp::R_AARCH64_ABS32:
6262 case elfcpp::R_AARCH64_ABS64:
6263 {
6264 // Make a PLT entry if necessary.
6265 if (gsym->needs_plt_entry())
6266 {
6267 target->make_plt_entry(symtab, layout, gsym);
6268 // Since this is not a PC-relative relocation, we may be
6269 // taking the address of a function. In that case we need to
6270 // set the entry in the dynamic symbol table to the address of
6271 // the PLT entry.
6272 if (gsym->is_from_dynobj() && !parameters->options().shared())
6273 gsym->set_needs_dynsym_value();
6274 }
6275 // Make a dynamic relocation if necessary.
6276 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6277 {
6278 if (!parameters->options().output_is_position_independent()
6279 && gsym->may_need_copy_reloc())
6280 {
6281 target->copy_reloc(symtab, layout, object,
6282 data_shndx, output_section, gsym, rela);
6283 }
6284 else if (r_type == elfcpp::R_AARCH64_ABS64
6285 && gsym->type() == elfcpp::STT_GNU_IFUNC
6286 && gsym->can_use_relative_reloc(false)
6287 && !gsym->is_from_dynobj()
6288 && !gsym->is_undefined()
6289 && !gsym->is_preemptible())
6290 {
6291 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6292 // symbol. This makes a function address in a PIE executable
6293 // match the address in a shared library that it links against.
6294 Reloc_section* rela_dyn =
6295 target->rela_irelative_section(layout);
6296 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6297 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6298 output_section, object,
6299 data_shndx,
6300 rela.get_r_offset(),
6301 rela.get_r_addend());
6302 }
6303 else if (r_type == elfcpp::R_AARCH64_ABS64
6304 && gsym->can_use_relative_reloc(false))
6305 {
6306 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6307 rela_dyn->add_global_relative(gsym,
6308 elfcpp::R_AARCH64_RELATIVE,
6309 output_section,
6310 object,
6311 data_shndx,
6312 rela.get_r_offset(),
6313 rela.get_r_addend(),
6314 false);
6315 }
6316 else
6317 {
6318 check_non_pic(object, r_type);
6319 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6320 rela_dyn = target->rela_dyn_section(layout);
6321 rela_dyn->add_global(
6322 gsym, r_type, output_section, object,
6323 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6324 }
6325 }
6326 }
6327 break;
6328
6329 case elfcpp::R_AARCH64_PREL16:
6330 case elfcpp::R_AARCH64_PREL32:
6331 case elfcpp::R_AARCH64_PREL64:
6332 // This is used to fill the GOT absolute address.
6333 if (gsym->needs_plt_entry())
6334 {
6335 target->make_plt_entry(symtab, layout, gsym);
6336 }
6337 break;
6338
6339 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6340 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6341 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6342 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6343 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6344 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6345 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6346 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6347 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6348 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6349 if (parameters->options().output_is_position_independent())
6350 {
6351 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6352 object->name().c_str(), r_type);
6353 }
6354 break;
6355
6356 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6357 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6358 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6359 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6360 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6361 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6362 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6363 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6364 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6365 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6366 {
6367 if (gsym->needs_plt_entry())
6368 target->make_plt_entry(symtab, layout, gsym);
6369 // Make a dynamic relocation if necessary.
6370 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6371 {
6372 if (parameters->options().output_is_executable()
6373 && gsym->may_need_copy_reloc())
6374 {
6375 target->copy_reloc(symtab, layout, object,
6376 data_shndx, output_section, gsym, rela);
6377 }
6378 }
6379 break;
6380 }
6381
6382 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6383 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6384 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6385 {
6386 // The above relocations are used to access GOT entries.
6387 // Note a GOT entry holds the *address* of a symbol.
6388 // The symbol requires a GOT entry.
6389 Output_data_got_aarch64<size, big_endian>* got =
6390 target->got_section(symtab, layout);
6391 if (gsym->final_value_is_known())
6392 {
6393 // For a STT_GNU_IFUNC symbol we want the PLT address.
6394 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6395 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6396 else
6397 got->add_global(gsym, GOT_TYPE_STANDARD);
6398 }
6399 else
6400 {
6401 // If this symbol is not fully resolved, we need to add a dynamic
6402 // relocation for it.
6403 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6404
6405 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6406 //
6407 // 1) The symbol may be defined in some other module.
6408 // 2) We are building a shared library and this is a protected
6409 // symbol; using GLOB_DAT means that the dynamic linker can use
6410 // the address of the PLT in the main executable when appropriate
6411 // so that function address comparisons work.
6412 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6413 // again so that function address comparisons work.
6414 if (gsym->is_from_dynobj()
6415 || gsym->is_undefined()
6416 || gsym->is_preemptible()
6417 || (gsym->visibility() == elfcpp::STV_PROTECTED
6418 && parameters->options().shared())
6419 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6420 && parameters->options().output_is_position_independent()))
6421 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6422 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6423 else
6424 {
6425 // For a STT_GNU_IFUNC symbol we want to write the PLT
6426 // offset into the GOT, so that function pointer
6427 // comparisons work correctly.
6428 bool is_new;
6429 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6430 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6431 else
6432 {
6433 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6434 // Tell the dynamic linker to use the PLT address
6435 // when resolving relocations.
6436 if (gsym->is_from_dynobj()
6437 && !parameters->options().shared())
6438 gsym->set_needs_dynsym_value();
6439 }
6440 if (is_new)
6441 {
6442 rela_dyn->add_global_relative(
6443 gsym, elfcpp::R_AARCH64_RELATIVE,
6444 got,
6445 gsym->got_offset(GOT_TYPE_STANDARD),
6446 0,
6447 false);
6448 }
6449 }
6450 }
6451 break;
6452 }
6453
6454 case elfcpp::R_AARCH64_TSTBR14:
6455 case elfcpp::R_AARCH64_CONDBR19:
6456 case elfcpp::R_AARCH64_JUMP26:
6457 case elfcpp::R_AARCH64_CALL26:
6458 {
6459 if (gsym->final_value_is_known())
6460 break;
6461
6462 if (gsym->is_defined() &&
6463 !gsym->is_from_dynobj() &&
6464 !gsym->is_preemptible())
6465 break;
6466
6467 // Make plt entry for function call.
6468 target->make_plt_entry(symtab, layout, gsym);
6469 break;
6470 }
6471
6472 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6473 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6474 {
6475 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6476 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6477 if (tlsopt == tls::TLSOPT_TO_LE)
6478 {
6479 layout->set_has_static_tls();
6480 break;
6481 }
6482 gold_assert(tlsopt == tls::TLSOPT_NONE);
6483
6484 // General dynamic.
6485 Output_data_got_aarch64<size, big_endian>* got =
6486 target->got_section(symtab, layout);
6487 // Create 2 consecutive entries for module index and offset.
6488 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6489 target->rela_dyn_section(layout),
6490 elfcpp::R_AARCH64_TLS_DTPMOD64,
6491 elfcpp::R_AARCH64_TLS_DTPREL64);
6492 }
6493 break;
6494
6495 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6496 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6497 {
6498 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6499 optimize_tls_reloc(!parameters->options().shared(), r_type);
6500 if (tlsopt == tls::TLSOPT_NONE)
6501 {
6502 // Create a GOT entry for the module index.
6503 target->got_mod_index_entry(symtab, layout, object);
6504 }
6505 else if (tlsopt != tls::TLSOPT_TO_LE)
6506 unsupported_reloc_local(object, r_type);
6507 }
6508 break;
6509
6510 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6511 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6512 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6513 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6514 break;
6515
6516 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6517 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6518 {
6519 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6520 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6521 if (tlsopt == tls::TLSOPT_TO_LE)
6522 break;
6523
6524 layout->set_has_static_tls();
6525 // Create a GOT entry for the tp-relative offset.
6526 Output_data_got_aarch64<size, big_endian>* got
6527 = target->got_section(symtab, layout);
6528 if (!parameters->doing_static_link())
6529 {
6530 got->add_global_with_rel(
6531 gsym, GOT_TYPE_TLS_OFFSET,
6532 target->rela_dyn_section(layout),
6533 elfcpp::R_AARCH64_TLS_TPREL64);
6534 }
6535 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6536 {
6537 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6538 unsigned int got_offset =
6539 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6540 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6541 gold_assert(addend == 0);
6542 got->add_static_reloc(got_offset,
6543 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6544 }
6545 }
6546 break;
6547
6548 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6549 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6550 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6551 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6552 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6553 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6554 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6555 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6556 layout->set_has_static_tls();
6557 if (parameters->options().shared())
6558 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6559 object->name().c_str(), r_type);
6560 break;
6561
6562 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6563 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6564 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6565 {
6566 target->define_tls_base_symbol(symtab, layout);
6567 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6568 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6569 if (tlsopt == tls::TLSOPT_NONE)
6570 {
6571 // Create reserved PLT and GOT entries for the resolver.
6572 target->reserve_tlsdesc_entries(symtab, layout);
6573
6574 // Create a double GOT entry with an R_AARCH64_TLSDESC
6575 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6576 // entry needs to be in an area in .got.plt, not .got. Call
6577 // got_section to make sure the section has been created.
6578 target->got_section(symtab, layout);
6579 Output_data_got<size, big_endian>* got =
6580 target->got_tlsdesc_section();
6581 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6582 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6583 elfcpp::R_AARCH64_TLSDESC, 0);
6584 }
6585 else if (tlsopt == tls::TLSOPT_TO_IE)
6586 {
6587 // Create a GOT entry for the tp-relative offset.
6588 Output_data_got<size, big_endian>* got
6589 = target->got_section(symtab, layout);
6590 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6591 target->rela_dyn_section(layout),
6592 elfcpp::R_AARCH64_TLS_TPREL64);
6593 }
6594 else if (tlsopt != tls::TLSOPT_TO_LE)
6595 unsupported_reloc_global(object, r_type, gsym);
6596 }
6597 break;
6598
6599 case elfcpp::R_AARCH64_TLSDESC_CALL:
6600 break;
6601
6602 default:
6603 gold_error(_("%s: unsupported reloc type in global scan"),
6604 aarch64_reloc_property_table->
6605 reloc_name_in_error_message(r_type).c_str());
6606 }
6607 return;
6608 } // End of Scan::global
6609
6610
6611 // Create the PLT section.
6612 template<int size, bool big_endian>
6613 void
6614 Target_aarch64<size, big_endian>::make_plt_section(
6615 Symbol_table* symtab, Layout* layout)
6616 {
6617 if (this->plt_ == NULL)
6618 {
6619 // Create the GOT section first.
6620 this->got_section(symtab, layout);
6621
6622 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6623 this->got_irelative_);
6624
6625 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6626 (elfcpp::SHF_ALLOC
6627 | elfcpp::SHF_EXECINSTR),
6628 this->plt_, ORDER_PLT, false);
6629
6630 // Make the sh_info field of .rela.plt point to .plt.
6631 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6632 rela_plt_os->set_info_section(this->plt_->output_section());
6633 }
6634 }
6635
6636 // Return the section for TLSDESC relocations.
6637
6638 template<int size, bool big_endian>
6639 typename Target_aarch64<size, big_endian>::Reloc_section*
6640 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6641 {
6642 return this->plt_section()->rela_tlsdesc(layout);
6643 }
6644
6645 // Create a PLT entry for a global symbol.
6646
6647 template<int size, bool big_endian>
6648 void
6649 Target_aarch64<size, big_endian>::make_plt_entry(
6650 Symbol_table* symtab,
6651 Layout* layout,
6652 Symbol* gsym)
6653 {
6654 if (gsym->has_plt_offset())
6655 return;
6656
6657 if (this->plt_ == NULL)
6658 this->make_plt_section(symtab, layout);
6659
6660 this->plt_->add_entry(symtab, layout, gsym);
6661 }
6662
6663 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6664
6665 template<int size, bool big_endian>
6666 void
6667 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6668 Symbol_table* symtab, Layout* layout,
6669 Sized_relobj_file<size, big_endian>* relobj,
6670 unsigned int local_sym_index)
6671 {
6672 if (relobj->local_has_plt_offset(local_sym_index))
6673 return;
6674 if (this->plt_ == NULL)
6675 this->make_plt_section(symtab, layout);
6676 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6677 relobj,
6678 local_sym_index);
6679 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6680 }
6681
6682 template<int size, bool big_endian>
6683 void
6684 Target_aarch64<size, big_endian>::gc_process_relocs(
6685 Symbol_table* symtab,
6686 Layout* layout,
6687 Sized_relobj_file<size, big_endian>* object,
6688 unsigned int data_shndx,
6689 unsigned int sh_type,
6690 const unsigned char* prelocs,
6691 size_t reloc_count,
6692 Output_section* output_section,
6693 bool needs_special_offset_handling,
6694 size_t local_symbol_count,
6695 const unsigned char* plocal_symbols)
6696 {
6697 typedef Target_aarch64<size, big_endian> Aarch64;
6698 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6699 Classify_reloc;
6700
6701 if (sh_type == elfcpp::SHT_REL)
6702 {
6703 return;
6704 }
6705
6706 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6707 symtab,
6708 layout,
6709 this,
6710 object,
6711 data_shndx,
6712 prelocs,
6713 reloc_count,
6714 output_section,
6715 needs_special_offset_handling,
6716 local_symbol_count,
6717 plocal_symbols);
6718 }
6719
6720 // Scan relocations for a section.
6721
6722 template<int size, bool big_endian>
6723 void
6724 Target_aarch64<size, big_endian>::scan_relocs(
6725 Symbol_table* symtab,
6726 Layout* layout,
6727 Sized_relobj_file<size, big_endian>* object,
6728 unsigned int data_shndx,
6729 unsigned int sh_type,
6730 const unsigned char* prelocs,
6731 size_t reloc_count,
6732 Output_section* output_section,
6733 bool needs_special_offset_handling,
6734 size_t local_symbol_count,
6735 const unsigned char* plocal_symbols)
6736 {
6737 typedef Target_aarch64<size, big_endian> Aarch64;
6738 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6739 Classify_reloc;
6740
6741 if (sh_type == elfcpp::SHT_REL)
6742 {
6743 gold_error(_("%s: unsupported REL reloc section"),
6744 object->name().c_str());
6745 return;
6746 }
6747
6748 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6749 symtab,
6750 layout,
6751 this,
6752 object,
6753 data_shndx,
6754 prelocs,
6755 reloc_count,
6756 output_section,
6757 needs_special_offset_handling,
6758 local_symbol_count,
6759 plocal_symbols);
6760 }
6761
6762 // Return the value to use for a dynamic symbol which requires special
6763 // treatment. This is how we support equality comparisons of function
6764 // pointers across shared library boundaries, as described in the
6765 // processor specific ABI supplement.
6766
6767 template<int size, bool big_endian>
6768 uint64_t
6769 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6770 {
6771 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6772 return this->plt_address_for_global(gsym);
6773 }
6774
6775
6776 // Finalize the sections.
6777
6778 template<int size, bool big_endian>
6779 void
6780 Target_aarch64<size, big_endian>::do_finalize_sections(
6781 Layout* layout,
6782 const Input_objects*,
6783 Symbol_table* symtab)
6784 {
6785 const Reloc_section* rel_plt = (this->plt_ == NULL
6786 ? NULL
6787 : this->plt_->rela_plt());
6788 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6789 this->rela_dyn_, true, false);
6790
6791 // Emit any relocs we saved in an attempt to avoid generating COPY
6792 // relocs.
6793 if (this->copy_relocs_.any_saved_relocs())
6794 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6795
6796 // Fill in some more dynamic tags.
6797 Output_data_dynamic* const odyn = layout->dynamic_data();
6798 if (odyn != NULL)
6799 {
6800 if (this->plt_ != NULL
6801 && this->plt_->output_section() != NULL
6802 && this->plt_ ->has_tlsdesc_entry())
6803 {
6804 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6805 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6806 this->got_->finalize_data_size();
6807 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6808 this->plt_, plt_offset);
6809 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6810 this->got_, got_offset);
6811 }
6812 }
6813
6814 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6815 // the .got.plt section.
6816 Symbol* sym = this->global_offset_table_;
6817 if (sym != NULL)
6818 {
6819 uint64_t data_size = this->got_plt_->current_data_size();
6820 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6821
6822 // If the .got section is more than 0x8000 bytes, we add
6823 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6824 // bit relocations have a greater chance of working.
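// (A signed 16-bit offset reaches -0x8000..0x7fff, so biasing the symbol
// by 0x8000 centres the GOT within that reach.)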
6825 if (data_size >= 0x8000)
6826 symtab->get_sized_symbol<size>(sym)->set_value(
6827 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6828 }
6829
6830 if (parameters->doing_static_link()
6831 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6832 {
6833 // If linking statically, make sure that the __rela_iplt symbols
6834 // were defined if necessary, even if we didn't create a PLT.
6835 static const Define_symbol_in_segment syms[] =
6836 {
6837 {
6838 "__rela_iplt_start", // name
6839 elfcpp::PT_LOAD, // segment_type
6840 elfcpp::PF_W, // segment_flags_set
6841 elfcpp::PF(0), // segment_flags_clear
6842 0, // value
6843 0, // size
6844 elfcpp::STT_NOTYPE, // type
6845 elfcpp::STB_GLOBAL, // binding
6846 elfcpp::STV_HIDDEN, // visibility
6847 0, // nonvis
6848 Symbol::SEGMENT_START, // offset_from_base
6849 true // only_if_ref
6850 },
6851 {
6852 "__rela_iplt_end", // name
6853 elfcpp::PT_LOAD, // segment_type
6854 elfcpp::PF_W, // segment_flags_set
6855 elfcpp::PF(0), // segment_flags_clear
6856 0, // value
6857 0, // size
6858 elfcpp::STT_NOTYPE, // type
6859 elfcpp::STB_GLOBAL, // binding
6860 elfcpp::STV_HIDDEN, // visibility
6861 0, // nonvis
6862 Symbol::SEGMENT_START, // offset_from_base
6863 true // only_if_ref
6864 }
6865 };
6866
6867 symtab->define_symbols(layout, 2, syms,
6868 layout->script_options()->saw_sections_clause());
6869 }
6870
6871 return;
6872 }
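
// An illustrative sketch -- the helper below is hypothetical and not used by
// gold -- of how the 0x8000 bias applied to _GLOBAL_OFFSET_TABLE_ above
// interacts with GOT-relative offsets.  Moving the symbol 0x8000 bytes into a
// large GOT means a signed 16-bit displacement from it can reach roughly
// 64 KiB of entries instead of only the first 32 KiB.

static inline long
example_offset_from_got_symbol(unsigned long raw_entry_offset,
			       unsigned long got_size)
{
  // Mirror of the bias rule used above and in Relocate::relocate: bias only
  // when the section is at least 0x8000 bytes.
  unsigned long bias = got_size >= 0x8000 ? 0x8000 : 0;
  return static_cast<long>(raw_entry_offset) - static_cast<long>(bias);
}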
6873
6874 // Perform a relocation.
6875
6876 template<int size, bool big_endian>
6877 inline bool
6878 Target_aarch64<size, big_endian>::Relocate::relocate(
6879 const Relocate_info<size, big_endian>* relinfo,
6880 unsigned int,
6881 Target_aarch64<size, big_endian>* target,
6882 Output_section* ,
6883 size_t relnum,
6884 const unsigned char* preloc,
6885 const Sized_symbol<size>* gsym,
6886 const Symbol_value<size>* psymval,
6887 unsigned char* view,
6888 typename elfcpp::Elf_types<size>::Elf_Addr address,
6889 section_size_type /* view_size */)
6890 {
6891 if (view == NULL)
6892 return true;
6893
6894 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6895
6896 const elfcpp::Rela<size, big_endian> rela(preloc);
6897 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
6898 const AArch64_reloc_property* reloc_property =
6899 aarch64_reloc_property_table->get_reloc_property(r_type);
6900
6901 if (reloc_property == NULL)
6902 {
6903 std::string reloc_name =
6904 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6905 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6906 _("cannot relocate %s in object file"),
6907 reloc_name.c_str());
6908 return true;
6909 }
6910
6911 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6912
6913 // Pick the value to use for symbols defined in the PLT.
6914 Symbol_value<size> symval;
6915 if (gsym != NULL
6916 && gsym->use_plt_offset(reloc_property->reference_flags()))
6917 {
6918 symval.set_output_value(target->plt_address_for_global(gsym));
6919 psymval = &symval;
6920 }
6921 else if (gsym == NULL && psymval->is_ifunc_symbol())
6922 {
6923 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6924 if (object->local_has_plt_offset(r_sym))
6925 {
6926 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6927 psymval = &symval;
6928 }
6929 }
6930
6931 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6932
6933 // Get the GOT offset if needed.
6934 // For aarch64, the GOT pointer points to the start of the GOT section.
6935 bool have_got_offset = false;
6936 int got_offset = 0;
6937 int got_base = (target->got_ != NULL
6938 ? (target->got_->current_data_size() >= 0x8000
6939 ? 0x8000 : 0)
6940 : 0);
6941 switch (r_type)
6942 {
6943 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6944 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6945 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6946 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6947 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6948 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6949 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6950 case elfcpp::R_AARCH64_GOTREL64:
6951 case elfcpp::R_AARCH64_GOTREL32:
6952 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6953 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6954 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6955 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6956 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6957 if (gsym != NULL)
6958 {
6959 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6960 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
6961 }
6962 else
6963 {
6964 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6965 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6966 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6967 - got_base);
6968 }
6969 have_got_offset = true;
6970 break;
6971
6972 default:
6973 break;
6974 }
6975
6976 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
6977 typename elfcpp::Elf_types<size>::Elf_Addr value;
6978 switch (r_type)
6979 {
6980 case elfcpp::R_AARCH64_NONE:
6981 break;
6982
6983 case elfcpp::R_AARCH64_ABS64:
6984 if (!parameters->options().apply_dynamic_relocs()
6985 && parameters->options().output_is_position_independent()
6986 && gsym != NULL
6987 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
6988 && !gsym->can_use_relative_reloc(false))
6989 // We have generated an absolute dynamic relocation, so do not
6990 // apply the relocation statically. (Works around bugs in older
6991 // Android dynamic linkers.)
6992 break;
6993 reloc_status = Reloc::template rela_ua<64>(
6994 view, object, psymval, addend, reloc_property);
6995 break;
6996
6997 case elfcpp::R_AARCH64_ABS32:
6998 if (!parameters->options().apply_dynamic_relocs()
6999 && parameters->options().output_is_position_independent()
7000 && gsym != NULL
7001 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7002 // We have generated an absolute dynamic relocation, so do not
7003 // apply the relocation statically. (Works around bugs in older
7004 // Android dynamic linkers.)
7005 break;
7006 reloc_status = Reloc::template rela_ua<32>(
7007 view, object, psymval, addend, reloc_property);
7008 break;
7009
7010 case elfcpp::R_AARCH64_ABS16:
7011 if (!parameters->options().apply_dynamic_relocs()
7012 && parameters->options().output_is_position_independent()
7013 && gsym != NULL
7014 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7015 // We have generated an absolute dynamic relocation, so do not
7016 // apply the relocation statically. (Works around bugs in older
7017 // Android dynamic linkers.)
7018 break;
7019 reloc_status = Reloc::template rela_ua<16>(
7020 view, object, psymval, addend, reloc_property);
7021 break;
7022
7023 case elfcpp::R_AARCH64_PREL64:
7024 reloc_status = Reloc::template pcrela_ua<64>(
7025 view, object, psymval, addend, address, reloc_property);
7026 break;
7027
7028 case elfcpp::R_AARCH64_PREL32:
7029 reloc_status = Reloc::template pcrela_ua<32>(
7030 view, object, psymval, addend, address, reloc_property);
7031 break;
7032
7033 case elfcpp::R_AARCH64_PREL16:
7034 reloc_status = Reloc::template pcrela_ua<16>(
7035 view, object, psymval, addend, address, reloc_property);
7036 break;
7037
7038 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7039 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7040 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7041 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7042 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7043 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7044 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7045 reloc_status = Reloc::template rela_general<32>(
7046 view, object, psymval, addend, reloc_property);
7047 break;
7048 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7049 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7050 case elfcpp::R_AARCH64_MOVW_SABS_G2:
7051 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7052 reloc_property);
7053 break;
7054
7055 case elfcpp::R_AARCH64_LD_PREL_LO19:
7056 reloc_status = Reloc::template pcrela_general<32>(
7057 view, object, psymval, addend, address, reloc_property);
7058 break;
7059
7060 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7061 reloc_status = Reloc::adr(view, object, psymval, addend,
7062 address, reloc_property);
7063 break;
7064
7065 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7066 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7067 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7068 reloc_property);
7069 break;
7070
7071 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7072 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7073 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7074 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7075 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7076 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7077 reloc_status = Reloc::template rela_general<32>(
7078 view, object, psymval, addend, reloc_property);
7079 break;
7080
7081 case elfcpp::R_AARCH64_CALL26:
7082 if (this->skip_call_tls_get_addr_)
7083 {
7084 // Double check that the TLSGD insn has been optimized away.
7085 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7086 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7087 reinterpret_cast<Insntype*>(view));
7088 gold_assert((insn & 0xff000000) == 0x91000000);
7089
7090 reloc_status = Reloc::STATUS_OKAY;
7091 this->skip_call_tls_get_addr_ = false;
7092 // Return false to stop further processing this reloc.
7093 return false;
7094 }
7095 // Fall through.
7096 case elfcpp::R_AARCH64_JUMP26:
7097 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7098 gsym, psymval, object,
7099 target->stub_group_size_))
7100 break;
7101 // Fall through.
7102 case elfcpp::R_AARCH64_TSTBR14:
7103 case elfcpp::R_AARCH64_CONDBR19:
7104 reloc_status = Reloc::template pcrela_general<32>(
7105 view, object, psymval, addend, address, reloc_property);
7106 break;
7107
7108 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7109 gold_assert(have_got_offset);
7110 value = target->got_->address() + got_base + got_offset;
7111 reloc_status = Reloc::adrp(view, value + addend, address);
7112 break;
7113
7114 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7115 gold_assert(have_got_offset);
7116 value = target->got_->address() + got_base + got_offset;
7117 reloc_status = Reloc::template rela_general<32>(
7118 view, value, addend, reloc_property);
7119 break;
7120
7121 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7122 {
7123 gold_assert(have_got_offset);
7124 value = target->got_->address() + got_base + got_offset + addend -
7125 Reloc::Page(target->got_->address() + got_base);
7126 if ((value & 7) != 0)
7127 reloc_status = Reloc::STATUS_OVERFLOW;
7128 else
7129 reloc_status = Reloc::template reloc_common<32>(
7130 view, value, reloc_property);
7131 break;
7132 }
7133
7134 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7135 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7136 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7137 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7138 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7139 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7140 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7141 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7142 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7143 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7144 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7145 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7146 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7147 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7148 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7149 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7150 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7151 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7152 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7153 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7154 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7155 case elfcpp::R_AARCH64_TLSDESC_CALL:
7156 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7157 gsym, psymval, view, address);
7158 break;
7159
7160 // These are dynamic relocations, which are unexpected when linking.
7161 case elfcpp::R_AARCH64_COPY:
7162 case elfcpp::R_AARCH64_GLOB_DAT:
7163 case elfcpp::R_AARCH64_JUMP_SLOT:
7164 case elfcpp::R_AARCH64_RELATIVE:
7165 case elfcpp::R_AARCH64_IRELATIVE:
7166 case elfcpp::R_AARCH64_TLS_DTPREL64:
7167 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7168 case elfcpp::R_AARCH64_TLS_TPREL64:
7169 case elfcpp::R_AARCH64_TLSDESC:
7170 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7171 _("unexpected reloc %u in object file"),
7172 r_type);
7173 break;
7174
7175 default:
7176 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7177 _("unsupported reloc %s"),
7178 reloc_property->name().c_str());
7179 break;
7180 }
7181
7182 // Report any errors.
7183 switch (reloc_status)
7184 {
7185 case Reloc::STATUS_OKAY:
7186 break;
7187 case Reloc::STATUS_OVERFLOW:
7188 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7189 _("relocation overflow in %s"),
7190 reloc_property->name().c_str());
7191 break;
7192 case Reloc::STATUS_BAD_RELOC:
7193 gold_error_at_location(
7194 relinfo,
7195 relnum,
7196 rela.get_r_offset(),
7197 _("unexpected opcode while processing relocation %s"),
7198 reloc_property->name().c_str());
7199 break;
7200 default:
7201 gold_unreachable();
7202 }
7203
7204 return true;
7205 }
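
// An illustrative sketch (hypothetical helper, not part of gold) of why the
// R_AARCH64_CALL26/R_AARCH64_JUMP26 cases above may route a branch through a
// stub: B and BL encode a signed 26-bit word offset, so a direct branch can
// only reach destinations within roughly +/-128 MiB of the branch site;
// maybe_apply_stub() deals with the out-of-range cases.

static inline bool
example_direct_branch_in_range(int64_t branch_address, int64_t destination)
{
  // imm26 is scaled by 4, giving a byte range of [-2^27, 2^27 - 4].
  int64_t offset = destination - branch_address;
  return (offset >= -(static_cast<int64_t>(1) << 27)
	  && offset <= (static_cast<int64_t>(1) << 27) - 4);
}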
7206
7207
7208 template<int size, bool big_endian>
7209 inline
7210 typename AArch64_relocate_functions<size, big_endian>::Status
7211 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7212 const Relocate_info<size, big_endian>* relinfo,
7213 Target_aarch64<size, big_endian>* target,
7214 size_t relnum,
7215 const elfcpp::Rela<size, big_endian>& rela,
7216 unsigned int r_type, const Sized_symbol<size>* gsym,
7217 const Symbol_value<size>* psymval,
7218 unsigned char* view,
7219 typename elfcpp::Elf_types<size>::Elf_Addr address)
7220 {
7221 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7222 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7223
7224 Output_segment* tls_segment = relinfo->layout->tls_segment();
7225 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7226 const AArch64_reloc_property* reloc_property =
7227 aarch64_reloc_property_table->get_reloc_property(r_type);
7228 gold_assert(reloc_property != NULL);
7229
7230 const bool is_final = (gsym == NULL
7231 ? !parameters->options().shared()
7232 : gsym->final_value_is_known());
7233 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7234 optimize_tls_reloc(is_final, r_type);
7235
7236 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7237 int tls_got_offset_type;
7238 switch (r_type)
7239 {
7240 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7241 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7242 {
7243 if (tlsopt == tls::TLSOPT_TO_LE)
7244 {
7245 if (tls_segment == NULL)
7246 {
7247 gold_assert(parameters->errors()->error_count() > 0
7248 || issue_undefined_symbol_error(gsym));
7249 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7250 }
7251 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7252 psymval);
7253 }
7254 else if (tlsopt == tls::TLSOPT_NONE)
7255 {
7256 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7257 // First, get the address of the GOT entry.
7258 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7259 if (gsym != NULL)
7260 {
7261 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7262 got_entry_address = target->got_->address() +
7263 gsym->got_offset(tls_got_offset_type);
7264 }
7265 else
7266 {
7267 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7268 gold_assert(
7269 object->local_has_got_offset(r_sym, tls_got_offset_type));
7270 got_entry_address = target->got_->address() +
7271 object->local_got_offset(r_sym, tls_got_offset_type);
7272 }
7273
7274 // Apply the relocation to the adrp/ld or adrp/add pair.
7275 switch (r_type)
7276 {
7277 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7278 return aarch64_reloc_funcs::adrp(
7279 view, got_entry_address + addend, address);
7280
7281 break;
7282
7283 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7284 return aarch64_reloc_funcs::template rela_general<32>(
7285 view, got_entry_address, addend, reloc_property);
7286 break;
7287
7288 default:
7289 gold_unreachable();
7290 }
7291 }
7292 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7293 _("unsupported gd_to_ie relaxation on %u"),
7294 r_type);
7295 }
7296 break;
7297
7298 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7299 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7300 {
7301 if (tlsopt == tls::TLSOPT_TO_LE)
7302 {
7303 if (tls_segment == NULL)
7304 {
7305 gold_assert(parameters->errors()->error_count() > 0
7306 || issue_undefined_symbol_error(gsym));
7307 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7308 }
7309 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7310 psymval);
7311 }
7312
7313 gold_assert(tlsopt == tls::TLSOPT_NONE);
7314 // Relocate the field with the offset of the GOT entry for
7315 // the module index.
7316 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7317 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7318 target->got_->address());
7319
7320 switch (r_type)
7321 {
7322 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7323 return aarch64_reloc_funcs::adrp(
7324 view, got_entry_address + addend, address);
7325 break;
7326
7327 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7328 return aarch64_reloc_funcs::template rela_general<32>(
7329 view, got_entry_address, addend, reloc_property);
7330 break;
7331
7332 default:
7333 gold_unreachable();
7334 }
7335 }
7336 break;
7337
7338 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7339 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7340 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7341 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7342 {
7343 AArch64_address value = psymval->value(object, 0);
7344 if (tlsopt == tls::TLSOPT_TO_LE)
7345 {
7346 if (tls_segment == NULL)
7347 {
7348 gold_assert(parameters->errors()->error_count() > 0
7349 || issue_undefined_symbol_error(gsym));
7350 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7351 }
7352 }
7353 switch (r_type)
7354 {
7355 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7356 return aarch64_reloc_funcs::movnz(view, value + addend,
7357 reloc_property);
7358 break;
7359
7360 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7361 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7362 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7363 return aarch64_reloc_funcs::template rela_general<32>(
7364 view, value, addend, reloc_property);
7365 break;
7366
7367 default:
7368 gold_unreachable();
7369 }
7370 // We should never reach here.
7371 }
7372 break;
7373
7374 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7375 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7376 {
7377 if (tlsopt == tls::TLSOPT_TO_LE)
7378 {
7379 if (tls_segment == NULL)
7380 {
7381 gold_assert(parameters->errors()->error_count() > 0
7382 || issue_undefined_symbol_error(gsym));
7383 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7384 }
7385 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7386 psymval);
7387 }
7388 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7389
7390 // First, get the address of the GOT entry.
7391 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7392 if (gsym != NULL)
7393 {
7394 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7395 got_entry_address = target->got_->address() +
7396 gsym->got_offset(tls_got_offset_type);
7397 }
7398 else
7399 {
7400 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7401 gold_assert(
7402 object->local_has_got_offset(r_sym, tls_got_offset_type));
7403 got_entry_address = target->got_->address() +
7404 object->local_got_offset(r_sym, tls_got_offset_type);
7405 }
7406 // Apply the relocation to the adrp/ld or adrp/add pair.
7407 switch (r_type)
7408 {
7409 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7410 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7411 address);
7412 break;
7413 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7414 return aarch64_reloc_funcs::template rela_general<32>(
7415 view, got_entry_address, addend, reloc_property);
7416 default:
7417 gold_unreachable();
7418 }
7419 }
7420 // We shall never reach here.
7421 break;
7422
7423 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7424 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7425 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7426 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7427 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7428 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7429 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7430 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7431 {
7432 gold_assert(tls_segment != NULL);
7433 AArch64_address value = psymval->value(object, 0);
7434
7435 if (!parameters->options().shared())
7436 {
7437 AArch64_address aligned_tcb_size =
7438 align_address(target->tcb_size(),
7439 tls_segment->maximum_alignment());
7440 value += aligned_tcb_size;
7441 switch (r_type)
7442 {
7443 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7444 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7445 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7446 return aarch64_reloc_funcs::movnz(view, value + addend,
7447 reloc_property);
7448 default:
7449 return aarch64_reloc_funcs::template
7450 rela_general<32>(view,
7451 value,
7452 addend,
7453 reloc_property);
7454 }
7455 }
7456 else
7457 gold_error(_("%s: unsupported reloc %u "
7458 "in non-static TLSLE mode."),
7459 object->name().c_str(), r_type);
7460 }
7461 break;
7462
7463 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7464 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7465 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7466 case elfcpp::R_AARCH64_TLSDESC_CALL:
7467 {
7468 if (tlsopt == tls::TLSOPT_TO_LE)
7469 {
7470 if (tls_segment == NULL)
7471 {
7472 gold_assert(parameters->errors()->error_count() > 0
7473 || issue_undefined_symbol_error(gsym));
7474 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7475 }
7476 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7477 view, psymval);
7478 }
7479 else
7480 {
7481 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7482 ? GOT_TYPE_TLS_OFFSET
7483 : GOT_TYPE_TLS_DESC);
7484 unsigned int got_tlsdesc_offset = 0;
7485 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7486 && tlsopt == tls::TLSOPT_NONE)
7487 {
7488 // We created GOT entries in the .got.tlsdesc portion of the
7489 // .got.plt section, but the offset stored in the symbol is the
7490 // offset within .got.tlsdesc.
7491 got_tlsdesc_offset = (target->got_->data_size()
7492 + target->got_plt_section()->data_size());
7493 }
7494 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7495 if (gsym != NULL)
7496 {
7497 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7498 got_entry_address = target->got_->address()
7499 + got_tlsdesc_offset
7500 + gsym->got_offset(tls_got_offset_type);
7501 }
7502 else
7503 {
7504 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7505 gold_assert(
7506 object->local_has_got_offset(r_sym, tls_got_offset_type));
7507 got_entry_address = target->got_->address() +
7508 got_tlsdesc_offset +
7509 object->local_got_offset(r_sym, tls_got_offset_type);
7510 }
7511 if (tlsopt == tls::TLSOPT_TO_IE)
7512 {
7513 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7514 view, psymval, got_entry_address,
7515 address);
7516 }
7517
7518 // Now do tlsdesc relocation.
7519 switch (r_type)
7520 {
7521 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7522 return aarch64_reloc_funcs::adrp(view,
7523 got_entry_address + addend,
7524 address);
7525 break;
7526 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7527 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7528 return aarch64_reloc_funcs::template rela_general<32>(
7529 view, got_entry_address, addend, reloc_property);
7530 break;
7531 case elfcpp::R_AARCH64_TLSDESC_CALL:
7532 return aarch64_reloc_funcs::STATUS_OKAY;
7533 break;
7534 default:
7535 gold_unreachable();
7536 }
7537 }
7538 }
7539 break;
7540
7541 default:
7542 gold_error(_("%s: unsupported TLS reloc %u."),
7543 object->name().c_str(), r_type);
7544 }
7545 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7546 } // End of relocate_tls.
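
// An illustrative sketch (hypothetical helper, not part of gold) of the
// "is_final" test used by relocate_tls above: the LE relaxations are only
// possible when the TLS offset is a link-time constant, i.e. for a local
// symbol when we are not building a shared object, and for a global symbol
// when its final value is known (gold's final_value_is_known()).

static inline bool
example_tls_value_is_final(bool is_global, bool global_value_known,
			   bool building_shared)
{
  return is_global ? global_value_known : !building_shared;
}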
7547
7548
7549 template<int size, bool big_endian>
7550 inline
7551 typename AArch64_relocate_functions<size, big_endian>::Status
7552 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7553 const Relocate_info<size, big_endian>* relinfo,
7554 Target_aarch64<size, big_endian>* target,
7555 const elfcpp::Rela<size, big_endian>& rela,
7556 unsigned int r_type,
7557 unsigned char* view,
7558 const Symbol_value<size>* psymval)
7559 {
7560 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7561 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7562 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7563
7564 Insntype* ip = reinterpret_cast<Insntype*>(view);
7565 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7566 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7567 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7568
7569 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7570 {
7571 // This is the second reloc of the pair; the optimization should
7572 // already have been done.
7573 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7574 return aarch64_reloc_funcs::STATUS_OKAY;
7575 }
7576
7577 // The original sequence is -
7578 // 90000000 adrp x0, 0 <main>
7579 // 91000000 add x0, x0, #0x0
7580 // 94000000 bl 0 <__tls_get_addr>
7581 // optimized to sequence -
7582 // d53bd040 mrs x0, tpidr_el0
7583 // 91400000 add x0, x0, #0x0, lsl #12
7584 // 91000000 add x0, x0, #0x0
7585
7586 // Unlike tls_ie_to_le, we rewrite all three insns in one call, when we
7587 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21", because we
7588 // also have to change "bl __tls_get_addr", which has no corresponding tls
7589 // relocation type. So before proceeding, we need to make sure the compiler
7590 // has not changed the sequence.
7591 if(!(insn1 == 0x90000000 // adrp x0,0
7592 && insn2 == 0x91000000 // add x0, x0, #0x0
7593 && insn3 == 0x94000000)) // bl 0
7594 {
7595 // Ideally we should give up gd_to_le relaxation and do gd access.
7596 // However the gd_to_le relaxation decision has been made early
7597 // in the scan stage, where we did not allocate any GOT entry for
7598 // this symbol. Therefore we have to exit and report an error now.
7599 gold_error(_("unexpected reloc insn sequence while relaxing "
7600 "tls gd to le for reloc %u."), r_type);
7601 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7602 }
7603
7604 // Write new insns.
7605 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7606 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7607 insn3 = 0x91000000; // add x0, x0, #0x0
7608 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7609 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7610 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7611
7612 // Calculate tprel value.
7613 Output_segment* tls_segment = relinfo->layout->tls_segment();
7614 gold_assert(tls_segment != NULL);
7615 AArch64_address value = psymval->value(relinfo->object, 0);
7616 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7617 AArch64_address aligned_tcb_size =
7618 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7619 AArch64_address x = value + aligned_tcb_size;
7620
7621 // After new insns are written, apply TLSLE relocs.
7622 const AArch64_reloc_property* rp1 =
7623 aarch64_reloc_property_table->get_reloc_property(
7624 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7625 const AArch64_reloc_property* rp2 =
7626 aarch64_reloc_property_table->get_reloc_property(
7627 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7628 gold_assert(rp1 != NULL && rp2 != NULL);
7629
7630 typename aarch64_reloc_funcs::Status s1 =
7631 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7632 x,
7633 addend,
7634 rp1);
7635 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7636 return s1;
7637
7638 typename aarch64_reloc_funcs::Status s2 =
7639 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7640 x,
7641 addend,
7642 rp2);
7643
7644 this->skip_call_tls_get_addr_ = true;
7645 return s2;
7646 } // End of tls_gd_to_le
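
// An illustrative sketch (hypothetical helpers, not part of gold) of how the
// thread-pointer offset "x" computed in tls_gd_to_le above is split across
// the two rewritten ADD instructions: the first ADD takes bits [23:12] of the
// offset (TPREL_HI12) and the second takes bits [11:0] (TPREL_LO12), so the
// pair can materialize offsets of up to 2^24 bytes from TP.

static inline unsigned int
example_tprel_hi12(uint64_t x)
{ return static_cast<unsigned int>((x >> 12) & 0xfff); }

static inline unsigned int
example_tprel_lo12(uint64_t x)
{ return static_cast<unsigned int>(x & 0xfff); }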
7647
7648
7649 template<int size, bool big_endian>
7650 inline
7651 typename AArch64_relocate_functions<size, big_endian>::Status
7652 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7653 const Relocate_info<size, big_endian>* relinfo,
7654 Target_aarch64<size, big_endian>* target,
7655 const elfcpp::Rela<size, big_endian>& rela,
7656 unsigned int r_type,
7657 unsigned char* view,
7658 const Symbol_value<size>* psymval)
7659 {
7660 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7661 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7662 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7663
7664 Insntype* ip = reinterpret_cast<Insntype*>(view);
7665 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7666 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7667 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7668
7669 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7670 {
7671 // This is the second reloc of the pair; the optimization should
7672 // already have been done.
7673 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7674 return aarch64_reloc_funcs::STATUS_OKAY;
7675 }
7676
7677 // The original sequence is -
7678 // 90000000 adrp x0, 0 <main>
7679 // 91000000 add x0, x0, #0x0
7680 // 94000000 bl 0 <__tls_get_addr>
7681 // optimized to sequence -
7682 // d53bd040 mrs x0, tpidr_el0
7683 // 91400000 add x0, x0, #0x0, lsl #12
7684 // 91000000 add x0, x0, #0x0
7685
7686 // Unlike tls_ie_to_le, we rewrite all three insns in one call, when we
7687 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21", because we
7688 // also have to change "bl __tls_get_addr", which has no corresponding tls
7689 // relocation type. So before proceeding, we need to make sure the compiler
7690 // has not changed the sequence.
7691 if(!(insn1 == 0x90000000 // adrp x0,0
7692 && insn2 == 0x91000000 // add x0, x0, #0x0
7693 && insn3 == 0x94000000)) // bl 0
7694 {
7695 // Ideally we should give up ld_to_le relaxation and do ld access.
7696 // However the ld_to_le relaxation decision has been made early
7697 // in the scan stage, where we did not allocate any GOT entry for
7698 // this symbol. Therefore we have to exit and report an error now.
7699 gold_error(_("unexpected reloc insn sequence while relaxing "
7700 "tls ld to le for reloc %u."), r_type);
7701 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7702 }
7703
7704 // Write new insns.
7705 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7706 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7707 insn3 = 0x91000000; // add x0, x0, #0x0
7708 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7709 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7710 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7711
7712 // Calculate tprel value.
7713 Output_segment* tls_segment = relinfo->layout->tls_segment();
7714 gold_assert(tls_segment != NULL);
7715 AArch64_address value = psymval->value(relinfo->object, 0);
7716 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7717 AArch64_address aligned_tcb_size =
7718 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7719 AArch64_address x = value + aligned_tcb_size;
7720
7721 // After new insns are written, apply TLSLE relocs.
7722 const AArch64_reloc_property* rp1 =
7723 aarch64_reloc_property_table->get_reloc_property(
7724 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7725 const AArch64_reloc_property* rp2 =
7726 aarch64_reloc_property_table->get_reloc_property(
7727 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7728 gold_assert(rp1 != NULL && rp2 != NULL);
7729
7730 typename aarch64_reloc_funcs::Status s1 =
7731 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7732 x,
7733 addend,
7734 rp1);
7735 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7736 return s1;
7737
7738 typename aarch64_reloc_funcs::Status s2 =
7739 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7740 x,
7741 addend,
7742 rp2);
7743
7744 this->skip_call_tls_get_addr_ = true;
7745 return s2;
7746
7747 } // End of tls_ld_to_le
7748
7749 template<int size, bool big_endian>
7750 inline
7751 typename AArch64_relocate_functions<size, big_endian>::Status
7752 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7753 const Relocate_info<size, big_endian>* relinfo,
7754 Target_aarch64<size, big_endian>* target,
7755 const elfcpp::Rela<size, big_endian>& rela,
7756 unsigned int r_type,
7757 unsigned char* view,
7758 const Symbol_value<size>* psymval)
7759 {
7760 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7761 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7762 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7763
7764 AArch64_address value = psymval->value(relinfo->object, 0);
7765 Output_segment* tls_segment = relinfo->layout->tls_segment();
7766 AArch64_address aligned_tcb_address =
7767 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7768 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7769 AArch64_address x = value + addend + aligned_tcb_address;
7770 // "x" is the offset to tp, we can only do this if x is within
7771 // range [0, 2^32-1]
7772 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7773 {
7774 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7775 r_type);
7776 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7777 }
7778
7779 Insntype* ip = reinterpret_cast<Insntype*>(view);
7780 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7781 unsigned int regno;
7782 Insntype newinsn;
7783 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7784 {
7785 // Generate movz.
7786 regno = (insn & 0x1f);
7787 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7788 }
7789 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7790 {
7791 // Generate movk.
7792 regno = (insn & 0x1f);
7793 gold_assert(regno == ((insn >> 5) & 0x1f));
7794 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7795 }
7796 else
7797 gold_unreachable();
7798
7799 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7800 return aarch64_reloc_funcs::STATUS_OKAY;
7801 } // End of tls_ie_to_le
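
// An illustrative sketch (hypothetical helpers, not part of gold) of the
// MOVZ/MOVK encodings produced by tls_ie_to_le above: 0xd2a00000 is
// "movz xN, #imm16, lsl #16" and 0xf2800000 is "movk xN, #imm16"; in both,
// the 16-bit immediate occupies bits [20:5] and the destination register
// bits [4:0], which is why the offset to TP must fit in 32 bits.

static inline uint32_t
example_movz_x_lsl16(unsigned int regno, uint64_t x)
{
  return 0xd2a00000u | ((static_cast<uint32_t>(x >> 16) & 0xffff) << 5)
	 | (regno & 0x1f);
}

static inline uint32_t
example_movk_x(unsigned int regno, uint64_t x)
{
  return 0xf2800000u | ((static_cast<uint32_t>(x) & 0xffff) << 5)
	 | (regno & 0x1f);
}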
7802
7803
7804 template<int size, bool big_endian>
7805 inline
7806 typename AArch64_relocate_functions<size, big_endian>::Status
7807 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7808 const Relocate_info<size, big_endian>* relinfo,
7809 Target_aarch64<size, big_endian>* target,
7810 const elfcpp::Rela<size, big_endian>& rela,
7811 unsigned int r_type,
7812 unsigned char* view,
7813 const Symbol_value<size>* psymval)
7814 {
7815 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7816 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7817 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7818
7819 // TLSDESC-GD sequence is like:
7820 // adrp x0, :tlsdesc:v1
7821 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7822 // add x0, x0, :tlsdesc_lo12:v1
7823 // .tlsdesccall v1
7824 // blr x1
7825 // After desc_gd_to_le optimization, the sequence will be like:
7826 // movz x0, #0x0, lsl #16
7827 // movk x0, #0x10
7828 // nop
7829 // nop
7830
7831 // Calculate tprel value.
7832 Output_segment* tls_segment = relinfo->layout->tls_segment();
7833 gold_assert(tls_segment != NULL);
7834 Insntype* ip = reinterpret_cast<Insntype*>(view);
7835 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7836 AArch64_address value = psymval->value(relinfo->object, addend);
7837 AArch64_address aligned_tcb_size =
7838 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7839 AArch64_address x = value + aligned_tcb_size;
7840 // x is the offset from the thread pointer; we can only do this if x is
7841 // within the range [0, 2^32-1]. If x is out of range, fail and exit.
7842 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7843 {
7844 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
7845 "we can't do gd_to_le relaxation."), r_type);
7846 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7847 }
7848 Insntype newinsn;
7849 switch (r_type)
7850 {
7851 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7852 case elfcpp::R_AARCH64_TLSDESC_CALL:
7853 // Change to nop
7854 newinsn = 0xd503201f;
7855 break;
7856
7857 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7858 // Change to movz.
7859 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7860 break;
7861
7862 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7863 // Change to movk.
7864 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7865 break;
7866
7867 default:
7868 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7869 r_type);
7870 gold_unreachable();
7871 }
7872 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7873 return aarch64_reloc_funcs::STATUS_OKAY;
7874 } // End of tls_desc_gd_to_le
7875
7876
7877 template<int size, bool big_endian>
7878 inline
7879 typename AArch64_relocate_functions<size, big_endian>::Status
7880 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7881 const Relocate_info<size, big_endian>* /* relinfo */,
7882 Target_aarch64<size, big_endian>* /* target */,
7883 const elfcpp::Rela<size, big_endian>& rela,
7884 unsigned int r_type,
7885 unsigned char* view,
7886 const Symbol_value<size>* /* psymval */,
7887 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7888 typename elfcpp::Elf_types<size>::Elf_Addr address)
7889 {
7890 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7891 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7892
7893 // TLSDESC-GD sequence is like:
7894 // adrp x0, :tlsdesc:v1
7895 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7896 // add x0, x0, :tlsdesc_lo12:v1
7897 // .tlsdesccall v1
7898 // blr x1
7899 // After desc_gd_to_ie optimization, the sequence will be like:
7900 // adrp x0, :tlsie:v1
7901 // ldr x0, [x0, :tlsie_lo12:v1]
7902 // nop
7903 // nop
7904
7905 Insntype* ip = reinterpret_cast<Insntype*>(view);
7906 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7907 Insntype newinsn;
7908 switch (r_type)
7909 {
7910 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7911 case elfcpp::R_AARCH64_TLSDESC_CALL:
7912 // Change to nop
7913 newinsn = 0xd503201f;
7914 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7915 break;
7916
7917 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7918 {
7919 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7920 address);
7921 }
7922 break;
7923
7924 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7925 {
7926 // Set ldr target register to be x0.
7927 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7928 insn &= 0xffffffe0;
7929 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7930 // Do relocation.
7931 const AArch64_reloc_property* reloc_property =
7932 aarch64_reloc_property_table->get_reloc_property(
7933 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7934 return aarch64_reloc_funcs::template rela_general<32>(
7935 view, got_entry_address, addend, reloc_property);
7936 }
7937 break;
7938
7939 default:
7940 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
7941 r_type);
7942 gold_unreachable();
7943 }
7944 return aarch64_reloc_funcs::STATUS_OKAY;
7945 } // End of tls_desc_gd_to_ie
7946
7947 // Relocate section data.
7948
7949 template<int size, bool big_endian>
7950 void
7951 Target_aarch64<size, big_endian>::relocate_section(
7952 const Relocate_info<size, big_endian>* relinfo,
7953 unsigned int sh_type,
7954 const unsigned char* prelocs,
7955 size_t reloc_count,
7956 Output_section* output_section,
7957 bool needs_special_offset_handling,
7958 unsigned char* view,
7959 typename elfcpp::Elf_types<size>::Elf_Addr address,
7960 section_size_type view_size,
7961 const Reloc_symbol_changes* reloc_symbol_changes)
7962 {
7963 typedef Target_aarch64<size, big_endian> Aarch64;
7964 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
7965 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7966 Classify_reloc;
7967
7968 gold_assert(sh_type == elfcpp::SHT_RELA);
7969
7970 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
7971 gold::Default_comdat_behavior, Classify_reloc>(
7972 relinfo,
7973 this,
7974 prelocs,
7975 reloc_count,
7976 output_section,
7977 needs_special_offset_handling,
7978 view,
7979 address,
7980 view_size,
7981 reloc_symbol_changes);
7982 }
7983
7984 // Scan the relocs during a relocatable link.
7985
7986 template<int size, bool big_endian>
7987 void
7988 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
7989 Symbol_table* symtab,
7990 Layout* layout,
7991 Sized_relobj_file<size, big_endian>* object,
7992 unsigned int data_shndx,
7993 unsigned int sh_type,
7994 const unsigned char* prelocs,
7995 size_t reloc_count,
7996 Output_section* output_section,
7997 bool needs_special_offset_handling,
7998 size_t local_symbol_count,
7999 const unsigned char* plocal_symbols,
8000 Relocatable_relocs* rr)
8001 {
8002 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8003 Classify_reloc;
8004 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
8005 Scan_relocatable_relocs;
8006
8007 gold_assert(sh_type == elfcpp::SHT_RELA);
8008
8009 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8010 symtab,
8011 layout,
8012 object,
8013 data_shndx,
8014 prelocs,
8015 reloc_count,
8016 output_section,
8017 needs_special_offset_handling,
8018 local_symbol_count,
8019 plocal_symbols,
8020 rr);
8021 }
8022
8023 // Scan the relocs for --emit-relocs.
8024
8025 template<int size, bool big_endian>
8026 void
8027 Target_aarch64<size, big_endian>::emit_relocs_scan(
8028 Symbol_table* symtab,
8029 Layout* layout,
8030 Sized_relobj_file<size, big_endian>* object,
8031 unsigned int data_shndx,
8032 unsigned int sh_type,
8033 const unsigned char* prelocs,
8034 size_t reloc_count,
8035 Output_section* output_section,
8036 bool needs_special_offset_handling,
8037 size_t local_symbol_count,
8038 const unsigned char* plocal_syms,
8039 Relocatable_relocs* rr)
8040 {
8041 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8042 Classify_reloc;
8043 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8044 Emit_relocs_strategy;
8045
8046 gold_assert(sh_type == elfcpp::SHT_RELA);
8047
8048 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8049 symtab,
8050 layout,
8051 object,
8052 data_shndx,
8053 prelocs,
8054 reloc_count,
8055 output_section,
8056 needs_special_offset_handling,
8057 local_symbol_count,
8058 plocal_syms,
8059 rr);
8060 }
8061
8062 // Relocate a section during a relocatable link.
8063
8064 template<int size, bool big_endian>
8065 void
8066 Target_aarch64<size, big_endian>::relocate_relocs(
8067 const Relocate_info<size, big_endian>* relinfo,
8068 unsigned int sh_type,
8069 const unsigned char* prelocs,
8070 size_t reloc_count,
8071 Output_section* output_section,
8072 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8073 unsigned char* view,
8074 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8075 section_size_type view_size,
8076 unsigned char* reloc_view,
8077 section_size_type reloc_view_size)
8078 {
8079 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8080 Classify_reloc;
8081
8082 gold_assert(sh_type == elfcpp::SHT_RELA);
8083
8084 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8085 relinfo,
8086 prelocs,
8087 reloc_count,
8088 output_section,
8089 offset_in_output_section,
8090 view,
8091 view_address,
8092 view_size,
8093 reloc_view,
8094 reloc_view_size);
8095 }
8096
8097
8098 // Return whether this is a 3-insn erratum sequence.
8099
8100 template<int size, bool big_endian>
8101 bool
8102 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8103 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8104 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8105 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8106 {
8107 unsigned rt1, rt2;
8108 bool load, pair;
8109
8110 // The 2nd insn is a single-register load or store, or a register-pair
8111 // store.
8112 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8113 && (!pair || (pair && !load)))
8114 {
8115 // The 3rd insn is a load or store instruction from the "Load/store
8116 // register (unsigned immediate)" encoding class, using Rn as the
8117 // base address register.
8118 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8119 && (Insn_utilities::aarch64_rn(insn3)
8120 == Insn_utilities::aarch64_rd(insn1)))
8121 return true;
8122 }
8123 return false;
8124 }
8125
8126
8127 // Return whether this is an 835769 erratum sequence.
8128 // (Implemented similarly to elfnn-aarch64.c.)
8129
8130 template<int size, bool big_endian>
8131 bool
8132 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8133 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8134 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8135 {
8136 uint32_t rt;
8137 uint32_t rt2 = 0;
8138 uint32_t rn;
8139 uint32_t rm;
8140 uint32_t ra;
8141 bool pair;
8142 bool load;
8143
8144 if (Insn_utilities::aarch64_mlxl(insn2)
8145 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8146 {
8147 /* Any SIMD memory op is independent of the subsequent MLA
8148 by definition of the erratum. */
8149 if (Insn_utilities::aarch64_bit(insn1, 26))
8150 return true;
8151
8152 /* If not SIMD, check for integer memory ops and MLA relationship. */
8153 rn = Insn_utilities::aarch64_rn(insn2);
8154 ra = Insn_utilities::aarch64_ra(insn2);
8155 rm = Insn_utilities::aarch64_rm(insn2);
8156
8157 /* If this is a load and there's a true(RAW) dependency, we are safe
8158 and this is not an erratum sequence. */
8159 if (load &&
8160 (rt == rn || rt == rm || rt == ra
8161 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8162 return false;
8163
8164 /* We conservatively put out stubs for all other cases (including
8165 writebacks). */
8166 return true;
8167 }
8168
8169 return false;
8170 }
8171
8172
8173 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8174
8175 template<int size, bool big_endian>
8176 void
8177 Target_aarch64<size, big_endian>::create_erratum_stub(
8178 AArch64_relobj<size, big_endian>* relobj,
8179 unsigned int shndx,
8180 section_size_type erratum_insn_offset,
8181 Address erratum_address,
8182 typename Insn_utilities::Insntype erratum_insn,
8183 int erratum_type,
8184 unsigned int e843419_adrp_offset)
8185 {
8186 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8187 The_stub_table* stub_table = relobj->stub_table(shndx);
8188 gold_assert(stub_table != NULL);
8189 if (stub_table->find_erratum_stub(relobj,
8190 shndx,
8191 erratum_insn_offset) == NULL)
8192 {
8193 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8194 The_erratum_stub* stub;
8195 if (erratum_type == ST_E_835769)
8196 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8197 erratum_insn_offset);
8198 else if (erratum_type == ST_E_843419)
8199 stub = new E843419_stub<size, big_endian>(
8200 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8201 else
8202 gold_unreachable();
8203 stub->set_erratum_insn(erratum_insn);
8204 stub->set_erratum_address(erratum_address);
8205 // For errata ST_E_843419 and ST_E_835769, the destination address is
8206 // always the next insn after the erratum insn.
8207 stub->set_destination_address(erratum_address + BPI);
8208 stub_table->add_erratum_stub(stub);
8209 }
8210 }
8211
8212
8213 // Scan for erratum 835769 in section SHNDX over the range
8214 // [output_address + span_start, output_address + span_end). Note that we do
8215 // not share code with the scan_erratum_843419_span function, because for
8216 // 843419 we optimize by scanning only the last few insns of each page,
8217 // whereas for 835769 we need to scan every insn.
8218
8219 template<int size, bool big_endian>
8220 void
8221 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8222 AArch64_relobj<size, big_endian>* relobj,
8223 unsigned int shndx,
8224 const section_size_type span_start,
8225 const section_size_type span_end,
8226 unsigned char* input_view,
8227 Address output_address)
8228 {
8229 typedef typename Insn_utilities::Insntype Insntype;
8230
8231 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8232
8233 // Adjust output_address and view to the start of span.
8234 output_address += span_start;
8235 input_view += span_start;
8236
8237 section_size_type span_length = span_end - span_start;
8238 section_size_type offset = 0;
8239 for (offset = 0; offset + BPI < span_length; offset += BPI)
8240 {
8241 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8242 Insntype insn1 = ip[0];
8243 Insntype insn2 = ip[1];
8244 if (is_erratum_835769_sequence(insn1, insn2))
8245 {
8246 Insntype erratum_insn = insn2;
8247 // "span_start + offset" is the offset for insn1. So for insn2, it is
8248 // "span_start + offset + BPI".
8249 section_size_type erratum_insn_offset = span_start + offset + BPI;
8250 Address erratum_address = output_address + offset + BPI;
8251 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8252 "section %d, offset 0x%08x."),
8253 relobj->name().c_str(), shndx,
8254 (unsigned int)(span_start + offset));
8255
8256 this->create_erratum_stub(relobj, shndx,
8257 erratum_insn_offset, erratum_address,
8258 erratum_insn, ST_E_835769);
8259 offset += BPI; // Skip mac insn.
8260 }
8261 }
8262 } // End of "Target_aarch64::scan_erratum_835769_span".
8263
8264
8265 // Scan for erratum 843419 in section SHNDX over the range
8266 // [output_address + span_start, output_address + span_end).
8267
8268 template<int size, bool big_endian>
8269 void
8270 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8271 AArch64_relobj<size, big_endian>* relobj,
8272 unsigned int shndx,
8273 const section_size_type span_start,
8274 const section_size_type span_end,
8275 unsigned char* input_view,
8276 Address output_address)
8277 {
8278 typedef typename Insn_utilities::Insntype Insntype;
8279
8280 // Adjust output_address and view to the start of span.
8281 output_address += span_start;
8282 input_view += span_start;
8283
8284 if ((output_address & 0x03) != 0)
8285 return;
8286
8287 section_size_type offset = 0;
8288 section_size_type span_length = span_end - span_start;
8289 // The first instruction of the sequence must be at page offset 0xFF8 or 0xFFC.
8290 unsigned int page_offset = output_address & 0xFFF;
8291 // Make sure the starting position, that is "output_address + offset",
8292 // is at page offset 0xff8 or 0xffc.
8293 if (page_offset < 0xff8)
8294 offset = 0xff8 - page_offset;
8295 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8296 {
8297 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8298 Insntype insn1 = ip[0];
8299 if (Insn_utilities::is_adrp(insn1))
8300 {
8301 Insntype insn2 = ip[1];
8302 Insntype insn3 = ip[2];
8303 Insntype erratum_insn;
8304 unsigned insn_offset;
8305 bool do_report = false;
8306 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8307 {
8308 do_report = true;
8309 erratum_insn = insn3;
8310 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8311 }
8312 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8313 {
8314 // Optionally there can be an insn between insn2 and insn3.
8315 Insntype insn_opt = ip[2];
8316 // And insn_opt must not be a branch.
8317 if (!Insn_utilities::aarch64_b(insn_opt)
8318 && !Insn_utilities::aarch64_bl(insn_opt)
8319 && !Insn_utilities::aarch64_blr(insn_opt)
8320 && !Insn_utilities::aarch64_br(insn_opt))
8321 {
8322 // And insn_opt must not write to the dest reg of insn1. However,
8323 // we do a conservative scan, which means we may fix/report
8324 // more than necessary, but it doesn't hurt.
8325
8326 Insntype insn4 = ip[3];
8327 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8328 {
8329 do_report = true;
8330 erratum_insn = insn4;
8331 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8332 }
8333 }
8334 }
8335 if (do_report)
8336 {
8337 unsigned int erratum_insn_offset =
8338 span_start + offset + insn_offset;
8339 Address erratum_address =
8340 output_address + offset + insn_offset;
8341 create_erratum_stub(relobj, shndx,
8342 erratum_insn_offset, erratum_address,
8343 erratum_insn, ST_E_843419,
8344 span_start + offset);
8345 }
8346 }
8347
8348 // Advance to next candidate instruction. We only consider instruction
8349 // sequences starting at a page offset of 0xff8 or 0xffc.
8350 page_offset = (output_address + offset) & 0xfff;
8351 if (page_offset == 0xff8)
8352 offset += 4;
8353 else // (page_offset == 0xffc), we move to next page's 0xff8.
8354 offset += 0xffc;
8355 }
8356 } // End of "Target_aarch64::scan_erratum_843419_span".
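
// An illustrative sketch (hypothetical helper, not part of gold) of the
// stepping used by scan_erratum_843419_span above: only an ADRP in the last
// two instruction slots of a 4 KiB page (page offset 0xff8 or 0xffc) can
// start an 843419 sequence, so from 0xff8 the scan advances 4 bytes to 0xffc,
// and from 0xffc it advances 0xffc bytes to the next page's 0xff8.

static inline uint64_t
example_next_843419_candidate(uint64_t address)
{
  uint64_t page_offset = address & 0xfff;
  return address + (page_offset == 0xff8 ? 4 : 0xffc);
}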
8357
8358
8359 // The selector for aarch64 object files.
8360
8361 template<int size, bool big_endian>
8362 class Target_selector_aarch64 : public Target_selector
8363 {
8364 public:
8365 Target_selector_aarch64();
8366
8367 virtual Target*
8368 do_instantiate_target()
8369 { return new Target_aarch64<size, big_endian>(); }
8370 };
8371
8372 template<>
8373 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8374 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8375 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8376 { }
8377
8378 template<>
8379 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8380 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8381 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8382 { }
8383
8384 template<>
8385 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8386 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8387 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8388 { }
8389
8390 template<>
8391 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8392 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8393 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8394 { }
8395
8396 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8397 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8398 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8399 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8400
8401 } // End anonymous namespace.