1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2016 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.cc, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static unsigned int
114 aarch64_rm(const Insntype insn)
115 { return aarch64_bits(insn, 16, 5); }
116
117 static unsigned int
118 aarch64_rn(const Insntype insn)
119 { return aarch64_bits(insn, 5, 5); }
120
121 static unsigned int
122 aarch64_rd(const Insntype insn)
123 { return aarch64_bits(insn, 0, 5); }
124
125 static unsigned int
126 aarch64_rt(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt2(const Insntype insn)
131 { return aarch64_bits(insn, 10, 5); }
132
133 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
134 static Insntype
135 aarch64_adr_encode_imm(Insntype adr, int imm21)
136 {
137 gold_assert(is_adr(adr));
138 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
139 const int mask19 = (1 << 19) - 1;
140 const int mask2 = 3;
141 adr &= ~((mask19 << 5) | (mask2 << 29));
142 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
143 return adr;
144 }
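// A worked example (illustrative values, not taken from any particular
// link): encoding imm21 = 4096 into "adr x16, ." (0x10000010) places the
// low two immediate bits at [30:29] and the remaining 19 bits at [23:5],
// so aarch64_adr_encode_imm(0x10000010, 4096) == 0x10008010.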
145
146 // Retrieve the encoded adrp 33-bit signed imm value. This is the 21-bit
147 // signed imm encoded in the insn, multiplied by 4K (the page size) and
148 // sign-extended to 64 bits, resulting in [-4G, 4G) with the 12 lsbs being 0.
149 static int64_t
150 aarch64_adrp_decode_imm(const Insntype adrp)
151 {
152 const int mask19 = (1 << 19) - 1;
153 const int mask2 = 3;
154 gold_assert(is_adrp(adrp));
155 // 21-bit imm encoded in adrp.
156 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
157 // Retrieve msb of 21-bit-signed imm for sign extension.
158 uint64_t msbt = (imm >> 20) & 1;
159 // Real value is imm multiplied by 4k. Value now has 33-bit information.
160 int64_t value = imm << 12;
161 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
162 // with value.
163 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
164 }
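// A worked example (illustrative values): for an adrp with immlo = 1 and
// immhi = 0, i.e. 0xb0000010 ("adrp x16, <next page>"), the decoded
// 21-bit imm is 1, so aarch64_adrp_decode_imm(0xb0000010) == 1 * 4096
// == 0x1000.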
165
166 static bool
167 aarch64_b(const Insntype insn)
168 { return (insn & 0xFC000000) == 0x14000000; }
169
170 static bool
171 aarch64_bl(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x94000000; }
173
174 static bool
175 aarch64_blr(const Insntype insn)
176 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
177
178 static bool
179 aarch64_br(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
181
182 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
183 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops.
184 static bool
185 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
186
187 static bool
188 aarch64_ldst(Insntype insn)
189 { return (insn & 0x0a000000) == 0x08000000; }
190
191 static bool
192 aarch64_ldst_ex(Insntype insn)
193 { return (insn & 0x3f000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_pcrel(Insntype insn)
197 { return (insn & 0x3b000000) == 0x18000000; }
198
199 static bool
200 aarch64_ldst_nap(Insntype insn)
201 { return (insn & 0x3b800000) == 0x28000000; }
202
203 static bool
204 aarch64_ldstp_pi(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28800000; }
206
207 static bool
208 aarch64_ldstp_o(Insntype insn)
209 { return (insn & 0x3b800000) == 0x29000000; }
210
211 static bool
212 aarch64_ldstp_pre(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29800000; }
214
215 static bool
216 aarch64_ldst_ui(Insntype insn)
217 { return (insn & 0x3b200c00) == 0x38000000; }
218
219 static bool
220 aarch64_ldst_piimm(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000400; }
222
223 static bool
224 aarch64_ldst_u(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000800; }
226
227 static bool
228 aarch64_ldst_preimm(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000c00; }
230
231 static bool
232 aarch64_ldst_ro(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38200800; }
234
235 static bool
236 aarch64_ldst_uimm(Insntype insn)
237 { return (insn & 0x3b000000) == 0x39000000; }
238
239 static bool
240 aarch64_ldst_simd_m(Insntype insn)
241 { return (insn & 0xbfbf0000) == 0x0c000000; }
242
243 static bool
244 aarch64_ldst_simd_m_pi(Insntype insn)
245 { return (insn & 0xbfa00000) == 0x0c800000; }
246
247 static bool
248 aarch64_ldst_simd_s(Insntype insn)
249 { return (insn & 0xbf9f0000) == 0x0d000000; }
250
251 static bool
252 aarch64_ldst_simd_s_pi(Insntype insn)
253 { return (insn & 0xbf800000) == 0x0d800000; }
254
255 // Classify an INSN as a load/store. Return true if INSN is a
256 // LD/ST instruction, otherwise return false. For scalar LD/ST instructions
257 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
258 // instructions PAIR is TRUE, RT and RT2 are returned.
259 static bool
260 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
261 bool *pair, bool *load)
262 {
263 uint32_t opcode;
264 unsigned int r;
265 uint32_t opc = 0;
266 uint32_t v = 0;
267 uint32_t opc_v = 0;
268
269 /* Bail out quickly if INSN doesn't fall into the load-store
270 encoding space. */
271 if (!aarch64_ldst (insn))
272 return false;
273
274 *pair = false;
275 *load = false;
276 if (aarch64_ldst_ex (insn))
277 {
278 *rt = aarch64_rt (insn);
279 *rt2 = *rt;
280 if (aarch64_bit (insn, 21) == 1)
281 {
282 *pair = true;
283 *rt2 = aarch64_rt2 (insn);
284 }
285 *load = aarch64_ld (insn);
286 return true;
287 }
288 else if (aarch64_ldst_nap (insn)
289 || aarch64_ldstp_pi (insn)
290 || aarch64_ldstp_o (insn)
291 || aarch64_ldstp_pre (insn))
292 {
293 *pair = true;
294 *rt = aarch64_rt (insn);
295 *rt2 = aarch64_rt2 (insn);
296 *load = aarch64_ld (insn);
297 return true;
298 }
299 else if (aarch64_ldst_pcrel (insn)
300 || aarch64_ldst_ui (insn)
301 || aarch64_ldst_piimm (insn)
302 || aarch64_ldst_u (insn)
303 || aarch64_ldst_preimm (insn)
304 || aarch64_ldst_ro (insn)
305 || aarch64_ldst_uimm (insn))
306 {
307 *rt = aarch64_rt (insn);
308 *rt2 = *rt;
309 if (aarch64_ldst_pcrel (insn))
310 *load = true;
311 opc = aarch64_bits (insn, 22, 2);
312 v = aarch64_bit (insn, 26);
313 opc_v = opc | (v << 2);
314 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
315 || opc_v == 5 || opc_v == 7);
316 return true;
317 }
318 else if (aarch64_ldst_simd_m (insn)
319 || aarch64_ldst_simd_m_pi (insn))
320 {
321 *rt = aarch64_rt (insn);
322 *load = aarch64_bit (insn, 22);
323 opcode = (insn >> 12) & 0xf;
324 switch (opcode)
325 {
326 case 0:
327 case 2:
328 *rt2 = *rt + 3;
329 break;
330
331 case 4:
332 case 6:
333 *rt2 = *rt + 2;
334 break;
335
336 case 7:
337 *rt2 = *rt;
338 break;
339
340 case 8:
341 case 10:
342 *rt2 = *rt + 1;
343 break;
344
345 default:
346 return false;
347 }
348 return true;
349 }
350 else if (aarch64_ldst_simd_s (insn)
351 || aarch64_ldst_simd_s_pi (insn))
352 {
353 *rt = aarch64_rt (insn);
354 r = (insn >> 21) & 1;
355 *load = aarch64_bit (insn, 22);
356 opcode = (insn >> 13) & 0x7;
357 switch (opcode)
358 {
359 case 0:
360 case 2:
361 case 4:
362 *rt2 = *rt + r;
363 break;
364
365 case 1:
366 case 3:
367 case 5:
368 *rt2 = *rt + (r == 0 ? 2 : 3);
369 break;
370
371 case 6:
372 *rt2 = *rt + r;
373 break;
374
375 case 7:
376 *rt2 = *rt + (r == 0 ? 2 : 3);
377 break;
378
379 default:
380 return false;
381 }
382 return true;
383 }
384 return false;
385 } // End of "aarch64_mem_op_p".
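// A usage sketch (hypothetical caller, illustrative encoding): for the
// plain 64-bit load "ldr x1, [x2]" (0xf9400041),
//   unsigned int rt, rt2;
//   bool pair, load;
//   aarch64_mem_op_p(0xf9400041, &rt, &rt2, &pair, &load);
// returns true with rt == rt2 == 1, pair == false and load == true, since
// the insn falls in the unsigned-immediate LD/ST encoding class.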
386
387 // Return true if INSN is a mac (multiply-accumulate) insn.
388 static bool
389 aarch64_mac(Insntype insn)
390 { return (insn & 0xff000000) == 0x9b000000; }
391
392 // Return true if INSN is a multiply-accumulate insn.
393 // (This is similar to the implementation in elfnn-aarch64.c.)
394 static bool
395 aarch64_mlxl(Insntype insn)
396 {
397 uint32_t op31 = aarch64_op31(insn);
398 if (aarch64_mac(insn)
399 && (op31 == 0 || op31 == 1 || op31 == 5)
400 /* Exclude MUL instructions, which are encoded as a multiply-accumulate
401 with RA = XZR. */
402 && aarch64_ra(insn) != AARCH64_ZR)
403 {
404 return true;
405 }
406 return false;
407 }
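// For example (illustrative encodings): "madd x0, x1, x2, x3" (0x9b020c20)
// is classified as multiply-accumulate by aarch64_mlxl(), whereas
// "mul x0, x1, x2" (0x9b027c20), being MADD with Ra == XZR, is not.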
408 }; // End of "AArch64_insn_utilities".
409
410
411 // Insn length in bytes.
412
413 template<bool big_endian>
414 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
415
416
417 // Zero register encoding - 31.
418
419 template<bool big_endian>
420 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
421
422
423 // Output_data_got_aarch64 class.
424
425 template<int size, bool big_endian>
426 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
427 {
428 public:
429 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
430 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
431 : Output_data_got<size, big_endian>(),
432 symbol_table_(symtab), layout_(layout)
433 { }
434
435 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
436 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
437 // applied in a static link.
438 void
439 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
440 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
441
442
443 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
444 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
445 // relocation that needs to be applied in a static link.
446 void
447 add_static_reloc(unsigned int got_offset, unsigned int r_type,
448 Sized_relobj_file<size, big_endian>* relobj,
449 unsigned int index)
450 {
451 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
452 index));
453 }
454
455
456 protected:
457 // Write out the GOT table.
458 void
459 do_write(Output_file* of) {
460 // The first entry in the GOT is the address of the .dynamic section.
461 gold_assert(this->data_size() >= size / 8);
462 Output_section* dynamic = this->layout_->dynamic_section();
463 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
464 this->replace_constant(0, dynamic_addr);
465 Output_data_got<size, big_endian>::do_write(of);
466
467 // Handling static relocs
468 if (this->static_relocs_.empty())
469 return;
470
471 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
472
473 gold_assert(parameters->doing_static_link());
474 const off_t offset = this->offset();
475 const section_size_type oview_size =
476 convert_to_section_size_type(this->data_size());
477 unsigned char* const oview = of->get_output_view(offset, oview_size);
478
479 Output_segment* tls_segment = this->layout_->tls_segment();
480 gold_assert(tls_segment != NULL);
481
482 AArch64_address aligned_tcb_address =
483 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
484 tls_segment->maximum_alignment());
485
486 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
487 {
488 Static_reloc& reloc(this->static_relocs_[i]);
489 AArch64_address value;
490
491 if (!reloc.symbol_is_global())
492 {
493 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
494 const Symbol_value<size>* psymval =
495 reloc.relobj()->local_symbol(reloc.index());
496
497 // We are doing static linking. Issue an error and skip this
498 // relocation if the symbol is undefined or in a discarded section.
499 bool is_ordinary;
500 unsigned int shndx = psymval->input_shndx(&is_ordinary);
501 if ((shndx == elfcpp::SHN_UNDEF)
502 || (is_ordinary
503 && shndx != elfcpp::SHN_UNDEF
504 && !object->is_section_included(shndx)
505 && !this->symbol_table_->is_section_folded(object, shndx)))
506 {
507 gold_error(_("undefined or discarded local symbol %u from "
508 " object %s in GOT"),
509 reloc.index(), reloc.relobj()->name().c_str());
510 continue;
511 }
512 value = psymval->value(object, 0);
513 }
514 else
515 {
516 const Symbol* gsym = reloc.symbol();
517 gold_assert(gsym != NULL);
518 if (gsym->is_forwarder())
519 gsym = this->symbol_table_->resolve_forwards(gsym);
520
521 // We are doing static linking. Issue an error and skip this
522 // relocation if the symbol is undefined or in a discarded section,
523 // unless it is a weakly undefined symbol.
524 if ((gsym->is_defined_in_discarded_section()
525 || gsym->is_undefined())
526 && !gsym->is_weak_undefined())
527 {
528 gold_error(_("undefined or discarded symbol %s in GOT"),
529 gsym->name());
530 continue;
531 }
532
533 if (!gsym->is_weak_undefined())
534 {
535 const Sized_symbol<size>* sym =
536 static_cast<const Sized_symbol<size>*>(gsym);
537 value = sym->value();
538 }
539 else
540 value = 0;
541 }
542
543 unsigned got_offset = reloc.got_offset();
544 gold_assert(got_offset < oview_size);
545
546 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
547 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
548 Valtype x;
549 switch (reloc.r_type())
550 {
551 case elfcpp::R_AARCH64_TLS_DTPREL64:
552 x = value;
553 break;
554 case elfcpp::R_AARCH64_TLS_TPREL64:
555 x = value + aligned_tcb_address;
556 break;
557 default:
558 gold_unreachable();
559 }
560 elfcpp::Swap<size, big_endian>::writeval(wv, x);
561 }
562
563 of->write_output_view(offset, oview_size, oview);
564 }
565
566 private:
567 // Symbol table of the output object.
568 Symbol_table* symbol_table_;
569 // A pointer to the Layout class, so that we can find the .dynamic
570 // section when we write out the GOT section.
571 Layout* layout_;
572
573 // This class represents dynamic relocations that need to be applied by
574 // gold because we are using TLS relocations in a static link.
575 class Static_reloc
576 {
577 public:
578 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
579 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
580 { this->u_.global.symbol = gsym; }
581
582 Static_reloc(unsigned int got_offset, unsigned int r_type,
583 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
584 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
585 {
586 this->u_.local.relobj = relobj;
587 this->u_.local.index = index;
588 }
589
590 // Return the GOT offset.
591 unsigned int
592 got_offset() const
593 { return this->got_offset_; }
594
595 // Relocation type.
596 unsigned int
597 r_type() const
598 { return this->r_type_; }
599
600 // Whether the symbol is global or not.
601 bool
602 symbol_is_global() const
603 { return this->symbol_is_global_; }
604
605 // For a relocation against a global symbol, the global symbol.
606 Symbol*
607 symbol() const
608 {
609 gold_assert(this->symbol_is_global_);
610 return this->u_.global.symbol;
611 }
612
613 // For a relocation against a local symbol, the defining object.
614 Sized_relobj_file<size, big_endian>*
615 relobj() const
616 {
617 gold_assert(!this->symbol_is_global_);
618 return this->u_.local.relobj;
619 }
620
621 // For a relocation against a local symbol, the local symbol index.
622 unsigned int
623 index() const
624 {
625 gold_assert(!this->symbol_is_global_);
626 return this->u_.local.index;
627 }
628
629 private:
630 // GOT offset of the entry to which this relocation is applied.
631 unsigned int got_offset_;
632 // Type of relocation.
633 unsigned int r_type_;
634 // Whether this relocation is against a global symbol.
635 bool symbol_is_global_;
636 // A global or local symbol.
637 union
638 {
639 struct
640 {
641 // For a global symbol, the symbol itself.
642 Symbol* symbol;
643 } global;
644 struct
645 {
646 // For a local symbol, the object defining the symbol.
647 Sized_relobj_file<size, big_endian>* relobj;
648 // For a local symbol, the symbol index.
649 unsigned int index;
650 } local;
651 } u_;
652 }; // End of inner class Static_reloc
653
654 std::vector<Static_reloc> static_relocs_;
655 }; // End of Output_data_got_aarch64
656
657
658 template<int size, bool big_endian>
659 class AArch64_input_section;
660
661
662 template<int size, bool big_endian>
663 class AArch64_output_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_relobj;
668
669
670 // Stub type enum constants.
671
672 enum
673 {
674 ST_NONE = 0,
675
676 // Using adrp/add pair, 4 insns (including alignment) without mem access,
677 // the fastest stub. This has a limited jump distance, which is tested by
678 // aarch64_valid_for_adrp_p.
679 ST_ADRP_BRANCH = 1,
680
681 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
682 // unlimited in jump distance.
683 ST_LONG_BRANCH_ABS = 2,
684
685 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
686 // mem access, the slowest one. Only used when the output is position independent.
687 ST_LONG_BRANCH_PCREL = 3,
688
689 // Stub for erratum 843419 handling.
690 ST_E_843419 = 4,
691
692 // Stub for erratum 835769 handling.
693 ST_E_835769 = 5,
694
695 // Number of total stub types.
696 ST_NUMBER = 6
697 };
698
699
700 // Struct that wraps insns for a particular stub. All stub templates are
701 // created/initialized as constants by Stub_template_repertoire.
702
703 template<bool big_endian>
704 struct Stub_template
705 {
706 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
707 const int insn_num;
708 };
709
710
711 // Simple singleton class that creates/initializes/stores all types of stub
712 // templates.
713
714 template<bool big_endian>
715 class Stub_template_repertoire
716 {
717 public:
718 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
719
720 // Single static method to get stub template for a given stub type.
721 static const Stub_template<big_endian>*
722 get_stub_template(int type)
723 {
724 static Stub_template_repertoire<big_endian> singleton;
725 return singleton.stub_templates_[type];
726 }
727
728 private:
729 // Constructor - creates/initializes all stub templates.
730 Stub_template_repertoire();
731 ~Stub_template_repertoire()
732 { }
733
734 // Disallowing copy ctor and copy assignment operator.
735 Stub_template_repertoire(Stub_template_repertoire&);
736 Stub_template_repertoire& operator=(Stub_template_repertoire&);
737
738 // Data that stores all insn templates.
739 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
740 }; // End of "class Stub_template_repertoire".
741
742
743 // Constructor - creates/initializes all stub templates.
744
745 template<bool big_endian>
746 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
747 {
748 // Insn array definitions.
749 const static Insntype ST_NONE_INSNS[] = {};
750
751 const static Insntype ST_ADRP_BRANCH_INSNS[] =
752 {
753 0x90000010, /* adrp ip0, X */
754 /* ADR_PREL_PG_HI21(X) */
755 0x91000210, /* add ip0, ip0, :lo12:X */
756 /* ADD_ABS_LO12_NC(X) */
757 0xd61f0200, /* br ip0 */
758 0x00000000, /* alignment padding */
759 };
760
761 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
762 {
763 0x58000050, /* ldr ip0, 0x8 */
764 0xd61f0200, /* br ip0 */
765 0x00000000, /* address field */
766 0x00000000, /* address field */
767 };
768
769 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
770 {
771 0x58000090, /* ldr ip0, 0x10 */
772 0x10000011, /* adr ip1, #0 */
773 0x8b110210, /* add ip0, ip0, ip1 */
774 0xd61f0200, /* br ip0 */
775 0x00000000, /* address field */
776 0x00000000, /* address field */
777 0x00000000, /* alignment padding */
778 0x00000000, /* alignment padding */
779 };
780
781 const static Insntype ST_E_843419_INSNS[] =
782 {
783 0x00000000, /* Placeholder for erratum insn. */
784 0x14000000, /* b <label> */
785 };
786
787 // ST_E_835769 has the same stub template as ST_E_843419.
788 const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;
789
790 #define install_insn_template(T) \
791 const static Stub_template<big_endian> template_##T = { \
792 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
793 this->stub_templates_[T] = &template_##T
794
795 install_insn_template(ST_NONE);
796 install_insn_template(ST_ADRP_BRANCH);
797 install_insn_template(ST_LONG_BRANCH_ABS);
798 install_insn_template(ST_LONG_BRANCH_PCREL);
799 install_insn_template(ST_E_843419);
800 install_insn_template(ST_E_835769);
801
802 #undef install_insn_template
803 }
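// A usage sketch (hypothetical caller): fetching the ADRP/ADD/BR template
// defined above and its length looks like
//   const Stub_template<big_endian>* t =
//     Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // t->insn_num == 4 and t->insns[2] == 0xd61f0200 (br ip0).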
804
805
806 // Base class for stubs.
807
808 template<int size, bool big_endian>
809 class Stub_base
810 {
811 public:
812 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
813 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
814
815 static const AArch64_address invalid_address =
816 static_cast<AArch64_address>(-1);
817
818 static const section_offset_type invalid_offset =
819 static_cast<section_offset_type>(-1);
820
821 Stub_base(int type)
822 : destination_address_(invalid_address),
823 offset_(invalid_offset),
824 type_(type)
825 {}
826
827 ~Stub_base()
828 {}
829
830 // Get stub type.
831 int
832 type() const
833 { return this->type_; }
834
835 // Get stub template that provides stub insn information.
836 const Stub_template<big_endian>*
837 stub_template() const
838 {
839 return Stub_template_repertoire<big_endian>::
840 get_stub_template(this->type());
841 }
842
843 // Get destination address.
844 AArch64_address
845 destination_address() const
846 {
847 gold_assert(this->destination_address_ != this->invalid_address);
848 return this->destination_address_;
849 }
850
851 // Set destination address.
852 void
853 set_destination_address(AArch64_address address)
854 {
855 gold_assert(address != this->invalid_address);
856 this->destination_address_ = address;
857 }
858
859 // Reset the destination address.
860 void
861 reset_destination_address()
862 { this->destination_address_ = this->invalid_address; }
863
864 // Get offset of code stub. For Reloc_stub, it is the offset from the
865 // beginning of its containing stub table; for Erratum_stub, it is the offset
866 // from the end of reloc_stubs.
867 section_offset_type
868 offset() const
869 {
870 gold_assert(this->offset_ != this->invalid_offset);
871 return this->offset_;
872 }
873
874 // Set stub offset.
875 void
876 set_offset(section_offset_type offset)
877 { this->offset_ = offset; }
878
879 // Return the stub insn.
880 const Insntype*
881 insns() const
882 { return this->stub_template()->insns; }
883
884 // Return num of stub insns.
885 unsigned int
886 insn_num() const
887 { return this->stub_template()->insn_num; }
888
889 // Get size of the stub.
890 int
891 stub_size() const
892 {
893 return this->insn_num() *
894 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
895 }
896
897 // Write stub to output file.
898 void
899 write(unsigned char* view, section_size_type view_size)
900 { this->do_write(view, view_size); }
901
902 protected:
903 // Abstract method to be implemented by sub-classes.
904 virtual void
905 do_write(unsigned char*, section_size_type) = 0;
906
907 private:
908 // The last insn of a stub is a jump to destination insn. This field records
909 // the destination address.
910 AArch64_address destination_address_;
911 // The stub offset. Note this has different interpretations for a
912 // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
913 // beginning of the containing stub_table, whereas for Erratum_stub, this is
914 // the offset from the end of reloc_stubs.
915 section_offset_type offset_;
916 // Stub type.
917 const int type_;
918 }; // End of "Stub_base".
919
920
921 // Erratum stub class. An erratum stub differs from a reloc stub in that for
922 // each erratum occurrence, we generate an erratum stub. We never share erratum
923 // stubs, whereas for reloc stubs, different branch insns share a single reloc
924 // stub as long as the branch targets are the same. (More to the point, reloc
925 // stubs can be shared because they're used to reach a specific target, whereas
926 // erratum stubs branch back to the original control flow.)
927
928 template<int size, bool big_endian>
929 class Erratum_stub : public Stub_base<size, big_endian>
930 {
931 public:
932 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
933 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
934 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
935 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
936
937 static const int STUB_ADDR_ALIGN;
938
939 static const Insntype invalid_insn = static_cast<Insntype>(-1);
940
941 Erratum_stub(The_aarch64_relobj* relobj, int type,
942 unsigned shndx, unsigned int sh_offset)
943 : Stub_base<size, big_endian>(type), relobj_(relobj),
944 shndx_(shndx), sh_offset_(sh_offset),
945 erratum_insn_(invalid_insn),
946 erratum_address_(this->invalid_address)
947 {}
948
949 ~Erratum_stub() {}
950
951 // Return the object that contains the erratum.
952 The_aarch64_relobj*
953 relobj()
954 { return this->relobj_; }
955
956 // Get section index of the erratum.
957 unsigned int
958 shndx() const
959 { return this->shndx_; }
960
961 // Get section offset of the erratum.
962 unsigned int
963 sh_offset() const
964 { return this->sh_offset_; }
965
966 // Get the erratum insn. This is the insn located at erratum_insn_address.
967 Insntype
968 erratum_insn() const
969 {
970 gold_assert(this->erratum_insn_ != this->invalid_insn);
971 return this->erratum_insn_;
972 }
973
974 // Set the insn that the erratum happens to.
975 void
976 set_erratum_insn(Insntype insn)
977 { this->erratum_insn_ = insn; }
978
979 // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
980 // relocation spot; in that case the erratum_insn_ recorded at scanning phase
981 // is no longer the one we want to write out to the stub, so update
982 // erratum_insn_ with the relocated version. Also note that in this case xn
983 // must not be "PC", so it is safe to move the erratum insn from its original
984 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
985 // insn, which cannot be a relocation spot (an assertion enforces this).
986 void
987 update_erratum_insn(Insntype insn)
988 {
989 gold_assert(this->erratum_insn_ != this->invalid_insn);
990 switch (this->type())
991 {
992 case ST_E_843419:
993 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
994 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
995 gold_assert(Insn_utilities::aarch64_rd(insn) ==
996 Insn_utilities::aarch64_rd(this->erratum_insn()));
997 gold_assert(Insn_utilities::aarch64_rn(insn) ==
998 Insn_utilities::aarch64_rn(this->erratum_insn()));
999 // Update plain ld/st insn with relocated insn.
1000 this->erratum_insn_ = insn;
1001 break;
1002 case ST_E_835769:
1003 gold_assert(insn == this->erratum_insn());
1004 break;
1005 default:
1006 gold_unreachable();
1007 }
1008 }
1009
1010
1011 // Return the address where the erratum fix must be applied.
1012 AArch64_address
1013 erratum_address() const
1014 {
1015 gold_assert(this->erratum_address_ != this->invalid_address);
1016 return this->erratum_address_;
1017 }
1018
1019 // Set the address where the erratum fix must be applied.
1020 void
1021 set_erratum_address(AArch64_address addr)
1022 { this->erratum_address_ = addr; }
1023
1024 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1025 // sh_offset). We do not include 'type' in the calculation, because there is
1026 // at most one stub type at (obj, shndx, sh_offset).
1027 bool
1028 operator<(const Erratum_stub<size, big_endian>& k) const
1029 {
1030 if (this == &k)
1031 return false;
1032 // We group stubs by relobj.
1033 if (this->relobj_ != k.relobj_)
1034 return this->relobj_ < k.relobj_;
1035 // Then by section index.
1036 if (this->shndx_ != k.shndx_)
1037 return this->shndx_ < k.shndx_;
1038 // Lastly by section offset.
1039 return this->sh_offset_ < k.sh_offset_;
1040 }
1041
1042 protected:
1043 virtual void
1044 do_write(unsigned char*, section_size_type);
1045
1046 private:
1047 // The object that needs to be fixed.
1048 The_aarch64_relobj* relobj_;
1049 // The shndx in the object that needs to be fixed.
1050 const unsigned int shndx_;
1051 // The section offset in the object that needs to be fixed.
1052 const unsigned int sh_offset_;
1053 // The insn to be fixed.
1054 Insntype erratum_insn_;
1055 // The address of the above insn.
1056 AArch64_address erratum_address_;
1057 }; // End of "Erratum_stub".
1058
1059
1060 // Erratum subclass to wrap additional info needed by 843419. In fixing this
1061 // erratum, we may choose to replace 'adrp' with 'adr'; in that case, we need
1062 // adrp's code position (two or three insns before the erratum insn itself).
1063
1064 template<int size, bool big_endian>
1065 class E843419_stub : public Erratum_stub<size, big_endian>
1066 {
1067 public:
1068 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1069
1070 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1071 unsigned int shndx, unsigned int sh_offset,
1072 unsigned int adrp_sh_offset)
1073 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1074 adrp_sh_offset_(adrp_sh_offset)
1075 {}
1076
1077 unsigned int
1078 adrp_sh_offset() const
1079 { return this->adrp_sh_offset_; }
1080
1081 private:
1082 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
1083 // we can obtain it from its parent.)
1084 const unsigned int adrp_sh_offset_;
1085 };
1086
1087
1088 template<int size, bool big_endian>
1089 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1090
1091 // Comparator used in set definition.
1092 template<int size, bool big_endian>
1093 struct Erratum_stub_less
1094 {
1095 bool
1096 operator()(const Erratum_stub<size, big_endian>* s1,
1097 const Erratum_stub<size, big_endian>* s2) const
1098 { return *s1 < *s2; }
1099 };
1100
1101 // Erratum_stub implementation for writing stub to output file.
1102
1103 template<int size, bool big_endian>
1104 void
1105 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1106 {
1107 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1108 const Insntype* insns = this->insns();
1109 uint32_t num_insns = this->insn_num();
1110 Insntype* ip = reinterpret_cast<Insntype*>(view);
1111 // For the currently implemented errata 843419 and 835769, the first insn in
1112 // the stub is always a copy of the problematic insn (in 843419, the mem
1113 // access insn; in 835769, the mac insn), followed by a jump-back.
1114 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1115 for (uint32_t i = 1; i < num_insns; ++i)
1116 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1117 }
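// For example (illustrative encoding): an ST_E_843419 stub whose erratum
// insn is "ldr x0, [x1]" (0xf9400020) is written out as
//   0xf9400020   /* copied (possibly relocated) ld/st insn */
//   0x14000000   /* b <back>, patched later by Stub_table::relocate_stubs */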
1118
1119
1120 // Reloc stub class.
1121
1122 template<int size, bool big_endian>
1123 class Reloc_stub : public Stub_base<size, big_endian>
1124 {
1125 public:
1126 typedef Reloc_stub<size, big_endian> This;
1127 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1128
1129 // Branch range. This is used to calculate the section group size, as well as
1130 // to determine whether a stub is needed.
1131 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1132 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
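// In other words, a direct B/BL can reach offsets in
// [-0x8000000, 0x7fffffc], i.e. roughly +/-128MB from the branch insn.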
1133
1134 // Constants used to determine if an offset fits in the adrp instruction
1135 // encoding.
1136 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1137 static const int MIN_ADRP_IMM = -(1 << 20);
1138
1139 static const int BYTES_PER_INSN = 4;
1140 static const int STUB_ADDR_ALIGN;
1141
1142 // Determine whether the offset fits in the jump/branch instruction.
1143 static bool
1144 aarch64_valid_branch_offset_p(int64_t offset)
1145 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1146
1147 // Determine whether the offset fits in the adrp immediate field.
1148 static bool
1149 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1150 {
1151 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1152 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1153 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1154 }
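// For example (illustrative addresses): a stub at 0x400000 reaching a
// destination at 0x500000 gives adrp_imm == (0x500000 - 0x400000) >> 12
// == 0x100, well within [MIN_ADRP_IMM, MAX_ADRP_IMM]; the adrp stub can
// thus reach destinations within roughly +/-4GB of the stub's page.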
1155
1156 // Determine the stub type for a certain relocation, or ST_NONE if no stub
1157 // is needed.
1158 static int
1159 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1160 AArch64_address target);
1161
1162 Reloc_stub(int type)
1163 : Stub_base<size, big_endian>(type)
1164 { }
1165
1166 ~Reloc_stub()
1167 { }
1168
1169 // The key class used to index the stub instance in the stub table's stub map.
1170 class Key
1171 {
1172 public:
1173 Key(int type, const Symbol* symbol, const Relobj* relobj,
1174 unsigned int r_sym, int32_t addend)
1175 : type_(type), addend_(addend)
1176 {
1177 if (symbol != NULL)
1178 {
1179 this->r_sym_ = Reloc_stub::invalid_index;
1180 this->u_.symbol = symbol;
1181 }
1182 else
1183 {
1184 gold_assert(relobj != NULL && r_sym != invalid_index);
1185 this->r_sym_ = r_sym;
1186 this->u_.relobj = relobj;
1187 }
1188 }
1189
1190 ~Key()
1191 { }
1192
1193 // Return stub type.
1194 int
1195 type() const
1196 { return this->type_; }
1197
1198 // Return the local symbol index or invalid_index.
1199 unsigned int
1200 r_sym() const
1201 { return this->r_sym_; }
1202
1203 // Return the symbol if there is one.
1204 const Symbol*
1205 symbol() const
1206 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1207
1208 // Return the relobj if there is one.
1209 const Relobj*
1210 relobj() const
1211 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1212
1213 // Whether this equals to another key k.
1214 bool
1215 eq(const Key& k) const
1216 {
1217 return ((this->type_ == k.type_)
1218 && (this->r_sym_ == k.r_sym_)
1219 && ((this->r_sym_ != Reloc_stub::invalid_index)
1220 ? (this->u_.relobj == k.u_.relobj)
1221 : (this->u_.symbol == k.u_.symbol))
1222 && (this->addend_ == k.addend_));
1223 }
1224
1225 // Return a hash value.
1226 size_t
1227 hash_value() const
1228 {
1229 size_t name_hash_value = gold::string_hash<char>(
1230 (this->r_sym_ != Reloc_stub::invalid_index)
1231 ? this->u_.relobj->name().c_str()
1232 : this->u_.symbol->name());
1233 // We only have 4 stub types.
1234 size_t stub_type_hash_value = 0x03 & this->type_;
1235 return (name_hash_value
1236 ^ stub_type_hash_value
1237 ^ ((this->r_sym_ & 0x3fff) << 2)
1238 ^ ((this->addend_ & 0xffff) << 16));
1239 }
1240
1241 // Functors for STL associative containers.
1242 struct hash
1243 {
1244 size_t
1245 operator()(const Key& k) const
1246 { return k.hash_value(); }
1247 };
1248
1249 struct equal_to
1250 {
1251 bool
1252 operator()(const Key& k1, const Key& k2) const
1253 { return k1.eq(k2); }
1254 };
1255
1256 private:
1257 // Stub type.
1258 const int type_;
1259 // If this is a local symbol, this is the index in the defining object.
1260 // Otherwise, it is invalid_index for a global symbol.
1261 unsigned int r_sym_;
1262 // If r_sym_ is an invalid index, this points to a global symbol.
1263 // Otherwise, it points to a relobj. We used the unsized and target
1264 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
1265 // AArch64_relobj, in order to avoid making the stub class a template
1266 // as most of the stub machinery is endianness-neutral. However, it
1267 // may require a bit of casting done by users of this class.
1268 union
1269 {
1270 const Symbol* symbol;
1271 const Relobj* relobj;
1272 } u_;
1273 // Addend associated with a reloc.
1274 int32_t addend_;
1275 }; // End of inner class Reloc_stub::Key
1276
1277 protected:
1278 // This may be overridden in the child class.
1279 virtual void
1280 do_write(unsigned char*, section_size_type);
1281
1282 private:
1283 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1284 }; // End of Reloc_stub
1285
1286 template<int size, bool big_endian>
1287 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1288
1289 // Write data to output file.
1290
1291 template<int size, bool big_endian>
1292 void
1293 Reloc_stub<size, big_endian>::
1294 do_write(unsigned char* view, section_size_type)
1295 {
1296 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1297 const uint32_t* insns = this->insns();
1298 uint32_t num_insns = this->insn_num();
1299 Insntype* ip = reinterpret_cast<Insntype*>(view);
1300 for (uint32_t i = 0; i < num_insns; ++i)
1301 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1302 }
1303
1304
1305 // Determine the stub type for a certain relocation, or ST_NONE if no stub
1306 // is needed.
1307
1308 template<int size, bool big_endian>
1309 inline int
1310 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1311 unsigned int r_type, AArch64_address location, AArch64_address dest)
1312 {
1313 int64_t branch_offset = 0;
1314 switch(r_type)
1315 {
1316 case elfcpp::R_AARCH64_CALL26:
1317 case elfcpp::R_AARCH64_JUMP26:
1318 branch_offset = dest - location;
1319 break;
1320 default:
1321 gold_unreachable();
1322 }
1323
1324 if (aarch64_valid_branch_offset_p(branch_offset))
1325 return ST_NONE;
1326
1327 if (aarch64_valid_for_adrp_p(location, dest))
1328 return ST_ADRP_BRANCH;
1329
1330 // Always use PC-relative addressing in case of -shared or -pie.
1331 if (parameters->options().output_is_position_independent())
1332 return ST_LONG_BRANCH_PCREL;
1333
1334 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
1335 // But it is only applicable to non-shared, non-pie output.
1336 return ST_LONG_BRANCH_ABS;
1337 }
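// For example (illustrative addresses): a BL at 0x400000 targeting
// 0x10400000 has a branch offset of 0x10000000 (256MB), beyond the
// +/-128MB direct-branch range, so a stub is needed; the destination page
// is still within adrp range, so ST_ADRP_BRANCH is used. Only when the
// target lies outside the +/-4GB adrp range does the choice fall to
// ST_LONG_BRANCH_PCREL (position-independent output) or
// ST_LONG_BRANCH_ABS.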
1338
1339 // A class to hold stubs for the AArch64 target.
1340
1341 template<int size, bool big_endian>
1342 class Stub_table : public Output_data
1343 {
1344 public:
1345 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1346 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1347 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1348 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1349 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1350 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1351 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1352 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1353 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1354 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1355 typedef Stub_table<size, big_endian> The_stub_table;
1356 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1357 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1358 Reloc_stub_map;
1359 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1360 typedef Relocate_info<size, big_endian> The_relocate_info;
1361
1362 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1363 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1364
1365 Stub_table(The_aarch64_input_section* owner)
1366 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1367 erratum_stubs_size_(0), prev_data_size_(0)
1368 { }
1369
1370 ~Stub_table()
1371 { }
1372
1373 The_aarch64_input_section*
1374 owner() const
1375 { return owner_; }
1376
1377 // Whether this stub table is empty.
1378 bool
1379 empty() const
1380 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1381
1382 // Return the current data size.
1383 off_t
1384 current_data_size() const
1385 { return this->current_data_size_for_child(); }
1386
1387 // Add a STUB using KEY. The caller is responsible for avoiding addition
1388 // if a STUB with the same key has already been added.
1389 void
1390 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1391
1392 // Add an erratum stub into the erratum stub set. The set is ordered by
1393 // (relobj, shndx, sh_offset).
1394 void
1395 add_erratum_stub(The_erratum_stub* stub);
1396
1397 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1398 The_erratum_stub*
1399 find_erratum_stub(The_aarch64_relobj* a64relobj,
1400 unsigned int shndx, unsigned int sh_offset);
1401
1402 // Find all the errata for a given input section. The return value is a pair
1403 // of iterators [begin, end).
1404 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1405 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1406 unsigned int shndx);
1407
1408 // Compute the erratum stub address.
1409 AArch64_address
1410 erratum_stub_address(The_erratum_stub* stub) const
1411 {
1412 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1413 The_erratum_stub::STUB_ADDR_ALIGN);
1414 r += stub->offset();
1415 return r;
1416 }
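// For example (illustrative sizes): if the reloc stubs occupy 48 bytes,
// reloc_stubs_size_ == 48 (already STUB_ADDR_ALIGN aligned), so an
// erratum stub with offset() == 8 is placed at address() + 56.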
1417
1418 // Finalize stubs. No-op here, just for completeness.
1419 void
1420 finalize_stubs()
1421 { }
1422
1423 // Look up a relocation stub using KEY. Return NULL if there is none.
1424 The_reloc_stub*
1425 find_reloc_stub(The_reloc_stub_key& key)
1426 {
1427 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1428 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1429 }
1430
1431 // Relocate stubs in this stub table.
1432 void
1433 relocate_stubs(const The_relocate_info*,
1434 The_target_aarch64*,
1435 Output_section*,
1436 unsigned char*,
1437 AArch64_address,
1438 section_size_type);
1439
1440 // Update data size at the end of a relaxation pass. Return true if data size
1441 // is different from that of the previous relaxation pass.
1442 bool
1443 update_data_size_changed_p()
1444 {
1445 // No addralign changed here.
1446 off_t s = align_address(this->reloc_stubs_size_,
1447 The_erratum_stub::STUB_ADDR_ALIGN)
1448 + this->erratum_stubs_size_;
1449 bool changed = (s != this->prev_data_size_);
1450 this->prev_data_size_ = s;
1451 return changed;
1452 }
1453
1454 protected:
1455 // Write out section contents.
1456 void
1457 do_write(Output_file*);
1458
1459 // Return the required alignment.
1460 uint64_t
1461 do_addralign() const
1462 {
1463 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1464 The_erratum_stub::STUB_ADDR_ALIGN);
1465 }
1466
1467 // Reset address and file offset.
1468 void
1469 do_reset_address_and_file_offset()
1470 { this->set_current_data_size_for_child(this->prev_data_size_); }
1471
1472 // Set final data size.
1473 void
1474 set_final_data_size()
1475 { this->set_data_size(this->current_data_size()); }
1476
1477 private:
1478 // Relocate one stub.
1479 void
1480 relocate_stub(The_reloc_stub*,
1481 const The_relocate_info*,
1482 The_target_aarch64*,
1483 Output_section*,
1484 unsigned char*,
1485 AArch64_address,
1486 section_size_type);
1487
1488 private:
1489 // Owner of this stub table.
1490 The_aarch64_input_section* owner_;
1491 // The relocation stubs.
1492 Reloc_stub_map reloc_stubs_;
1493 // The erratum stubs.
1494 Erratum_stub_set erratum_stubs_;
1495 // Size of reloc stubs.
1496 off_t reloc_stubs_size_;
1497 // Size of erratum stubs.
1498 off_t erratum_stubs_size_;
1499 // Data size of this stub table in the previous relaxation pass.
1500 off_t prev_data_size_;
1501 }; // End of Stub_table
1502
1503
1504 // Add an erratum stub into the erratum stub set. The set is ordered by
1505 // (relobj, shndx, sh_offset).
1506
1507 template<int size, bool big_endian>
1508 void
1509 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1510 {
1511 std::pair<Erratum_stub_set_iter, bool> ret =
1512 this->erratum_stubs_.insert(stub);
1513 gold_assert(ret.second);
1514 this->erratum_stubs_size_ = align_address(
1515 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1516 stub->set_offset(this->erratum_stubs_size_);
1517 this->erratum_stubs_size_ += stub->stub_size();
1518 }
1519
1520
1521 // Find the erratum stub, if any, for the given (obj, shndx, sh_offset).
1522
1523 template<int size, bool big_endian>
1524 Erratum_stub<size, big_endian>*
1525 Stub_table<size, big_endian>::find_erratum_stub(
1526 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1527 {
1528 // A dummy object used as key to search in the set.
1529 The_erratum_stub key(a64relobj, ST_NONE,
1530 shndx, sh_offset);
1531 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1532 if (i != this->erratum_stubs_.end())
1533 {
1534 The_erratum_stub* stub(*i);
1535 gold_assert(stub->erratum_insn() != 0);
1536 return stub;
1537 }
1538 return NULL;
1539 }
1540
1541
1542 // Find all the errata for a given input section. The return value is a pair of
1543 // iterators [begin, end).
1544
1545 template<int size, bool big_endian>
1546 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1547 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1548 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1549 The_aarch64_relobj* a64relobj, unsigned int shndx)
1550 {
1551 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1552 Erratum_stub_set_iter start, end;
1553 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1554 start = this->erratum_stubs_.lower_bound(&low_key);
1555 if (start == this->erratum_stubs_.end())
1556 return Result_pair(this->erratum_stubs_.end(),
1557 this->erratum_stubs_.end());
1558 end = start;
1559 while (end != this->erratum_stubs_.end() &&
1560 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1561 ++end;
1562 return Result_pair(start, end);
1563 }
1564
1565
1566 // Add a STUB using KEY. The caller is responsible for avoiding addition
1567 // if a STUB with the same key has already been added.
1568
1569 template<int size, bool big_endian>
1570 void
1571 Stub_table<size, big_endian>::add_reloc_stub(
1572 The_reloc_stub* stub, const The_reloc_stub_key& key)
1573 {
1574 gold_assert(stub->type() == key.type());
1575 this->reloc_stubs_[key] = stub;
1576
1577 // Assign stub offset early. We can do this because we never remove
1578 // reloc stubs and they are in the beginning of the stub table.
1579 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1580 The_reloc_stub::STUB_ADDR_ALIGN);
1581 stub->set_offset(this->reloc_stubs_size_);
1582 this->reloc_stubs_size_ += stub->stub_size();
1583 }
1584
1585
1586 // Relocate all stubs in this stub table.
1587
1588 template<int size, bool big_endian>
1589 void
1590 Stub_table<size, big_endian>::
1591 relocate_stubs(const The_relocate_info* relinfo,
1592 The_target_aarch64* target_aarch64,
1593 Output_section* output_section,
1594 unsigned char* view,
1595 AArch64_address address,
1596 section_size_type view_size)
1597 {
1598 // "view_size" is the total size of the stub_table.
1599 gold_assert(address == this->address() &&
1600 view_size == static_cast<section_size_type>(this->data_size()));
1601 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1602 p != this->reloc_stubs_.end(); ++p)
1603 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1604 view, address, view_size);
1605
1606 // Just for convenience.
1607 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1608
1609 // Now 'relocate' erratum stubs.
1610 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1611 i != this->erratum_stubs_.end(); ++i)
1612 {
1613 AArch64_address stub_address = this->erratum_stub_address(*i);
1614 // The address of "b" in the stub that is to be "relocated".
1615 AArch64_address stub_b_insn_address;
1616 // Branch offset that is to be filled in "b" insn.
1617 int b_offset = 0;
1618 switch ((*i)->type())
1619 {
1620 case ST_E_843419:
1621 case ST_E_835769:
1622 // The 1st insn of the erratum could be a relocation spot;
1623 // in this case we need to fix it with
1624 // "(*i)->erratum_insn()".
1625 elfcpp::Swap<32, big_endian>::writeval(
1626 view + (stub_address - this->address()),
1627 (*i)->erratum_insn());
1628 // For the erratum, the 2nd insn is a b-insn to be patched
1629 // (relocated).
1630 stub_b_insn_address = stub_address + 1 * BPI;
1631 b_offset = (*i)->destination_address() - stub_b_insn_address;
1632 AArch64_relocate_functions<size, big_endian>::construct_b(
1633 view + (stub_b_insn_address - this->address()),
1634 ((unsigned int)(b_offset)) & 0xfffffff);
1635 break;
1636 default:
1637 gold_unreachable();
1638 break;
1639 }
1640 }
1641 }
1642
1643
1644 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1645
1646 template<int size, bool big_endian>
1647 void
1648 Stub_table<size, big_endian>::
1649 relocate_stub(The_reloc_stub* stub,
1650 const The_relocate_info* relinfo,
1651 The_target_aarch64* target_aarch64,
1652 Output_section* output_section,
1653 unsigned char* view,
1654 AArch64_address address,
1655 section_size_type view_size)
1656 {
1657 // "offset" is the offset from the beginning of the stub_table.
1658 section_size_type offset = stub->offset();
1659 section_size_type stub_size = stub->stub_size();
1660 // "view_size" is the total size of the stub_table.
1661 gold_assert(offset + stub_size <= view_size);
1662
1663 target_aarch64->relocate_stub(stub, relinfo, output_section,
1664 view + offset, address + offset, view_size);
1665 }
1666
1667
1668 // Write out the stubs to file.
1669
1670 template<int size, bool big_endian>
1671 void
1672 Stub_table<size, big_endian>::do_write(Output_file* of)
1673 {
1674 off_t offset = this->offset();
1675 const section_size_type oview_size =
1676 convert_to_section_size_type(this->data_size());
1677 unsigned char* const oview = of->get_output_view(offset, oview_size);
1678
1679 // Write relocation stubs.
1680 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1681 p != this->reloc_stubs_.end(); ++p)
1682 {
1683 The_reloc_stub* stub = p->second;
1684 AArch64_address address = this->address() + stub->offset();
1685 gold_assert(address ==
1686 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1687 stub->write(oview + stub->offset(), stub->stub_size());
1688 }
1689
1690 // Write erratum stubs.
1691 unsigned int erratum_stub_start_offset =
1692 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1693 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1694 p != this->erratum_stubs_.end(); ++p)
1695 {
1696 The_erratum_stub* stub(*p);
1697 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1698 stub->stub_size());
1699 }
1700
1701 of->write_output_view(this->offset(), oview_size, oview);
1702 }
1703
1704
1705 // AArch64_relobj class.
1706
1707 template<int size, bool big_endian>
1708 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1709 {
1710 public:
1711 typedef AArch64_relobj<size, big_endian> This;
1712 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1713 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1714 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1715 typedef Stub_table<size, big_endian> The_stub_table;
1716 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1717 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1718 typedef std::vector<The_stub_table*> Stub_table_list;
1719 static const AArch64_address invalid_address =
1720 static_cast<AArch64_address>(-1);
1721
1722 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1723 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1724 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1725 stub_tables_()
1726 { }
1727
1728 ~AArch64_relobj()
1729 { }
1730
1731 // Return the stub table of the SHNDX-th section if there is one.
1732 The_stub_table*
1733 stub_table(unsigned int shndx) const
1734 {
1735 gold_assert(shndx < this->stub_tables_.size());
1736 return this->stub_tables_[shndx];
1737 }
1738
1739 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1740 void
1741 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1742 {
1743 gold_assert(shndx < this->stub_tables_.size());
1744 this->stub_tables_[shndx] = stub_table;
1745 }
1746
1747 // Entry point for errata scanning.
1748 void
1749 scan_errata(unsigned int shndx,
1750 const elfcpp::Shdr<size, big_endian>&,
1751 Output_section*, const Symbol_table*,
1752 The_target_aarch64*);
1753
1754 // Scan all relocation sections for stub generation.
1755 void
1756 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1757 const Layout*);
1758
1759 // Whether a section is a scannable text section.
1760 bool
1761 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1762 const Output_section*, const Symbol_table*);
1763
1764 // Convert regular input section with index SHNDX to a relaxed section.
1765 void
1766 convert_input_section_to_relaxed_section(unsigned /* shndx */)
1767 {
1768 // The stubs have relocations and we need to process them after writing
1769 // out the stubs. So relocation processing must now follow section writes.
1770 this->set_relocs_must_follow_section_writes();
1771 }
1772
1773 // Structure for mapping symbol position.
1774 struct Mapping_symbol_position
1775 {
1776 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1777 shndx_(shndx), offset_(offset)
1778 {}
1779
1780 // "<" comparator used in ordered_map container.
1781 bool
1782 operator<(const Mapping_symbol_position& p) const
1783 {
1784 return (this->shndx_ < p.shndx_
1785 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1786 }
1787
1788 // Section index.
1789 unsigned int shndx_;
1790
1791 // Section offset.
1792 AArch64_address offset_;
1793 };
1794
1795 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1796
1797 protected:
1798 // Post constructor setup.
1799 void
1800 do_setup()
1801 {
1802 // Call parent's setup method.
1803 Sized_relobj_file<size, big_endian>::do_setup();
1804
1805 // Initialize look-up tables.
1806 this->stub_tables_.resize(this->shnum());
1807 }
1808
1809 virtual void
1810 do_relocate_sections(
1811 const Symbol_table* symtab, const Layout* layout,
1812 const unsigned char* pshdrs, Output_file* of,
1813 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1814
1815 // Count local symbols and (optionally) record mapping info.
1816 virtual void
1817 do_count_local_symbols(Stringpool_template<char>*,
1818 Stringpool_template<char>*);
1819
1820 private:
1821 // Fix all errata in the object.
1822 void
1823 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1824
1825 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1826 // applied.
1827 bool
1828 try_fix_erratum_843419_optimized(
1829 The_erratum_stub*,
1830 typename Sized_relobj_file<size, big_endian>::View_size&);
1831
1832 // Whether a section needs to be scanned for relocation stubs.
1833 bool
1834 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1835 const Relobj::Output_sections&,
1836 const Symbol_table*, const unsigned char*);
1837
1838 // List of stub tables.
1839 Stub_table_list stub_tables_;
1840
1841 // Mapping symbol information sorted by (section index, section_offset).
1842 Mapping_symbol_info mapping_symbol_info_;
1843 }; // End of AArch64_relobj
1844
1845
1846 // Override to record mapping symbol information.
1847 template<int size, bool big_endian>
1848 void
1849 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1850 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1851 {
1852 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1853
1854 // Only erratum-fixing work needs mapping symbols, so skip this
1855 // time-consuming processing if we are not fixing errata.
1856 if (!parameters->options().fix_cortex_a53_843419()
1857 && !parameters->options().fix_cortex_a53_835769())
1858 return;
1859
1860 const unsigned int loccount = this->local_symbol_count();
1861 if (loccount == 0)
1862 return;
1863
1864 // Read the symbol table section header.
1865 const unsigned int symtab_shndx = this->symtab_shndx();
1866 elfcpp::Shdr<size, big_endian>
1867 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1868 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1869
1870 // Read the local symbols.
1871 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1872 gold_assert(loccount == symtabshdr.get_sh_info());
1873 off_t locsize = loccount * sym_size;
1874 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1875 locsize, true, true);
1876
1877 // For mapping symbol processing, we need to read the symbol names.
1878 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1879 if (strtab_shndx >= this->shnum())
1880 {
1881 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1882 return;
1883 }
1884
1885 elfcpp::Shdr<size, big_endian>
1886 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1887 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1888 {
1889 this->error(_("symbol table name section has wrong type: %u"),
1890 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1891 return;
1892 }
1893
1894 const char* pnames =
1895 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1896 strtabshdr.get_sh_size(),
1897 false, false));
1898
1899 // Skip the first dummy symbol.
1900 psyms += sym_size;
1901 typename Sized_relobj_file<size, big_endian>::Local_values*
1902 plocal_values = this->local_values();
1903 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1904 {
1905 elfcpp::Sym<size, big_endian> sym(psyms);
1906 Symbol_value<size>& lv((*plocal_values)[i]);
1907 AArch64_address input_value = lv.input_value();
1908
1909 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1910 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1911 // symbols.
1912 // Mapping symbols take one of the 4 forms below (see the sketch after this function):
1913 // a) $x
1914 // b) $x.<any...>
1915 // c) $d
1916 // d) $d.<any...>
1917 const char* sym_name = pnames + sym.get_st_name();
1918 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1919 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1920 {
1921 bool is_ordinary;
1922 unsigned int input_shndx =
1923 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1924 gold_assert(is_ordinary);
1925
1926 Mapping_symbol_position msp(input_shndx, input_value);
1927 // Insert mapping_symbol_info into map whose ordering is defined by
1928 // (shndx, offset_within_section).
1929 this->mapping_symbol_info_[msp] = sym_name[1];
1930 }
1931 }
1932 }
1933
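// Illustrative sketch (not part of gold; kept out of the build): it restates
// the mapping-symbol test used in do_count_local_symbols() above as a
// standalone predicate, with a few example names. The helper names are
// hypothetical.
#if 0
#include <cassert>

// Return true if NAME is an AArch64 mapping symbol: $x, $x.<any>, $d, $d.<any>.
static bool
is_mapping_symbol_name(const char* name)
{
  return (name[0] == '$'
          && (name[1] == 'x' || name[1] == 'd')
          && (name[2] == '\0' || name[2] == '.'));
}

static void
mapping_symbol_examples()
{
  assert(is_mapping_symbol_name("$x"));      // Start of code.
  assert(is_mapping_symbol_name("$x.foo"));  // Start of code, with suffix.
  assert(is_mapping_symbol_name("$d"));      // Start of data.
  assert(is_mapping_symbol_name("$d.1"));    // Start of data, with suffix.
  assert(!is_mapping_symbol_name("$t"));     // Thumb marker, 32-bit ARM only.
  assert(!is_mapping_symbol_name("$data"));  // Third character is neither NUL nor '.'.
}
#endif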
1934
1935 // Fix all errata in the object.
1936
1937 template<int size, bool big_endian>
1938 void
1939 AArch64_relobj<size, big_endian>::fix_errata(
1940 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1941 {
1942 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1943 unsigned int shnum = this->shnum();
1944 for (unsigned int i = 1; i < shnum; ++i)
1945 {
1946 The_stub_table* stub_table = this->stub_table(i);
1947 if (!stub_table)
1948 continue;
1949 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1950 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1951 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1952 while (p != end)
1953 {
1954 The_erratum_stub* stub = *p;
1955 typename Sized_relobj_file<size, big_endian>::View_size&
1956 pview((*pviews)[i]);
1957
1958 // Double check data before fix.
1959 gold_assert(pview.address + stub->sh_offset()
1960 == stub->erratum_address());
1961
1962 // Update previously recorded erratum insn with relocated
1963 // version.
1964 Insntype* ip =
1965 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1966 Insntype insn_to_fix = ip[0];
1967 stub->update_erratum_insn(insn_to_fix);
1968
1969 // First try to see if erratum is 843419 and if it can be fixed
1970 // without using branch-to-stub.
1971 if (!try_fix_erratum_843419_optimized(stub, pview))
1972 {
1973 // Replace the erratum insn with a branch-to-stub (a B-encoding sketch follows this function).
1974 AArch64_address stub_address =
1975 stub_table->erratum_stub_address(stub);
1976 unsigned int b_offset = stub_address - stub->erratum_address();
1977 AArch64_relocate_functions<size, big_endian>::construct_b(
1978 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1979 }
1980 ++p;
1981 }
1982 }
1983 }
1984
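// Illustrative sketch (not part of gold; kept out of the build): construct_b
// above presumably plants an unconditional B to the stub. An AArch64 B insn
// holds a signed, 4-byte-scaled offset in its low 26 bits, which is why the
// byte offset above is masked to 28 bits. A minimal encoder under that
// assumption; the helper name is hypothetical.
#if 0
#include <cassert>
#include <cstdint>

// Encode "B <byte_offset>"; the offset must be a multiple of 4 and within
// +/-128MiB. imm26 holds byte_offset / 4.
static uint32_t
encode_aarch64_b(int64_t byte_offset)
{
  assert((byte_offset & 3) == 0);
  assert(byte_offset >= -(static_cast<int64_t>(1) << 27)
         && byte_offset < (static_cast<int64_t>(1) << 27));
  return 0x14000000u | (static_cast<uint32_t>(byte_offset >> 2) & 0x03ffffffu);
}

// Example: a branch 8 bytes forward encodes as 0x14000002.
#endif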
1985
1986 // This is an optimization for erratum 843419. The erratum sequence must begin
1987 // with 'adrp'; when the final value computed by the adrp also fits in an adr,
1988 // we simply replace the 'adrp' with an 'adr', saving two jumps per occurrence.
1989 // (Note, however, that in this case we do not delete the erratum stub (it is
1990 // too late to do so); it is merely generated without ever being called.)
1991
1992 template<int size, bool big_endian>
1993 bool
1994 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
1995 The_erratum_stub* stub,
1996 typename Sized_relobj_file<size, big_endian>::View_size& pview)
1997 {
1998 if (stub->type() != ST_E_843419)
1999 return false;
2000
2001 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2002 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2003 E843419_stub<size, big_endian>* e843419_stub =
2004 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2005 AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
2006 Insntype* adrp_view = reinterpret_cast<Insntype*>(
2007 pview.view + e843419_stub->adrp_sh_offset());
2008 Insntype adrp_insn = adrp_view[0];
2009 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2010 // Get adrp 33-bit signed imm value.
2011 int64_t adrp_imm = Insn_utilities::
2012 aarch64_adrp_decode_imm(adrp_insn);
2013 // adrp - final value transferred to target register is calculated as:
2014 // PC[11:0] = Zeros(12)
2015 // adrp_dest_value = PC + adrp_imm;
2016 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2017 // adr - final value transferred to target register is calculated as:
2018 // PC + adr_imm
2019 // So we have:
2020 // PC + adr_imm = adrp_dest_value
2021 // ==>
2022 // adr_imm = adrp_dest_value - PC
2023 int64_t adr_imm = adrp_dest_value - pc;
2024 // Check if the imm fits in adr (21-bit signed); a worked example follows this function.
2025 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2026 {
2027 // Convert 'adrp' into 'adr'.
2028 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2029 adr_insn = Insn_utilities::
2030 aarch64_adr_encode_imm(adr_insn, adr_imm);
2031 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2032 return true;
2033 }
2034 return false;
2035 }
2036
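// Illustrative sketch (not part of gold; kept out of the build): a worked
// version of the range check above. ADRP computes page(PC) + imm, where
// page(PC) clears the low 12 bits of PC; ADR computes PC + imm with a 21-bit
// signed immediate, so the rewrite is only possible when the page-aligned
// target lies within +/-1MiB of the adrp itself. Helper names are hypothetical.
#if 0
#include <cassert>
#include <cstdint>

// Return true if an adrp at PC with (byte) immediate ADRP_IMM can be rewritten
// as an adr, storing the adr immediate in *ADR_IMM.
static bool
adrp_fits_in_adr(int64_t pc, int64_t adrp_imm, int64_t* adr_imm)
{
  int64_t adrp_dest_value = (pc & ~static_cast<int64_t>(0xfff)) + adrp_imm;
  *adr_imm = adrp_dest_value - pc;
  return -(1 << 20) <= *adr_imm && *adr_imm < (1 << 20);
}

static void
adrp_to_adr_examples()
{
  int64_t imm;
  // adrp at 0x10008 targeting page 0x11000: adr_imm = 0x11000 - 0x10008 = 0xff8.
  assert(adrp_fits_in_adr(0x10008, 0x1000, &imm) && imm == 0xff8);
  // A target page 2MiB away does not fit in adr's 21-bit signed immediate.
  assert(!adrp_fits_in_adr(0x10008, 0x200000, &imm));
}
#endif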
2037
2038 // Relocate sections.
2039
2040 template<int size, bool big_endian>
2041 void
2042 AArch64_relobj<size, big_endian>::do_relocate_sections(
2043 const Symbol_table* symtab, const Layout* layout,
2044 const unsigned char* pshdrs, Output_file* of,
2045 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2046 {
2047 // Call parent to relocate sections.
2048 Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
2049 pshdrs, of, pviews);
2050
2051 // We do not generate stubs if doing a relocatable link.
2052 if (parameters->options().relocatable())
2053 return;
2054
2055 if (parameters->options().fix_cortex_a53_843419()
2056 || parameters->options().fix_cortex_a53_835769())
2057 this->fix_errata(pviews);
2058
2059 Relocate_info<size, big_endian> relinfo;
2060 relinfo.symtab = symtab;
2061 relinfo.layout = layout;
2062 relinfo.object = this;
2063
2064 // Relocate stub tables.
2065 unsigned int shnum = this->shnum();
2066 The_target_aarch64* target = The_target_aarch64::current_target();
2067
2068 for (unsigned int i = 1; i < shnum; ++i)
2069 {
2070 The_aarch64_input_section* aarch64_input_section =
2071 target->find_aarch64_input_section(this, i);
2072 if (aarch64_input_section != NULL
2073 && aarch64_input_section->is_stub_table_owner()
2074 && !aarch64_input_section->stub_table()->empty())
2075 {
2076 Output_section* os = this->output_section(i);
2077 gold_assert(os != NULL);
2078
2079 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2080 relinfo.reloc_shdr = NULL;
2081 relinfo.data_shndx = i;
2082 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2083
2084 typename Sized_relobj_file<size, big_endian>::View_size&
2085 view_struct = (*pviews)[i];
2086 gold_assert(view_struct.view != NULL);
2087
2088 The_stub_table* stub_table = aarch64_input_section->stub_table();
2089 off_t offset = stub_table->address() - view_struct.address;
2090 unsigned char* view = view_struct.view + offset;
2091 AArch64_address address = stub_table->address();
2092 section_size_type view_size = stub_table->data_size();
2093 stub_table->relocate_stubs(&relinfo, target, os, view, address,
2094 view_size);
2095 }
2096 }
2097 }
2098
2099
2100 // Determine if an input section is scannable for stub processing. SHDR is
2101 // the header of the section and SHNDX is the section index. OS is the output
2102 // section for the input section and SYMTAB is the global symbol table used to
2103 // look up ICF information.
2104
2105 template<int size, bool big_endian>
2106 bool
2107 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2108 const elfcpp::Shdr<size, big_endian>& text_shdr,
2109 unsigned int text_shndx,
2110 const Output_section* os,
2111 const Symbol_table* symtab)
2112 {
2113 // Skip any empty sections, unallocated sections or sections whose
2114 // type are not SHT_PROGBITS.
2115 if (text_shdr.get_sh_size() == 0
2116 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2117 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2118 return false;
2119
2120 // Skip any discarded or ICF'ed sections.
2121 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2122 return false;
2123
2124 // Skip exception frame.
2125 if (strcmp(os->name(), ".eh_frame") == 0)
2126 return false;
2127
2128 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2129 os->find_relaxed_input_section(this, text_shndx) != NULL);
2130
2131 return true;
2132 }
2133
2134
2135 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2136 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2137
2138 template<int size, bool big_endian>
2139 bool
2140 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2141 const elfcpp::Shdr<size, big_endian>& shdr,
2142 const Relobj::Output_sections& out_sections,
2143 const Symbol_table* symtab,
2144 const unsigned char* pshdrs)
2145 {
2146 unsigned int sh_type = shdr.get_sh_type();
2147 if (sh_type != elfcpp::SHT_RELA)
2148 return false;
2149
2150 // Ignore empty section.
2151 off_t sh_size = shdr.get_sh_size();
2152 if (sh_size == 0)
2153 return false;
2154
2155 // Ignore reloc section with unexpected symbol table. The
2156 // error will be reported in the final link.
2157 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2158 return false;
2159
2160 gold_assert(sh_type == elfcpp::SHT_RELA);
2161 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2162
2163 // Ignore reloc section with unexpected entsize or uneven size.
2164 // The error will be reported in the final link.
2165 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2166 return false;
2167
2168 // Ignore reloc section with bad info. This error will be
2169 // reported in the final link.
2170 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2171 if (text_shndx >= this->shnum())
2172 return false;
2173
2174 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2175 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2176 text_shndx * shdr_size);
2177 return this->text_section_is_scannable(text_shdr, text_shndx,
2178 out_sections[text_shndx], symtab);
2179 }
2180
2181
2182 // Scan section SHNDX for errata 843419 and 835769.
2183
2184 template<int size, bool big_endian>
2185 void
2186 AArch64_relobj<size, big_endian>::scan_errata(
2187 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2188 Output_section* os, const Symbol_table* symtab,
2189 The_target_aarch64* target)
2190 {
2191 if (shdr.get_sh_size() == 0
2192 || (shdr.get_sh_flags() &
2193 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2194 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2195 return;
2196
2197 if (!os || symtab->is_section_folded(this, shndx)) return;
2198
2199 AArch64_address output_offset = this->get_output_section_offset(shndx);
2200 AArch64_address output_address;
2201 if (output_offset != invalid_address)
2202 output_address = os->address() + output_offset;
2203 else
2204 {
2205 const Output_relaxed_input_section* poris =
2206 os->find_relaxed_input_section(this, shndx);
2207 if (!poris) return;
2208 output_address = poris->address();
2209 }
2210
2211 section_size_type input_view_size = 0;
2212 const unsigned char* input_view =
2213 this->section_contents(shndx, &input_view_size, false);
2214
2215 Mapping_symbol_position section_start(shndx, 0);
2216 // Find the first mapping symbol record within section shndx.
2217 typename Mapping_symbol_info::const_iterator p =
2218 this->mapping_symbol_info_.lower_bound(section_start);
2219 while (p != this->mapping_symbol_info_.end() &&
2220 p->first.shndx_ == shndx)
2221 {
2222 typename Mapping_symbol_info::const_iterator prev = p;
2223 ++p;
2224 if (prev->second == 'x')
2225 {
2226 section_size_type span_start =
2227 convert_to_section_size_type(prev->first.offset_);
2228 section_size_type span_end;
2229 if (p != this->mapping_symbol_info_.end()
2230 && p->first.shndx_ == shndx)
2231 span_end = convert_to_section_size_type(p->first.offset_);
2232 else
2233 span_end = convert_to_section_size_type(shdr.get_sh_size());
2234
2235 // We do not share the scanning code between the two errata: for 843419 only
2236 // the last few insns of each page are examined (fast), whereas for 835769
2237 // every insn pair is checked. (A span-derivation sketch follows this function.)
2238
2239 if (parameters->options().fix_cortex_a53_843419())
2240 target->scan_erratum_843419_span(
2241 this, shndx, span_start, span_end,
2242 const_cast<unsigned char*>(input_view), output_address);
2243
2244 if (parameters->options().fix_cortex_a53_835769())
2245 target->scan_erratum_835769_span(
2246 this, shndx, span_start, span_end,
2247 const_cast<unsigned char*>(input_view), output_address);
2248 }
2249 }
2250 }
2251
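// Illustrative sketch (not part of gold; kept out of the build): the loop
// above walks the (shndx, offset)-ordered mapping-symbol map and treats each
// '$x' record as opening a code span that ends at the next mapping symbol in
// the same section, or at the end of the section. A standalone version over a
// plain std::map; the type and function names are hypothetical.
#if 0
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Key is (section index, offset); value is the mapping-symbol kind, 'x' or 'd'.
typedef std::map<std::pair<unsigned int, uint64_t>, char> Mapping_map;

// Collect the half-open [start, end) code spans of section SHNDX, whose size
// is SECTION_SIZE bytes.
static std::vector<std::pair<uint64_t, uint64_t> >
code_spans(const Mapping_map& map, unsigned int shndx, uint64_t section_size)
{
  std::vector<std::pair<uint64_t, uint64_t> > spans;
  Mapping_map::const_iterator p =
      map.lower_bound(std::make_pair(shndx, static_cast<uint64_t>(0)));
  while (p != map.end() && p->first.first == shndx)
    {
      Mapping_map::const_iterator prev = p;
      ++p;
      if (prev->second == 'x')
        {
          uint64_t end = (p != map.end() && p->first.first == shndx
                          ? p->first.second
                          : section_size);
          spans.push_back(std::make_pair(prev->first.second, end));
        }
    }
  return spans;
}

// Example: records {(1, 0, 'x'), (1, 0x40, 'd'), (1, 0x60, 'x')} in a
// 0x100-byte section yield the code spans [0, 0x40) and [0x60, 0x100).
#endif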
2252
2253 // Scan relocations for stub generation.
2254
2255 template<int size, bool big_endian>
2256 void
2257 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2258 The_target_aarch64* target,
2259 const Symbol_table* symtab,
2260 const Layout* layout)
2261 {
2262 unsigned int shnum = this->shnum();
2263 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2264
2265 // Read the section headers.
2266 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2267 shnum * shdr_size,
2268 true, true);
2269
2270 // To speed up processing, we set up hash tables for fast lookup of
2271 // input offsets to output addresses.
2272 this->initialize_input_to_output_maps();
2273
2274 const Relobj::Output_sections& out_sections(this->output_sections());
2275
2276 Relocate_info<size, big_endian> relinfo;
2277 relinfo.symtab = symtab;
2278 relinfo.layout = layout;
2279 relinfo.object = this;
2280
2281 // Do relocation stubs scanning.
2282 const unsigned char* p = pshdrs + shdr_size;
2283 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2284 {
2285 const elfcpp::Shdr<size, big_endian> shdr(p);
2286 if (parameters->options().fix_cortex_a53_843419()
2287 || parameters->options().fix_cortex_a53_835769())
2288 scan_errata(i, shdr, out_sections[i], symtab, target);
2289 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2290 pshdrs))
2291 {
2292 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2293 AArch64_address output_offset =
2294 this->get_output_section_offset(index);
2295 AArch64_address output_address;
2296 if (output_offset != invalid_address)
2297 {
2298 output_address = out_sections[index]->address() + output_offset;
2299 }
2300 else
2301 {
2302 // Currently this only happens for a relaxed section.
2303 const Output_relaxed_input_section* poris =
2304 out_sections[index]->find_relaxed_input_section(this, index);
2305 gold_assert(poris != NULL);
2306 output_address = poris->address();
2307 }
2308
2309 // Get the relocations.
2310 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2311 shdr.get_sh_size(),
2312 true, false);
2313
2314 // Get the section contents.
2315 section_size_type input_view_size = 0;
2316 const unsigned char* input_view =
2317 this->section_contents(index, &input_view_size, false);
2318
2319 relinfo.reloc_shndx = i;
2320 relinfo.data_shndx = index;
2321 unsigned int sh_type = shdr.get_sh_type();
2322 unsigned int reloc_size;
2323 gold_assert (sh_type == elfcpp::SHT_RELA);
2324 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2325
2326 Output_section* os = out_sections[index];
2327 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2328 shdr.get_sh_size() / reloc_size,
2329 os,
2330 output_offset == invalid_address,
2331 input_view, output_address,
2332 input_view_size);
2333 }
2334 }
2335 }
2336
2337
2338 // A class to wrap an ordinary input section containing executable code.
2339
2340 template<int size, bool big_endian>
2341 class AArch64_input_section : public Output_relaxed_input_section
2342 {
2343 public:
2344 typedef Stub_table<size, big_endian> The_stub_table;
2345
2346 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2347 : Output_relaxed_input_section(relobj, shndx, 1),
2348 stub_table_(NULL),
2349 original_contents_(NULL), original_size_(0),
2350 original_addralign_(1)
2351 { }
2352
2353 ~AArch64_input_section()
2354 { delete[] this->original_contents_; }
2355
2356 // Initialize.
2357 void
2358 init();
2359
2360 // Set the stub_table.
2361 void
2362 set_stub_table(The_stub_table* st)
2363 { this->stub_table_ = st; }
2364
2365 // Whether this is a stub table owner.
2366 bool
2367 is_stub_table_owner() const
2368 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2369
2370 // Return the original size of the section.
2371 uint32_t
2372 original_size() const
2373 { return this->original_size_; }
2374
2375 // Return the stub table.
2376 The_stub_table*
2377 stub_table()
2378 { return stub_table_; }
2379
2380 protected:
2381 // Write out this input section.
2382 void
2383 do_write(Output_file*);
2384
2385 // Return required alignment of this.
2386 uint64_t
2387 do_addralign() const
2388 {
2389 if (this->is_stub_table_owner())
2390 return std::max(this->stub_table_->addralign(),
2391 static_cast<uint64_t>(this->original_addralign_));
2392 else
2393 return this->original_addralign_;
2394 }
2395
2396 // Finalize data size.
2397 void
2398 set_final_data_size();
2399
2400 // Reset address and file offset.
2401 void
2402 do_reset_address_and_file_offset();
2403
2404 // Output offset.
2405 bool
2406 do_output_offset(const Relobj* object, unsigned int shndx,
2407 section_offset_type offset,
2408 section_offset_type* poutput) const
2409 {
2410 if ((object == this->relobj())
2411 && (shndx == this->shndx())
2412 && (offset >= 0)
2413 && (offset <=
2414 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2415 {
2416 *poutput = offset;
2417 return true;
2418 }
2419 else
2420 return false;
2421 }
2422
2423 private:
2424 // Copying is not allowed.
2425 AArch64_input_section(const AArch64_input_section&);
2426 AArch64_input_section& operator=(const AArch64_input_section&);
2427
2428 // The relocation stubs.
2429 The_stub_table* stub_table_;
2430 // Original section contents. We have to make a copy here since the file
2431 // containing the original section may not be locked when we need to access
2432 // the contents.
2433 unsigned char* original_contents_;
2434 // Section size of the original input section.
2435 uint32_t original_size_;
2436 // Address alignment of the original input section.
2437 uint32_t original_addralign_;
2438 }; // End of AArch64_input_section
2439
2440
2441 // Finalize data size.
2442
2443 template<int size, bool big_endian>
2444 void
2445 AArch64_input_section<size, big_endian>::set_final_data_size()
2446 {
2447 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2448
2449 if (this->is_stub_table_owner())
2450 {
2451 this->stub_table_->finalize_data_size();
2452 off = align_address(off, this->stub_table_->addralign());
2453 off += this->stub_table_->data_size();
2454 }
2455 this->set_data_size(off);
2456 }
2457
2458
2459 // Reset address and file offset.
2460
2461 template<int size, bool big_endian>
2462 void
2463 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2464 {
2465 // Size of the original input section contents.
2466 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2467
2468 // If this is a stub table owner, account for the stub table size.
2469 if (this->is_stub_table_owner())
2470 {
2471 The_stub_table* stub_table = this->stub_table_;
2472
2473 // Reset the stub table's address and file offset. The
2474 // current data size for child will be updated after that.
2475 stub_table_->reset_address_and_file_offset();
2476 off = align_address(off, stub_table_->addralign());
2477 off += stub_table->current_data_size();
2478 }
2479
2480 this->set_current_data_size(off);
2481 }
2482
2483
2484 // Initialize an AArch64_input_section.
2485
2486 template<int size, bool big_endian>
2487 void
2488 AArch64_input_section<size, big_endian>::init()
2489 {
2490 Relobj* relobj = this->relobj();
2491 unsigned int shndx = this->shndx();
2492
2493 // We have to cache original size, alignment and contents to avoid locking
2494 // the original file.
2495 this->original_addralign_ =
2496 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2497
2498 // This is not efficient but we expect only a small number of relaxed
2499 // input sections for stubs.
2500 section_size_type section_size;
2501 const unsigned char* section_contents =
2502 relobj->section_contents(shndx, &section_size, false);
2503 this->original_size_ =
2504 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2505
2506 gold_assert(this->original_contents_ == NULL);
2507 this->original_contents_ = new unsigned char[section_size];
2508 memcpy(this->original_contents_, section_contents, section_size);
2509
2510 // We want to make this look like the original input section after
2511 // output sections are finalized.
2512 Output_section* os = relobj->output_section(shndx);
2513 off_t offset = relobj->output_section_offset(shndx);
2514 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2515 this->set_address(os->address() + offset);
2516 this->set_file_offset(os->offset() + offset);
2517 this->set_current_data_size(this->original_size_);
2518 this->finalize_data_size();
2519 }
2520
2521
2522 // Write data to output file.
2523
2524 template<int size, bool big_endian>
2525 void
2526 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2527 {
2528 // We have to write out the original section content.
2529 gold_assert(this->original_contents_ != NULL);
2530 of->write(this->offset(), this->original_contents_,
2531 this->original_size_);
2532
2533 // If this owns a stub table and it is not empty, write it.
2534 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2535 this->stub_table_->write(of);
2536 }
2537
2538
2539 // AArch64 output section class. This is defined mainly to add a number of
2540 // stub generation methods.
2541
2542 template<int size, bool big_endian>
2543 class AArch64_output_section : public Output_section
2544 {
2545 public:
2546 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2547 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2548 typedef Stub_table<size, big_endian> The_stub_table;
2549 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2550
2551 public:
2552 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2553 elfcpp::Elf_Xword flags)
2554 : Output_section(name, type, flags)
2555 { }
2556
2557 ~AArch64_output_section() {}
2558
2559 // Group input sections for stub generation.
2560 void
2561 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2562 const Task*);
2563
2564 private:
2565 typedef Output_section::Input_section Input_section;
2566 typedef Output_section::Input_section_list Input_section_list;
2567
2568 // Create a stub group.
2569 void
2570 create_stub_group(Input_section_list::const_iterator,
2571 Input_section_list::const_iterator,
2572 Input_section_list::const_iterator,
2573 The_target_aarch64*,
2574 std::vector<Output_relaxed_input_section*>&,
2575 const Task*);
2576 }; // End of AArch64_output_section
2577
2578
2579 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2580 // the input section that will be the owner of the stub table.
2581
2582 template<int size, bool big_endian> void
2583 AArch64_output_section<size, big_endian>::create_stub_group(
2584 Input_section_list::const_iterator first,
2585 Input_section_list::const_iterator last,
2586 Input_section_list::const_iterator owner,
2587 The_target_aarch64* target,
2588 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2589 const Task* task)
2590 {
2591 // Currently we convert ordinary input sections into relaxed sections only
2592 // at this point.
2593 The_aarch64_input_section* input_section;
2594 if (owner->is_relaxed_input_section())
2595 gold_unreachable();
2596 else
2597 {
2598 gold_assert(owner->is_input_section());
2599 // Create a new relaxed input section. We need to lock the original
2600 // file.
2601 Task_lock_obj<Object> tl(task, owner->relobj());
2602 input_section =
2603 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2604 new_relaxed_sections.push_back(input_section);
2605 }
2606
2607 // Create a stub table.
2608 The_stub_table* stub_table =
2609 target->new_stub_table(input_section);
2610
2611 input_section->set_stub_table(stub_table);
2612
2613 Input_section_list::const_iterator p = first;
2614 // Look for input sections or relaxed input sections in [first ... last].
2615 do
2616 {
2617 if (p->is_input_section() || p->is_relaxed_input_section())
2618 {
2619 // The stub table information for input sections lives
2620 // in their objects.
2621 The_aarch64_relobj* aarch64_relobj =
2622 static_cast<The_aarch64_relobj*>(p->relobj());
2623 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2624 }
2625 }
2626 while (p++ != last);
2627 }
2628
2629
2630 // Group input sections for stub generation. GROUP_SIZE is roughly the size
2631 // limit of a stub group. We grow a stub group by adding input sections until
2632 // the size is just below GROUP_SIZE; the last input section is then converted
2633 // into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2634 // input sections after the stub table, effectively doubling the group size.
2635 //
2636 // This is similar to the group_sections() function in elf32-arm.c but is
2637 // implemented differently. A small sizing walk-through follows this function.
2638
2639 template<int size, bool big_endian>
2640 void AArch64_output_section<size, big_endian>::group_sections(
2641 section_size_type group_size,
2642 bool stubs_always_after_branch,
2643 Target_aarch64<size, big_endian>* target,
2644 const Task* task)
2645 {
2646 typedef enum
2647 {
2648 NO_GROUP,
2649 FINDING_STUB_SECTION,
2650 HAS_STUB_SECTION
2651 } State;
2652
2653 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2654
2655 State state = NO_GROUP;
2656 section_size_type off = 0;
2657 section_size_type group_begin_offset = 0;
2658 section_size_type group_end_offset = 0;
2659 section_size_type stub_table_end_offset = 0;
2660 Input_section_list::const_iterator group_begin =
2661 this->input_sections().end();
2662 Input_section_list::const_iterator stub_table =
2663 this->input_sections().end();
2664 Input_section_list::const_iterator group_end = this->input_sections().end();
2665 for (Input_section_list::const_iterator p = this->input_sections().begin();
2666 p != this->input_sections().end();
2667 ++p)
2668 {
2669 section_size_type section_begin_offset =
2670 align_address(off, p->addralign());
2671 section_size_type section_end_offset =
2672 section_begin_offset + p->data_size();
2673
2674 // Check to see if we should group the previously seen sections.
2675 switch (state)
2676 {
2677 case NO_GROUP:
2678 break;
2679
2680 case FINDING_STUB_SECTION:
2681 // Adding this section makes the group larger than GROUP_SIZE.
2682 if (section_end_offset - group_begin_offset >= group_size)
2683 {
2684 if (stubs_always_after_branch)
2685 {
2686 gold_assert(group_end != this->input_sections().end());
2687 this->create_stub_group(group_begin, group_end, group_end,
2688 target, new_relaxed_sections,
2689 task);
2690 state = NO_GROUP;
2691 }
2692 else
2693 {
2694 // Input sections up to stub_group_size bytes after the stub
2695 // table can be handled by it too.
2696 state = HAS_STUB_SECTION;
2697 stub_table = group_end;
2698 stub_table_end_offset = group_end_offset;
2699 }
2700 }
2701 break;
2702
2703 case HAS_STUB_SECTION:
2704 // Adding this section makes the post stub-section group larger
2705 // than GROUP_SIZE.
2706 gold_unreachable();
2707 // NOT SUPPORTED YET. For completeness only.
2708 if (section_end_offset - stub_table_end_offset >= group_size)
2709 {
2710 gold_assert(group_end != this->input_sections().end());
2711 this->create_stub_group(group_begin, group_end, stub_table,
2712 target, new_relaxed_sections, task);
2713 state = NO_GROUP;
2714 }
2715 break;
2716
2717 default:
2718 gold_unreachable();
2719 }
2720
2721 // If we see an input section and currently there is no group, start
2722 // a new one. Skip any empty sections. We look at the data size
2723 // instead of calling p->relobj()->section_size() to avoid locking.
2724 if ((p->is_input_section() || p->is_relaxed_input_section())
2725 && (p->data_size() != 0))
2726 {
2727 if (state == NO_GROUP)
2728 {
2729 state = FINDING_STUB_SECTION;
2730 group_begin = p;
2731 group_begin_offset = section_begin_offset;
2732 }
2733
2734 // Keep track of the last input section seen.
2735 group_end = p;
2736 group_end_offset = section_end_offset;
2737 }
2738
2739 off = section_end_offset;
2740 }
2741
2742 // Create a stub group for any ungrouped sections.
2743 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2744 {
2745 gold_assert(group_end != this->input_sections().end());
2746 this->create_stub_group(group_begin, group_end,
2747 (state == FINDING_STUB_SECTION
2748 ? group_end
2749 : stub_table),
2750 target, new_relaxed_sections, task);
2751 }
2752
2753 if (!new_relaxed_sections.empty())
2754 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2755
2756 // Tell each AArch64_relobj about its converted sections.
2757 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2758 {
2759 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2760 new_relaxed_sections[i]->relobj());
2761 unsigned int shndx = new_relaxed_sections[i]->shndx();
2762 // Tell AArch64_relobj that this input section is converted.
2763 relobj->convert_input_section_to_relaxed_section(shndx);
2764 }
2765 } // End of AArch64_output_section::group_sections
2766
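// Illustrative sketch (not part of gold; kept out of the build): a simplified
// model of the sizing walk above for the stubs_always_after_branch case.
// Sections are appended to the current group until adding one would push the
// group to GROUP_SIZE or beyond; the previous section then becomes the stub
// table owner and the current section starts a new group. Alignment and
// zero-sized sections are ignored here; the function name is hypothetical.
#if 0
#include <cstddef>
#include <vector>

// Given section sizes, return for each group the index of the section that
// would own the stub table.
static std::vector<size_t>
stub_owners(const std::vector<size_t>& sizes, size_t group_size)
{
  std::vector<size_t> owners;
  size_t group_begin_offset = 0;
  size_t off = 0;
  size_t last = 0;
  bool in_group = false;
  for (size_t i = 0; i < sizes.size(); ++i)
    {
      size_t end = off + sizes[i];
      if (in_group && end - group_begin_offset >= group_size)
        {
          owners.push_back(last);  // Close the group at the previous section.
          in_group = false;
        }
      if (!in_group)
        {
          in_group = true;
          group_begin_offset = off;
        }
      last = i;
      off = end;
    }
  if (in_group)
    owners.push_back(last);        // Any trailing, ungrouped sections.
  return owners;
}

// Example: sizes {4K, 4K, 4K} with group_size 10K give owners {1, 2}: the
// first group holds sections 0-1, the second holds section 2.
#endif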
2767
2768 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2769
2770
2771 // The aarch64 target class.
2772 // See the ABI at
2773 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2774 template<int size, bool big_endian>
2775 class Target_aarch64 : public Sized_target<size, big_endian>
2776 {
2777 public:
2778 typedef Target_aarch64<size, big_endian> This;
2779 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2780 Reloc_section;
2781 typedef Relocate_info<size, big_endian> The_relocate_info;
2782 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2783 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2784 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2785 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2786 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2787 typedef Stub_table<size, big_endian> The_stub_table;
2788 typedef std::vector<The_stub_table*> Stub_table_list;
2789 typedef typename Stub_table_list::iterator Stub_table_iterator;
2790 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2791 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2792 typedef Unordered_map<Section_id,
2793 AArch64_input_section<size, big_endian>*,
2794 Section_id_hash> AArch64_input_section_map;
2795 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
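// TCB_SIZE below is two pointer-sized words (size / 8 bytes each): 16 bytes
// for the 64-bit target, 8 bytes for the 32-bit variant.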
2796 const static int TCB_SIZE = size / 8 * 2;
2797
2798 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2799 : Sized_target<size, big_endian>(info),
2800 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2801 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2802 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2803 got_mod_index_offset_(-1U),
2804 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2805 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2806 { }
2807
2808 // Scan the relocations to determine unreferenced sections for
2809 // garbage collection.
2810 void
2811 gc_process_relocs(Symbol_table* symtab,
2812 Layout* layout,
2813 Sized_relobj_file<size, big_endian>* object,
2814 unsigned int data_shndx,
2815 unsigned int sh_type,
2816 const unsigned char* prelocs,
2817 size_t reloc_count,
2818 Output_section* output_section,
2819 bool needs_special_offset_handling,
2820 size_t local_symbol_count,
2821 const unsigned char* plocal_symbols);
2822
2823 // Scan the relocations to look for symbol adjustments.
2824 void
2825 scan_relocs(Symbol_table* symtab,
2826 Layout* layout,
2827 Sized_relobj_file<size, big_endian>* object,
2828 unsigned int data_shndx,
2829 unsigned int sh_type,
2830 const unsigned char* prelocs,
2831 size_t reloc_count,
2832 Output_section* output_section,
2833 bool needs_special_offset_handling,
2834 size_t local_symbol_count,
2835 const unsigned char* plocal_symbols);
2836
2837 // Finalize the sections.
2838 void
2839 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2840
2841 // Return the value to use for a dynamic symbol which requires
2842 // special treatment.
2843 uint64_t
2844 do_dynsym_value(const Symbol*) const;
2845
2846 // Relocate a section.
2847 void
2848 relocate_section(const Relocate_info<size, big_endian>*,
2849 unsigned int sh_type,
2850 const unsigned char* prelocs,
2851 size_t reloc_count,
2852 Output_section* output_section,
2853 bool needs_special_offset_handling,
2854 unsigned char* view,
2855 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2856 section_size_type view_size,
2857 const Reloc_symbol_changes*);
2858
2859 // Scan the relocs during a relocatable link.
2860 void
2861 scan_relocatable_relocs(Symbol_table* symtab,
2862 Layout* layout,
2863 Sized_relobj_file<size, big_endian>* object,
2864 unsigned int data_shndx,
2865 unsigned int sh_type,
2866 const unsigned char* prelocs,
2867 size_t reloc_count,
2868 Output_section* output_section,
2869 bool needs_special_offset_handling,
2870 size_t local_symbol_count,
2871 const unsigned char* plocal_symbols,
2872 Relocatable_relocs*);
2873
2874 // Scan the relocs for --emit-relocs.
2875 void
2876 emit_relocs_scan(Symbol_table* symtab,
2877 Layout* layout,
2878 Sized_relobj_file<size, big_endian>* object,
2879 unsigned int data_shndx,
2880 unsigned int sh_type,
2881 const unsigned char* prelocs,
2882 size_t reloc_count,
2883 Output_section* output_section,
2884 bool needs_special_offset_handling,
2885 size_t local_symbol_count,
2886 const unsigned char* plocal_syms,
2887 Relocatable_relocs* rr);
2888
2889 // Relocate a section during a relocatable link.
2890 void
2891 relocate_relocs(
2892 const Relocate_info<size, big_endian>*,
2893 unsigned int sh_type,
2894 const unsigned char* prelocs,
2895 size_t reloc_count,
2896 Output_section* output_section,
2897 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2898 unsigned char* view,
2899 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2900 section_size_type view_size,
2901 unsigned char* reloc_view,
2902 section_size_type reloc_view_size);
2903
2904 // Return the symbol index to use for a target specific relocation.
2905 // The only target specific relocation is R_AARCH64_TLSDESC for a
2906 // local symbol, which is an absolute reloc.
2907 unsigned int
2908 do_reloc_symbol_index(void*, unsigned int r_type) const
2909 {
2910 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2911 return 0;
2912 }
2913
2914 // Return the addend to use for a target specific relocation.
2915 uint64_t
2916 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2917
2918 // Return the PLT section.
2919 uint64_t
2920 do_plt_address_for_global(const Symbol* gsym) const
2921 { return this->plt_section()->address_for_global(gsym); }
2922
2923 uint64_t
2924 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2925 { return this->plt_section()->address_for_local(relobj, symndx); }
2926
2927 // This function should be defined in targets that can use relocation
2928 // types to determine whether a function's pointer is taken (implemented
2929 // in local_reloc_may_be_function_pointer and
2930 // global_reloc_may_be_function_pointer). ICF uses this in safe mode to
2931 // only fold those functions whose pointer is definitely not taken.
2932 bool
2933 do_can_check_for_function_pointers() const
2934 { return true; }
2935
2936 // Return the number of entries in the PLT.
2937 unsigned int
2938 plt_entry_count() const;
2939
2940 // Return the offset of the first non-reserved PLT entry.
2941 unsigned int
2942 first_plt_entry_offset() const;
2943
2944 // Return the size of each PLT entry.
2945 unsigned int
2946 plt_entry_size() const;
2947
2948 // Create a stub table.
2949 The_stub_table*
2950 new_stub_table(The_aarch64_input_section*);
2951
2952 // Create an aarch64 input section.
2953 The_aarch64_input_section*
2954 new_aarch64_input_section(Relobj*, unsigned int);
2955
2956 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2957 The_aarch64_input_section*
2958 find_aarch64_input_section(Relobj*, unsigned int) const;
2959
2960 // Return the thread control block size.
2961 unsigned int
2962 tcb_size() const { return This::TCB_SIZE; }
2963
2964 // Scan a section for stub generation.
2965 void
2966 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
2967 const unsigned char*, size_t, Output_section*,
2968 bool, const unsigned char*,
2969 Address,
2970 section_size_type);
2971
2972 // Scan a relocation section for stubs.
2973 template<int sh_type>
2974 void
2975 scan_reloc_section_for_stubs(
2976 const The_relocate_info* relinfo,
2977 const unsigned char* prelocs,
2978 size_t reloc_count,
2979 Output_section* output_section,
2980 bool needs_special_offset_handling,
2981 const unsigned char* view,
2982 Address view_address,
2983 section_size_type);
2984
2985 // Relocate a single stub.
2986 void
2987 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
2988 Output_section*, unsigned char*, Address,
2989 section_size_type);
2990
2991 // Get the default AArch64 target.
2992 static This*
2993 current_target()
2994 {
2995 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
2996 && parameters->target().get_size() == size
2997 && parameters->target().is_big_endian() == big_endian);
2998 return static_cast<This*>(parameters->sized_target<size, big_endian>());
2999 }
3000
3001
3002 // Scan erratum 843419 for a part of a section.
3003 void
3004 scan_erratum_843419_span(
3005 AArch64_relobj<size, big_endian>*,
3006 unsigned int,
3007 const section_size_type,
3008 const section_size_type,
3009 unsigned char*,
3010 Address);
3011
3012 // Scan erratum 835769 for a part of a section.
3013 void
3014 scan_erratum_835769_span(
3015 AArch64_relobj<size, big_endian>*,
3016 unsigned int,
3017 const section_size_type,
3018 const section_size_type,
3019 unsigned char*,
3020 Address);
3021
3022 protected:
3023 void
3024 do_select_as_default_target()
3025 {
3026 gold_assert(aarch64_reloc_property_table == NULL);
3027 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3028 }
3029
3030 // Add a new reloc argument, returning the index in the vector.
3031 size_t
3032 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3033 unsigned int r_sym)
3034 {
3035 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3036 return this->tlsdesc_reloc_info_.size() - 1;
3037 }
3038
3039 virtual Output_data_plt_aarch64<size, big_endian>*
3040 do_make_data_plt(Layout* layout,
3041 Output_data_got_aarch64<size, big_endian>* got,
3042 Output_data_space* got_plt,
3043 Output_data_space* got_irelative)
3044 {
3045 return new Output_data_plt_aarch64_standard<size, big_endian>(
3046 layout, got, got_plt, got_irelative);
3047 }
3048
3049
3050 // do_make_elf_object to override the same function in the base class.
3051 Object*
3052 do_make_elf_object(const std::string&, Input_file*, off_t,
3053 const elfcpp::Ehdr<size, big_endian>&);
3054
3055 Output_data_plt_aarch64<size, big_endian>*
3056 make_data_plt(Layout* layout,
3057 Output_data_got_aarch64<size, big_endian>* got,
3058 Output_data_space* got_plt,
3059 Output_data_space* got_irelative)
3060 {
3061 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3062 }
3063
3064 // We only need to generate stubs, and hence perform relaxation, if we
3065 // are not doing a relocatable link.
3066 virtual bool
3067 do_may_relax() const
3068 { return !parameters->options().relocatable(); }
3069
3070 // Relaxation hook. This is where we do stub generation.
3071 virtual bool
3072 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3073
3074 void
3075 group_sections(Layout* layout,
3076 section_size_type group_size,
3077 bool stubs_always_after_branch,
3078 const Task* task);
3079
3080 void
3081 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3082 const Sized_symbol<size>*, unsigned int,
3083 const Symbol_value<size>*,
3084 typename elfcpp::Elf_types<size>::Elf_Swxword,
3085 Address Elf_Addr);
3086
3087 // Make an output section.
3088 Output_section*
3089 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3090 elfcpp::Elf_Xword flags)
3091 { return new The_aarch64_output_section(name, type, flags); }
3092
3093 private:
3094 // The class which scans relocations.
3095 class Scan
3096 {
3097 public:
3098 Scan()
3099 : issued_non_pic_error_(false)
3100 { }
3101
3102 inline void
3103 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3104 Sized_relobj_file<size, big_endian>* object,
3105 unsigned int data_shndx,
3106 Output_section* output_section,
3107 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3108 const elfcpp::Sym<size, big_endian>& lsym,
3109 bool is_discarded);
3110
3111 inline void
3112 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3113 Sized_relobj_file<size, big_endian>* object,
3114 unsigned int data_shndx,
3115 Output_section* output_section,
3116 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3117 Symbol* gsym);
3118
3119 inline bool
3120 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3121 Target_aarch64<size, big_endian>* ,
3122 Sized_relobj_file<size, big_endian>* ,
3123 unsigned int ,
3124 Output_section* ,
3125 const elfcpp::Rela<size, big_endian>& ,
3126 unsigned int r_type,
3127 const elfcpp::Sym<size, big_endian>&);
3128
3129 inline bool
3130 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3131 Target_aarch64<size, big_endian>* ,
3132 Sized_relobj_file<size, big_endian>* ,
3133 unsigned int ,
3134 Output_section* ,
3135 const elfcpp::Rela<size, big_endian>& ,
3136 unsigned int r_type,
3137 Symbol* gsym);
3138
3139 private:
3140 static void
3141 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3142 unsigned int r_type);
3143
3144 static void
3145 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3146 unsigned int r_type, Symbol*);
3147
3148 inline bool
3149 possible_function_pointer_reloc(unsigned int r_type);
3150
3151 void
3152 check_non_pic(Relobj*, unsigned int r_type);
3153
3154 bool
3155 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3156 unsigned int r_type);
3157
3158 // Whether we have issued an error about a non-PIC compilation.
3159 bool issued_non_pic_error_;
3160 };
3161
3162 // The class which implements relocation.
3163 class Relocate
3164 {
3165 public:
3166 Relocate()
3167 : skip_call_tls_get_addr_(false)
3168 { }
3169
3170 ~Relocate()
3171 { }
3172
3173 // Do a relocation. Return false if the caller should not issue
3174 // any warnings about this relocation.
3175 inline bool
3176 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3177 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3178 const Sized_symbol<size>*, const Symbol_value<size>*,
3179 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3180 section_size_type);
3181
3182 private:
3183 inline typename AArch64_relocate_functions<size, big_endian>::Status
3184 relocate_tls(const Relocate_info<size, big_endian>*,
3185 Target_aarch64<size, big_endian>*,
3186 size_t,
3187 const elfcpp::Rela<size, big_endian>&,
3188 unsigned int r_type, const Sized_symbol<size>*,
3189 const Symbol_value<size>*,
3190 unsigned char*,
3191 typename elfcpp::Elf_types<size>::Elf_Addr);
3192
3193 inline typename AArch64_relocate_functions<size, big_endian>::Status
3194 tls_gd_to_le(
3195 const Relocate_info<size, big_endian>*,
3196 Target_aarch64<size, big_endian>*,
3197 const elfcpp::Rela<size, big_endian>&,
3198 unsigned int,
3199 unsigned char*,
3200 const Symbol_value<size>*);
3201
3202 inline typename AArch64_relocate_functions<size, big_endian>::Status
3203 tls_ld_to_le(
3204 const Relocate_info<size, big_endian>*,
3205 Target_aarch64<size, big_endian>*,
3206 const elfcpp::Rela<size, big_endian>&,
3207 unsigned int,
3208 unsigned char*,
3209 const Symbol_value<size>*);
3210
3211 inline typename AArch64_relocate_functions<size, big_endian>::Status
3212 tls_ie_to_le(
3213 const Relocate_info<size, big_endian>*,
3214 Target_aarch64<size, big_endian>*,
3215 const elfcpp::Rela<size, big_endian>&,
3216 unsigned int,
3217 unsigned char*,
3218 const Symbol_value<size>*);
3219
3220 inline typename AArch64_relocate_functions<size, big_endian>::Status
3221 tls_desc_gd_to_le(
3222 const Relocate_info<size, big_endian>*,
3223 Target_aarch64<size, big_endian>*,
3224 const elfcpp::Rela<size, big_endian>&,
3225 unsigned int,
3226 unsigned char*,
3227 const Symbol_value<size>*);
3228
3229 inline typename AArch64_relocate_functions<size, big_endian>::Status
3230 tls_desc_gd_to_ie(
3231 const Relocate_info<size, big_endian>*,
3232 Target_aarch64<size, big_endian>*,
3233 const elfcpp::Rela<size, big_endian>&,
3234 unsigned int,
3235 unsigned char*,
3236 const Symbol_value<size>*,
3237 typename elfcpp::Elf_types<size>::Elf_Addr,
3238 typename elfcpp::Elf_types<size>::Elf_Addr);
3239
3240 bool skip_call_tls_get_addr_;
3241
3242 }; // End of class Relocate
3243
3244 // Adjust TLS relocation type based on the options and whether this
3245 // is a local symbol.
3246 static tls::Tls_optimization
3247 optimize_tls_reloc(bool is_final, int r_type);
3248
3249 // Get the GOT section, creating it if necessary.
3250 Output_data_got_aarch64<size, big_endian>*
3251 got_section(Symbol_table*, Layout*);
3252
3253 // Get the GOT PLT section.
3254 Output_data_space*
3255 got_plt_section() const
3256 {
3257 gold_assert(this->got_plt_ != NULL);
3258 return this->got_plt_;
3259 }
3260
3261 // Get the GOT section for TLSDESC entries.
3262 Output_data_got<size, big_endian>*
3263 got_tlsdesc_section() const
3264 {
3265 gold_assert(this->got_tlsdesc_ != NULL);
3266 return this->got_tlsdesc_;
3267 }
3268
3269 // Create the PLT section.
3270 void
3271 make_plt_section(Symbol_table* symtab, Layout* layout);
3272
3273 // Create a PLT entry for a global symbol.
3274 void
3275 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3276
3277 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3278 void
3279 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3280 Sized_relobj_file<size, big_endian>* relobj,
3281 unsigned int local_sym_index);
3282
3283 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3284 void
3285 define_tls_base_symbol(Symbol_table*, Layout*);
3286
3287 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3288 void
3289 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3290
3291 // Create a GOT entry for the TLS module index.
3292 unsigned int
3293 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3294 Sized_relobj_file<size, big_endian>* object);
3295
3296 // Get the PLT section.
3297 Output_data_plt_aarch64<size, big_endian>*
3298 plt_section() const
3299 {
3300 gold_assert(this->plt_ != NULL);
3301 return this->plt_;
3302 }
3303
3304 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3305 // ST_E_843419, we need an additional field for adrp offset.
3306 void create_erratum_stub(
3307 AArch64_relobj<size, big_endian>* relobj,
3308 unsigned int shndx,
3309 section_size_type erratum_insn_offset,
3310 Address erratum_address,
3311 typename Insn_utilities::Insntype erratum_insn,
3312 int erratum_type,
3313 unsigned int e843419_adrp_offset=0);
3314
3315 // Return whether this is a 3-insn erratum sequence.
3316 bool is_erratum_843419_sequence(
3317 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3318 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3319 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3320
3321 // Return whether this is an 835769 sequence.
3322 // (Implemented similarly to elfnn-aarch64.c.)
3323 bool is_erratum_835769_sequence(
3324 typename elfcpp::Swap<32,big_endian>::Valtype,
3325 typename elfcpp::Swap<32,big_endian>::Valtype);
3326
3327 // Get the dynamic reloc section, creating it if necessary.
3328 Reloc_section*
3329 rela_dyn_section(Layout*);
3330
3331 // Get the section to use for TLSDESC relocations.
3332 Reloc_section*
3333 rela_tlsdesc_section(Layout*) const;
3334
3335 // Get the section to use for IRELATIVE relocations.
3336 Reloc_section*
3337 rela_irelative_section(Layout*);
3338
3339 // Add a potential copy relocation.
3340 void
3341 copy_reloc(Symbol_table* symtab, Layout* layout,
3342 Sized_relobj_file<size, big_endian>* object,
3343 unsigned int shndx, Output_section* output_section,
3344 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3345 {
3346 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3347 this->copy_relocs_.copy_reloc(symtab, layout,
3348 symtab->get_sized_symbol<size>(sym),
3349 object, shndx, output_section,
3350 r_type, reloc.get_r_offset(),
3351 reloc.get_r_addend(),
3352 this->rela_dyn_section(layout));
3353 }
3354
3355 // Information about this specific target which we pass to the
3356 // general Target structure.
3357 static const Target::Target_info aarch64_info;
3358
3359 // The types of GOT entries needed for this platform.
3360 // These values are exposed to the ABI in an incremental link.
3361 // Do not renumber existing values without changing the version
3362 // number of the .gnu_incremental_inputs section.
3363 enum Got_type
3364 {
3365 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3366 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3367 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3368 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3369 };
3370
3371 // This type is used as the argument to the target specific
3372 // relocation routines. The only target specific reloc is
3373 // R_AARCH64_TLSDESC against a local symbol.
3374 struct Tlsdesc_info
3375 {
3376 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3377 unsigned int a_r_sym)
3378 : object(a_object), r_sym(a_r_sym)
3379 { }
3380
3381 // The object in which the local symbol is defined.
3382 Sized_relobj_file<size, big_endian>* object;
3383 // The local symbol index in the object.
3384 unsigned int r_sym;
3385 };
3386
3387 // The GOT section.
3388 Output_data_got_aarch64<size, big_endian>* got_;
3389 // The PLT section.
3390 Output_data_plt_aarch64<size, big_endian>* plt_;
3391 // The GOT PLT section.
3392 Output_data_space* got_plt_;
3393 // The GOT section for IRELATIVE relocations.
3394 Output_data_space* got_irelative_;
3395 // The GOT section for TLSDESC relocations.
3396 Output_data_got<size, big_endian>* got_tlsdesc_;
3397 // The _GLOBAL_OFFSET_TABLE_ symbol.
3398 Symbol* global_offset_table_;
3399 // The dynamic reloc section.
3400 Reloc_section* rela_dyn_;
3401 // The section to use for IRELATIVE relocs.
3402 Reloc_section* rela_irelative_;
3403 // Relocs saved to avoid a COPY reloc.
3404 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3405 // Offset of the GOT entry for the TLS module index.
3406 unsigned int got_mod_index_offset_;
3407 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3408 // specific relocation. Here we store the object and local symbol
3409 // index for the relocation.
3410 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3411 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3412 bool tls_base_symbol_defined_;
3413 // List of stub_tables
3414 Stub_table_list stub_tables_;
3415 // Actual stub group size
3416 section_size_type stub_group_size_;
3417 AArch64_input_section_map aarch64_input_section_map_;
3418 }; // End of Target_aarch64
3419
3420
3421 template<>
3422 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3423 {
3424 64, // size
3425 false, // is_big_endian
3426 elfcpp::EM_AARCH64, // machine_code
3427 false, // has_make_symbol
3428 false, // has_resolve
3429 false, // has_code_fill
3430 true, // is_default_stack_executable
3431 true, // can_icf_inline_merge_sections
3432 '\0', // wrap_char
3433 "/lib/ld.so.1", // program interpreter
3434 0x400000, // default_text_segment_address
3435 0x10000, // abi_pagesize (overridable by -z max-page-size)
3436 0x1000, // common_pagesize (overridable by -z common-page-size)
3437 false, // isolate_execinstr
3438 0, // rosegment_gap
3439 elfcpp::SHN_UNDEF, // small_common_shndx
3440 elfcpp::SHN_UNDEF, // large_common_shndx
3441 0, // small_common_section_flags
3442 0, // large_common_section_flags
3443 NULL, // attributes_section
3444 NULL, // attributes_vendor
3445 "_start", // entry_symbol_name
3446 32, // hash_entry_size
3447 };
3448
3449 template<>
3450 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3451 {
3452 32, // size
3453 false, // is_big_endian
3454 elfcpp::EM_AARCH64, // machine_code
3455 false, // has_make_symbol
3456 false, // has_resolve
3457 false, // has_code_fill
3458 true, // is_default_stack_executable
3459 false, // can_icf_inline_merge_sections
3460 '\0', // wrap_char
3461 "/lib/ld.so.1", // program interpreter
3462 0x400000, // default_text_segment_address
3463 0x10000, // abi_pagesize (overridable by -z max-page-size)
3464 0x1000, // common_pagesize (overridable by -z common-page-size)
3465 false, // isolate_execinstr
3466 0, // rosegment_gap
3467 elfcpp::SHN_UNDEF, // small_common_shndx
3468 elfcpp::SHN_UNDEF, // large_common_shndx
3469 0, // small_common_section_flags
3470 0, // large_common_section_flags
3471 NULL, // attributes_section
3472 NULL, // attributes_vendor
3473 "_start", // entry_symbol_name
3474 32, // hash_entry_size
3475 };
3476
3477 template<>
3478 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3479 {
3480 64, // size
3481 true, // is_big_endian
3482 elfcpp::EM_AARCH64, // machine_code
3483 false, // has_make_symbol
3484 false, // has_resolve
3485 false, // has_code_fill
3486 true, // is_default_stack_executable
3487 true, // can_icf_inline_merge_sections
3488 '\0', // wrap_char
3489 "/lib/ld.so.1", // program interpreter
3490 0x400000, // default_text_segment_address
3491 0x10000, // abi_pagesize (overridable by -z max-page-size)
3492 0x1000, // common_pagesize (overridable by -z common-page-size)
3493 false, // isolate_execinstr
3494 0, // rosegment_gap
3495 elfcpp::SHN_UNDEF, // small_common_shndx
3496 elfcpp::SHN_UNDEF, // large_common_shndx
3497 0, // small_common_section_flags
3498 0, // large_common_section_flags
3499 NULL, // attributes_section
3500 NULL, // attributes_vendor
3501 "_start", // entry_symbol_name
3502 32, // hash_entry_size
3503 };
3504
3505 template<>
3506 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3507 {
3508 32, // size
3509 true, // is_big_endian
3510 elfcpp::EM_AARCH64, // machine_code
3511 false, // has_make_symbol
3512 false, // has_resolve
3513 false, // has_code_fill
3514 true, // is_default_stack_executable
3515 false, // can_icf_inline_merge_sections
3516 '\0', // wrap_char
3517 "/lib/ld.so.1", // program interpreter
3518 0x400000, // default_text_segment_address
3519 0x10000, // abi_pagesize (overridable by -z max-page-size)
3520 0x1000, // common_pagesize (overridable by -z common-page-size)
3521 false, // isolate_execinstr
3522 0, // rosegment_gap
3523 elfcpp::SHN_UNDEF, // small_common_shndx
3524 elfcpp::SHN_UNDEF, // large_common_shndx
3525 0, // small_common_section_flags
3526 0, // large_common_section_flags
3527 NULL, // attributes_section
3528 NULL, // attributes_vendor
3529 "_start", // entry_symbol_name
3530 32, // hash_entry_size
3531 };
3532
3533 // Get the GOT section, creating it if necessary.
3534
3535 template<int size, bool big_endian>
3536 Output_data_got_aarch64<size, big_endian>*
3537 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3538 Layout* layout)
3539 {
3540 if (this->got_ == NULL)
3541 {
3542 gold_assert(symtab != NULL && layout != NULL);
3543
3544 // When using -z now, we can treat .got.plt as a relro section.
3545 // Without -z now, it is modified after program startup by lazy
3546 // PLT relocations.
3547 bool is_got_plt_relro = parameters->options().now();
3548 Output_section_order got_order = (is_got_plt_relro
3549 ? ORDER_RELRO
3550 : ORDER_RELRO_LAST);
3551 Output_section_order got_plt_order = (is_got_plt_relro
3552 ? ORDER_RELRO
3553 : ORDER_NON_RELRO_FIRST);
3554
3555 // Layout of .got and .got.plt sections.
3556 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3557 // ...
3558 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3559 // .gotplt[1] reserved for ld.so (resolver)
3560 // .gotplt[2] reserved
3561
3562 // Generate .got section.
3563 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3564 layout);
3565 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3566 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3567 this->got_, got_order, true);
3568 // The first word of GOT is reserved for the address of .dynamic.
3569 // We put 0 here now. The value will be replaced later in
3570 // Output_data_got_aarch64::do_write.
3571 this->got_->add_constant(0);
3572
3573 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3574 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section,
3575 // even if there is a .got.plt section.
3576 this->global_offset_table_ =
3577 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3578 Symbol_table::PREDEFINED,
3579 this->got_,
3580 0, 0, elfcpp::STT_OBJECT,
3581 elfcpp::STB_LOCAL,
3582 elfcpp::STV_HIDDEN, 0,
3583 false, false);
3584
3585 // Generate .got.plt section.
3586 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3587 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3588 (elfcpp::SHF_ALLOC
3589 | elfcpp::SHF_WRITE),
3590 this->got_plt_, got_plt_order,
3591 is_got_plt_relro);
3592
3593 // The first three entries are reserved.
3594 this->got_plt_->set_current_data_size(
3595 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3596
3597 // If there are any IRELATIVE relocations, they get GOT entries
3598 // in .got.plt after the jump slot entries.
3599 this->got_irelative_ = new Output_data_space(size / 8,
3600 "** GOT IRELATIVE PLT");
3601 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3602 (elfcpp::SHF_ALLOC
3603 | elfcpp::SHF_WRITE),
3604 this->got_irelative_,
3605 got_plt_order,
3606 is_got_plt_relro);
3607
3608 // If there are any TLSDESC relocations, they get GOT entries in
3609 // .got.plt after the jump slot and IRELATIVE entries.
3610 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3611 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3612 (elfcpp::SHF_ALLOC
3613 | elfcpp::SHF_WRITE),
3614 this->got_tlsdesc_,
3615 got_plt_order,
3616 is_got_plt_relro);
3617
3618 if (!is_got_plt_relro)
3619 {
3620 // Those bytes can go into the relro segment.
3621 layout->increase_relro(
3622 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3623 }
3624
3625 }
3626 return this->got_;
3627 }
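// Illustration only, not referenced by the linker: with the layout above,
// the space reserved at the head of .got.plt is AARCH64_GOTPLT_RESERVE_COUNT
// words, which is exactly the amount handed to increase_relro() when
// .got.plt itself is not relro. A hypothetical sketch of that arithmetic
// (the helper name is made up):

static inline uint64_t
example_reserved_got_plt_bytes(int ptr_size)
{
  // Three reserved words: two for ld.so (link map and resolver) plus one
  // further reserved slot, each of pointer size.
  return AARCH64_GOTPLT_RESERVE_COUNT * static_cast<uint64_t>(ptr_size);
}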
3628
3629 // Get the dynamic reloc section, creating it if necessary.
3630
3631 template<int size, bool big_endian>
3632 typename Target_aarch64<size, big_endian>::Reloc_section*
3633 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3634 {
3635 if (this->rela_dyn_ == NULL)
3636 {
3637 gold_assert(layout != NULL);
3638 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3639 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3640 elfcpp::SHF_ALLOC, this->rela_dyn_,
3641 ORDER_DYNAMIC_RELOCS, false);
3642 }
3643 return this->rela_dyn_;
3644 }
3645
3646 // Get the section to use for IRELATIVE relocs, creating it if
3647 // necessary. These go in .rela.dyn, but only after all other dynamic
3648 // relocations. They need to follow the other dynamic relocations so
3649 // that they can refer to global variables initialized by those
3650 // relocs.
3651
3652 template<int size, bool big_endian>
3653 typename Target_aarch64<size, big_endian>::Reloc_section*
3654 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3655 {
3656 if (this->rela_irelative_ == NULL)
3657 {
3658 // Make sure we have already created the dynamic reloc section.
3659 this->rela_dyn_section(layout);
3660 this->rela_irelative_ = new Reloc_section(false);
3661 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3662 elfcpp::SHF_ALLOC, this->rela_irelative_,
3663 ORDER_DYNAMIC_RELOCS, false);
3664 gold_assert(this->rela_dyn_->output_section()
3665 == this->rela_irelative_->output_section());
3666 }
3667 return this->rela_irelative_;
3668 }
3669
3670
3671 // do_make_elf_object overrides the same function in the base class. We need
3672 // a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3673 // store backend-specific information, hence we need our own ELF object
3674 // creation.
3675
3676 template<int size, bool big_endian>
3677 Object*
3678 Target_aarch64<size, big_endian>::do_make_elf_object(
3679 const std::string& name,
3680 Input_file* input_file,
3681 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3682 {
3683 int et = ehdr.get_e_type();
3684 // ET_EXEC files are valid input for --just-symbols/-R,
3685 // and we treat them as relocatable objects.
3686 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3687 return Sized_target<size, big_endian>::do_make_elf_object(
3688 name, input_file, offset, ehdr);
3689 else if (et == elfcpp::ET_REL)
3690 {
3691 AArch64_relobj<size, big_endian>* obj =
3692 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3693 obj->setup();
3694 return obj;
3695 }
3696 else if (et == elfcpp::ET_DYN)
3697 {
3698 // Keep base implementation.
3699 Sized_dynobj<size, big_endian>* obj =
3700 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3701 obj->setup();
3702 return obj;
3703 }
3704 else
3705 {
3706 gold_error(_("%s: unsupported ELF file type %d"),
3707 name.c_str(), et);
3708 return NULL;
3709 }
3710 }
3711
3712
3713 // Scan a relocation for stub generation.
3714
3715 template<int size, bool big_endian>
3716 void
3717 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3718 const Relocate_info<size, big_endian>* relinfo,
3719 unsigned int r_type,
3720 const Sized_symbol<size>* gsym,
3721 unsigned int r_sym,
3722 const Symbol_value<size>* psymval,
3723 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3724 Address address)
3725 {
3726 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3727 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3728
3729 Symbol_value<size> symval;
3730 if (gsym != NULL)
3731 {
3732 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3733 get_reloc_property(r_type);
3734 if (gsym->use_plt_offset(arp->reference_flags()))
3735 {
3736 // This uses a PLT, change the symbol value.
3737 symval.set_output_value(this->plt_section()->address()
3738 + gsym->plt_offset());
3739 psymval = &symval;
3740 }
3741 else if (gsym->is_undefined())
3742 // There is no need to generate a stub if the symbol is undefined.
3743 return;
3744 }
3745
3746 // Get the symbol value.
3747 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3748
3749 // Owing to pipelining, the PC relative branches below actually skip
3750 // two instructions when the branch offset is 0.
3751 Address destination = static_cast<Address>(-1);
3752 switch (r_type)
3753 {
3754 case elfcpp::R_AARCH64_CALL26:
3755 case elfcpp::R_AARCH64_JUMP26:
3756 destination = value + addend;
3757 break;
3758 default:
3759 gold_unreachable();
3760 }
3761
3762 int stub_type = The_reloc_stub::
3763 stub_type_for_reloc(r_type, address, destination);
3764 if (stub_type == ST_NONE)
3765 return;
3766
3767 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3768 gold_assert(stub_table != NULL);
3769
3770 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3771 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3772 if (stub == NULL)
3773 {
3774 stub = new The_reloc_stub(stub_type);
3775 stub_table->add_reloc_stub(stub, key);
3776 }
3777 stub->set_destination_address(destination);
3778 } // End of Target_aarch64::scan_reloc_for_stub
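// Illustration only, not referenced by the linker: the stub decision above
// boils down to a range check on the branch displacement. B and BL
// (R_AARCH64_JUMP26/CALL26) carry a signed 26-bit immediate scaled by 4,
// so they reach roughly +/-128 MiB. This hypothetical helper sketches the
// test; the real logic lives in Reloc_stub::stub_type_for_reloc.

static inline bool
example_branch_needs_stub(uint64_t branch_address, uint64_t destination)
{
  const int64_t displacement =
      static_cast<int64_t>(destination - branch_address);
  const int64_t max_forward = (1 << 27) - 4;   // (2^25 - 1) * 4
  const int64_t max_backward = -(1 << 27);     // -2^25 * 4
  return displacement < max_backward || displacement > max_forward;
}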
3779
3780
3781 // This function scans a relocation section for stub generation.
3782 // On AArch64 it is only instantiated for SHT_RELA relocation sections.
3785
3786 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3787 // SHT_REL or SHT_RELA.
3788
3789 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3790 // of relocs. OUTPUT_SECTION is the output section.
3791 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3792 // mapped to output offsets.
3793
3794 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3795 // VIEW_SIZE is the size. These refer to the input section, unless
3796 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3797 // the output section.
3798
3799 template<int size, bool big_endian>
3800 template<int sh_type>
3801 void inline
3802 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3803 const Relocate_info<size, big_endian>* relinfo,
3804 const unsigned char* prelocs,
3805 size_t reloc_count,
3806 Output_section* /*output_section*/,
3807 bool /*needs_special_offset_handling*/,
3808 const unsigned char* /*view*/,
3809 Address view_address,
3810 section_size_type)
3811 {
3812 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3813
3814 const int reloc_size =
3815 Reloc_types<sh_type,size,big_endian>::reloc_size;
3816 AArch64_relobj<size, big_endian>* object =
3817 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3818 unsigned int local_count = object->local_symbol_count();
3819
3820 gold::Default_comdat_behavior default_comdat_behavior;
3821 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3822
3823 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3824 {
3825 Reltype reloc(prelocs);
3826 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3827 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3828 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3829 if (r_type != elfcpp::R_AARCH64_CALL26
3830 && r_type != elfcpp::R_AARCH64_JUMP26)
3831 continue;
3832
3833 section_offset_type offset =
3834 convert_to_section_size_type(reloc.get_r_offset());
3835
3836 // Get the addend.
3837 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3838 reloc.get_r_addend();
3839
3840 const Sized_symbol<size>* sym;
3841 Symbol_value<size> symval;
3842 const Symbol_value<size> *psymval;
3843 bool is_defined_in_discarded_section;
3844 unsigned int shndx;
3845 if (r_sym < local_count)
3846 {
3847 sym = NULL;
3848 psymval = object->local_symbol(r_sym);
3849
3850 // If the local symbol belongs to a section we are discarding,
3851 // and that section is a debug section, try to find the
3852 // corresponding kept section and map this symbol to its
3853 // counterpart in the kept section. The symbol must not
3854 // correspond to a section we are folding.
3855 bool is_ordinary;
3856 shndx = psymval->input_shndx(&is_ordinary);
3857 is_defined_in_discarded_section =
3858 (is_ordinary
3859 && shndx != elfcpp::SHN_UNDEF
3860 && !object->is_section_included(shndx)
3861 && !relinfo->symtab->is_section_folded(object, shndx));
3862
3863 // We need to compute the would-be final value of this local
3864 // symbol.
3865 if (!is_defined_in_discarded_section)
3866 {
3867 typedef Sized_relobj_file<size, big_endian> ObjType;
3868 if (psymval->is_section_symbol())
3869 symval.set_is_section_symbol();
3870 typename ObjType::Compute_final_local_value_status status =
3871 object->compute_final_local_value(r_sym, psymval, &symval,
3872 relinfo->symtab);
3873 if (status == ObjType::CFLV_OK)
3874 {
3875 // Currently we cannot handle a branch to a target in
3876 // a merged section. If this is the case, issue an error
3877 // and also free the merge symbol value.
3878 if (!symval.has_output_value())
3879 {
3880 const std::string& section_name =
3881 object->section_name(shndx);
3882 object->error(_("cannot handle branch to local %u "
3883 "in a merged section %s"),
3884 r_sym, section_name.c_str());
3885 }
3886 psymval = &symval;
3887 }
3888 else
3889 {
3890 // We cannot determine the final value.
3891 continue;
3892 }
3893 }
3894 }
3895 else
3896 {
3897 const Symbol* gsym;
3898 gsym = object->global_symbol(r_sym);
3899 gold_assert(gsym != NULL);
3900 if (gsym->is_forwarder())
3901 gsym = relinfo->symtab->resolve_forwards(gsym);
3902
3903 sym = static_cast<const Sized_symbol<size>*>(gsym);
3904 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3905 symval.set_output_symtab_index(sym->symtab_index());
3906 else
3907 symval.set_no_output_symtab_entry();
3908
3909 // We need to compute the would-be final value of this global
3910 // symbol.
3911 const Symbol_table* symtab = relinfo->symtab;
3912 const Sized_symbol<size>* sized_symbol =
3913 symtab->get_sized_symbol<size>(gsym);
3914 Symbol_table::Compute_final_value_status status;
3915 typename elfcpp::Elf_types<size>::Elf_Addr value =
3916 symtab->compute_final_value<size>(sized_symbol, &status);
3917
3918 // Skip this if the symbol has no output section.
3919 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3920 continue;
3921 symval.set_output_value(value);
3922
3923 if (gsym->type() == elfcpp::STT_TLS)
3924 symval.set_is_tls_symbol();
3925 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3926 symval.set_is_ifunc_symbol();
3927 psymval = &symval;
3928
3929 is_defined_in_discarded_section =
3930 (gsym->is_defined_in_discarded_section()
3931 && gsym->is_undefined());
3932 shndx = 0;
3933 }
3934
3935 Symbol_value<size> symval2;
3936 if (is_defined_in_discarded_section)
3937 {
3938 if (comdat_behavior == CB_UNDETERMINED)
3939 {
3940 std::string name = object->section_name(relinfo->data_shndx);
3941 comdat_behavior = default_comdat_behavior.get(name.c_str());
3942 }
3943 if (comdat_behavior == CB_PRETEND)
3944 {
3945 bool found;
3946 typename elfcpp::Elf_types<size>::Elf_Addr value =
3947 object->map_to_kept_section(shndx, &found);
3948 if (found)
3949 symval2.set_output_value(value + psymval->input_value());
3950 else
3951 symval2.set_output_value(0);
3952 }
3953 else
3954 {
3955 if (comdat_behavior == CB_WARNING)
3956 gold_warning_at_location(relinfo, i, offset,
3957 _("relocation refers to discarded "
3958 "section"));
3959 symval2.set_output_value(0);
3960 }
3961 symval2.set_no_output_symtab_entry();
3962 psymval = &symval2;
3963 }
3964
3965 // If the symbol is a section symbol, we don't know the actual type of
3966 // the destination. Give up.
3967 if (psymval->is_section_symbol())
3968 continue;
3969
3970 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
3971 addend, view_address + offset);
3972 } // End of iterating relocs in a section
3973 } // End of Target_aarch64::scan_reloc_section_for_stubs
3974
3975
3976 // Scan an input section for stub generation.
3977
3978 template<int size, bool big_endian>
3979 void
3980 Target_aarch64<size, big_endian>::scan_section_for_stubs(
3981 const Relocate_info<size, big_endian>* relinfo,
3982 unsigned int sh_type,
3983 const unsigned char* prelocs,
3984 size_t reloc_count,
3985 Output_section* output_section,
3986 bool needs_special_offset_handling,
3987 const unsigned char* view,
3988 Address view_address,
3989 section_size_type view_size)
3990 {
3991 gold_assert(sh_type == elfcpp::SHT_RELA);
3992 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
3993 relinfo,
3994 prelocs,
3995 reloc_count,
3996 output_section,
3997 needs_special_offset_handling,
3998 view,
3999 view_address,
4000 view_size);
4001 }
4002
4003
4004 // Relocate a single stub.
4005
4006 template<int size, bool big_endian>
4007 void Target_aarch64<size, big_endian>::
4008 relocate_stub(The_reloc_stub* stub,
4009 const The_relocate_info*,
4010 Output_section*,
4011 unsigned char* view,
4012 Address address,
4013 section_size_type)
4014 {
4015 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4016 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4017 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4018
4019 Insntype* ip = reinterpret_cast<Insntype*>(view);
4020 int insn_number = stub->insn_num();
4021 const uint32_t* insns = stub->insns();
4022 // Check that the insns really are the stub insns.
4023 for (int i = 0; i < insn_number; ++i)
4024 {
4025 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4026 gold_assert(((uint32_t)insn == insns[i]));
4027 }
4028
4029 Address dest = stub->destination_address();
4030
4031 switch(stub->type())
4032 {
4033 case ST_ADRP_BRANCH:
4034 {
4035 // 1st reloc is ADR_PREL_PG_HI21
4036 The_reloc_functions_status status =
4037 The_reloc_functions::adrp(view, dest, address);
4038 // An error should never arise in the above step. If it does,
4039 // check 'aarch64_valid_for_adrp_p'.
4040 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4041
4042 // 2nd reloc is ADD_ABS_LO12_NC
4043 const AArch64_reloc_property* arp =
4044 aarch64_reloc_property_table->get_reloc_property(
4045 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4046 gold_assert(arp != NULL);
4047 status = The_reloc_functions::template
4048 rela_general<32>(view + 4, dest, 0, arp);
4049 // An error should never arise; it is an "_NC" relocation.
4050 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4051 }
4052 break;
4053
4054 case ST_LONG_BRANCH_ABS:
4055 // 1st reloc is R_AARCH64_PREL64, at offset 8
4056 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4057 break;
4058
4059 case ST_LONG_BRANCH_PCREL:
4060 {
4061 // "PC" calculation is the 2nd insn in the stub.
4062 uint64_t offset = dest - (address + 4);
4063 // The 64-bit offset is stored in instruction slots 4 and 5 (byte offset 16).
4064 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4065 }
4066 break;
4067
4068 default:
4069 gold_unreachable();
4070 }
4071 }
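// Illustration only, not part of gold: the ST_ADRP_BRANCH fix-up above
// splits the destination into a page-relative ADRP immediate and a low
// 12-bit ADD immediate. A hypothetical sketch of that arithmetic, assuming
// 4KiB pages:

static inline void
example_adrp_add_split(uint64_t dest, uint64_t stub_address,
                       int64_t* adrp_imm, uint32_t* add_imm)
{
  const uint64_t page_mask = ~static_cast<uint64_t>(0xfff);
  // ADRP encodes the distance between the two 4KiB pages, in pages.
  *adrp_imm = (static_cast<int64_t>(dest & page_mask)
               - static_cast<int64_t>(stub_address & page_mask)) >> 12;
  // ADD supplies the remaining offset within the destination page.
  *add_imm = static_cast<uint32_t>(dest & 0xfff);
}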
4072
4073
4074 // A class to handle the PLT data.
4075 // This is an abstract base class that handles most of the linker details
4076 // but does not know the actual contents of PLT entries. The derived
4077 // classes below fill in those details.
4078
4079 template<int size, bool big_endian>
4080 class Output_data_plt_aarch64 : public Output_section_data
4081 {
4082 public:
4083 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4084 Reloc_section;
4085 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4086
4087 Output_data_plt_aarch64(Layout* layout,
4088 uint64_t addralign,
4089 Output_data_got_aarch64<size, big_endian>* got,
4090 Output_data_space* got_plt,
4091 Output_data_space* got_irelative)
4092 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4093 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4094 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4095 { this->init(layout); }
4096
4097 // Initialize the PLT section.
4098 void
4099 init(Layout* layout);
4100
4101 // Add an entry to the PLT.
4102 void
4103 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4104
4105 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4106 unsigned int
4107 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4108 Sized_relobj_file<size, big_endian>* relobj,
4109 unsigned int local_sym_index);
4110
4111 // Add the relocation for a PLT entry.
4112 void
4113 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4114 unsigned int got_offset);
4115
4116 // Add the reserved TLSDESC_PLT entry to the PLT.
4117 void
4118 reserve_tlsdesc_entry(unsigned int got_offset)
4119 { this->tlsdesc_got_offset_ = got_offset; }
4120
4121 // Return true if a TLSDESC_PLT entry has been reserved.
4122 bool
4123 has_tlsdesc_entry() const
4124 { return this->tlsdesc_got_offset_ != -1U; }
4125
4126 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4127 unsigned int
4128 get_tlsdesc_got_offset() const
4129 { return this->tlsdesc_got_offset_; }
4130
4131 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4132 unsigned int
4133 get_tlsdesc_plt_offset() const
4134 {
4135 return (this->first_plt_entry_offset() +
4136 (this->count_ + this->irelative_count_)
4137 * this->get_plt_entry_size());
4138 }
4139
4140 // Return the .rela.plt section data.
4141 Reloc_section*
4142 rela_plt()
4143 { return this->rel_; }
4144
4145 // Return where the TLSDESC relocations should go.
4146 Reloc_section*
4147 rela_tlsdesc(Layout*);
4148
4149 // Return where the IRELATIVE relocations should go in the PLT
4150 // relocations.
4151 Reloc_section*
4152 rela_irelative(Symbol_table*, Layout*);
4153
4154 // Return whether we created a section for IRELATIVE relocations.
4155 bool
4156 has_irelative_section() const
4157 { return this->irelative_rel_ != NULL; }
4158
4159 // Return the number of PLT entries.
4160 unsigned int
4161 entry_count() const
4162 { return this->count_ + this->irelative_count_; }
4163
4164 // Return the offset of the first non-reserved PLT entry.
4165 unsigned int
4166 first_plt_entry_offset() const
4167 { return this->do_first_plt_entry_offset(); }
4168
4169 // Return the size of a PLT entry.
4170 unsigned int
4171 get_plt_entry_size() const
4172 { return this->do_get_plt_entry_size(); }
4173
4174 // Return the reserved tlsdesc entry size.
4175 unsigned int
4176 get_plt_tlsdesc_entry_size() const
4177 { return this->do_get_plt_tlsdesc_entry_size(); }
4178
4179 // Return the PLT address to use for a global symbol.
4180 uint64_t
4181 address_for_global(const Symbol*);
4182
4183 // Return the PLT address to use for a local symbol.
4184 uint64_t
4185 address_for_local(const Relobj*, unsigned int symndx);
4186
4187 protected:
4188 // Fill in the first PLT entry.
4189 void
4190 fill_first_plt_entry(unsigned char* pov,
4191 Address got_address,
4192 Address plt_address)
4193 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4194
4195 // Fill in a normal PLT entry.
4196 void
4197 fill_plt_entry(unsigned char* pov,
4198 Address got_address,
4199 Address plt_address,
4200 unsigned int got_offset,
4201 unsigned int plt_offset)
4202 {
4203 this->do_fill_plt_entry(pov, got_address, plt_address,
4204 got_offset, plt_offset);
4205 }
4206
4207 // Fill in the reserved TLSDESC PLT entry.
4208 void
4209 fill_tlsdesc_entry(unsigned char* pov,
4210 Address gotplt_address,
4211 Address plt_address,
4212 Address got_base,
4213 unsigned int tlsdesc_got_offset,
4214 unsigned int plt_offset)
4215 {
4216 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4217 tlsdesc_got_offset, plt_offset);
4218 }
4219
4220 virtual unsigned int
4221 do_first_plt_entry_offset() const = 0;
4222
4223 virtual unsigned int
4224 do_get_plt_entry_size() const = 0;
4225
4226 virtual unsigned int
4227 do_get_plt_tlsdesc_entry_size() const = 0;
4228
4229 virtual void
4230 do_fill_first_plt_entry(unsigned char* pov,
4231 Address got_addr,
4232 Address plt_addr) = 0;
4233
4234 virtual void
4235 do_fill_plt_entry(unsigned char* pov,
4236 Address got_address,
4237 Address plt_address,
4238 unsigned int got_offset,
4239 unsigned int plt_offset) = 0;
4240
4241 virtual void
4242 do_fill_tlsdesc_entry(unsigned char* pov,
4243 Address gotplt_address,
4244 Address plt_address,
4245 Address got_base,
4246 unsigned int tlsdesc_got_offset,
4247 unsigned int plt_offset) = 0;
4248
4249 void
4250 do_adjust_output_section(Output_section* os);
4251
4252 // Write to a map file.
4253 void
4254 do_print_to_mapfile(Mapfile* mapfile) const
4255 { mapfile->print_output_data(this, _("** PLT")); }
4256
4257 private:
4258 // Set the final size.
4259 void
4260 set_final_data_size();
4261
4262 // Write out the PLT data.
4263 void
4264 do_write(Output_file*);
4265
4266 // The reloc section.
4267 Reloc_section* rel_;
4268
4269 // The TLSDESC relocs, if necessary. These must follow the regular
4270 // PLT relocs.
4271 Reloc_section* tlsdesc_rel_;
4272
4273 // The IRELATIVE relocs, if necessary. These must follow the
4274 // regular PLT relocations.
4275 Reloc_section* irelative_rel_;
4276
4277 // The .got section.
4278 Output_data_got_aarch64<size, big_endian>* got_;
4279
4280 // The .got.plt section.
4281 Output_data_space* got_plt_;
4282
4283 // The part of the .got.plt section used for IRELATIVE relocs.
4284 Output_data_space* got_irelative_;
4285
4286 // The number of PLT entries.
4287 unsigned int count_;
4288
4289 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4290 // follow the regular PLT entries.
4291 unsigned int irelative_count_;
4292
4293 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4294 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4295 // indicates an offset is not allocated.
4296 unsigned int tlsdesc_got_offset_;
4297 };
4298
4299 // Initialize the PLT section.
4300
4301 template<int size, bool big_endian>
4302 void
4303 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4304 {
4305 this->rel_ = new Reloc_section(false);
4306 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4307 elfcpp::SHF_ALLOC, this->rel_,
4308 ORDER_DYNAMIC_PLT_RELOCS, false);
4309 }
4310
4311 template<int size, bool big_endian>
4312 void
4313 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4314 Output_section* os)
4315 {
4316 os->set_entsize(this->get_plt_entry_size());
4317 }
4318
4319 // Add an entry to the PLT.
4320
4321 template<int size, bool big_endian>
4322 void
4323 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4324 Layout* layout, Symbol* gsym)
4325 {
4326 gold_assert(!gsym->has_plt_offset());
4327
4328 unsigned int* pcount;
4329 unsigned int plt_reserved;
4330 Output_section_data_build* got;
4331
4332 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4333 && gsym->can_use_relative_reloc(false))
4334 {
4335 pcount = &this->irelative_count_;
4336 plt_reserved = 0;
4337 got = this->got_irelative_;
4338 }
4339 else
4340 {
4341 pcount = &this->count_;
4342 plt_reserved = this->first_plt_entry_offset();
4343 got = this->got_plt_;
4344 }
4345
4346 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4347 + plt_reserved);
4348
4349 ++*pcount;
4350
4351 section_offset_type got_offset = got->current_data_size();
4352
4353 // Every PLT entry needs a GOT entry which points back to the PLT
4354 // entry (this will be changed by the dynamic linker, normally
4355 // lazily when the function is called).
4356 got->set_current_data_size(got_offset + size / 8);
4357
4358 // Every PLT entry needs a reloc.
4359 this->add_relocation(symtab, layout, gsym, got_offset);
4360
4361 // Note that we don't need to save the symbol. The contents of the
4362 // PLT are independent of which symbols are used. The symbols only
4363 // appear in the relocations.
4364 }
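// Illustration only, not referenced by the linker: for a regular (non-IFUNC)
// symbol the bookkeeping above pairs the N-th PLT entry with the N-th
// non-reserved .got.plt word. A hypothetical sketch for a 64-bit target,
// assuming the standard 32-byte PLT header and 16-byte entries defined
// further below:

static inline void
example_plt_slot_for_symbol(unsigned int n,
                            uint64_t* plt_offset,
                            uint64_t* got_plt_offset)
{
  *plt_offset = 32 + n * 16;                                 // after PLT0
  *got_plt_offset = (AARCH64_GOTPLT_RESERVE_COUNT + n) * 8;  // after reserved words
}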
4365
4366 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4367 // the PLT offset.
4368
4369 template<int size, bool big_endian>
4370 unsigned int
4371 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4372 Symbol_table* symtab,
4373 Layout* layout,
4374 Sized_relobj_file<size, big_endian>* relobj,
4375 unsigned int local_sym_index)
4376 {
4377 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4378 ++this->irelative_count_;
4379
4380 section_offset_type got_offset = this->got_irelative_->current_data_size();
4381
4382 // Every PLT entry needs a GOT entry which points back to the PLT
4383 // entry.
4384 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4385
4386 // Every PLT entry needs a reloc.
4387 Reloc_section* rela = this->rela_irelative(symtab, layout);
4388 rela->add_symbolless_local_addend(relobj, local_sym_index,
4389 elfcpp::R_AARCH64_IRELATIVE,
4390 this->got_irelative_, got_offset, 0);
4391
4392 return plt_offset;
4393 }
4394
4395 // Add the relocation for a PLT entry.
4396
4397 template<int size, bool big_endian>
4398 void
4399 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4400 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4401 {
4402 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4403 && gsym->can_use_relative_reloc(false))
4404 {
4405 Reloc_section* rela = this->rela_irelative(symtab, layout);
4406 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4407 this->got_irelative_, got_offset, 0);
4408 }
4409 else
4410 {
4411 gsym->set_needs_dynsym_entry();
4412 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4413 got_offset, 0);
4414 }
4415 }
4416
4417 // Return where the TLSDESC relocations should go, creating it if
4418 // necessary. These follow the JUMP_SLOT relocations.
4419
4420 template<int size, bool big_endian>
4421 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4422 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4423 {
4424 if (this->tlsdesc_rel_ == NULL)
4425 {
4426 this->tlsdesc_rel_ = new Reloc_section(false);
4427 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4428 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4429 ORDER_DYNAMIC_PLT_RELOCS, false);
4430 gold_assert(this->tlsdesc_rel_->output_section()
4431 == this->rel_->output_section());
4432 }
4433 return this->tlsdesc_rel_;
4434 }
4435
4436 // Return where the IRELATIVE relocations should go in the PLT. These
4437 // follow the JUMP_SLOT and the TLSDESC relocations.
4438
4439 template<int size, bool big_endian>
4440 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4441 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4442 Layout* layout)
4443 {
4444 if (this->irelative_rel_ == NULL)
4445 {
4446 // Make sure we have a place for the TLSDESC relocations, in
4447 // case we see any later on.
4448 this->rela_tlsdesc(layout);
4449 this->irelative_rel_ = new Reloc_section(false);
4450 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4451 elfcpp::SHF_ALLOC, this->irelative_rel_,
4452 ORDER_DYNAMIC_PLT_RELOCS, false);
4453 gold_assert(this->irelative_rel_->output_section()
4454 == this->rel_->output_section());
4455
4456 if (parameters->doing_static_link())
4457 {
4458 // A statically linked executable will only have a .rela.plt
4459 // section to hold R_AARCH64_IRELATIVE relocs for
4460 // STT_GNU_IFUNC symbols. The library will use these
4461 // symbols to locate the IRELATIVE relocs at program startup
4462 // time.
4463 symtab->define_in_output_data("__rela_iplt_start", NULL,
4464 Symbol_table::PREDEFINED,
4465 this->irelative_rel_, 0, 0,
4466 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4467 elfcpp::STV_HIDDEN, 0, false, true);
4468 symtab->define_in_output_data("__rela_iplt_end", NULL,
4469 Symbol_table::PREDEFINED,
4470 this->irelative_rel_, 0, 0,
4471 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4472 elfcpp::STV_HIDDEN, 0, true, true);
4473 }
4474 }
4475 return this->irelative_rel_;
4476 }
4477
4478 // Return the PLT address to use for a global symbol.
4479
4480 template<int size, bool big_endian>
4481 uint64_t
4482 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4483 const Symbol* gsym)
4484 {
4485 uint64_t offset = 0;
4486 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4487 && gsym->can_use_relative_reloc(false))
4488 offset = (this->first_plt_entry_offset() +
4489 this->count_ * this->get_plt_entry_size());
4490 return this->address() + offset + gsym->plt_offset();
4491 }
4492
4493 // Return the PLT address to use for a local symbol. These are always
4494 // IRELATIVE relocs.
4495
4496 template<int size, bool big_endian>
4497 uint64_t
4498 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4499 const Relobj* object,
4500 unsigned int r_sym)
4501 {
4502 return (this->address()
4503 + this->first_plt_entry_offset()
4504 + this->count_ * this->get_plt_entry_size()
4505 + object->local_plt_offset(r_sym));
4506 }
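// Illustration only: IRELATIVE PLT entries are laid out after all regular
// entries, so the two address_for_* routines above index into different
// regions of the same .plt. Assuming the standard sizes, with R regular and
// I IRELATIVE entries the section looks like:
//
//   [0, 32)                       PLT0 (header)
//   [32, 32 + R*16)               regular entries (JUMP_SLOT relocs)
//   [32 + R*16, 32 + (R+I)*16)    IRELATIVE entries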
4507
4508 // Set the final size.
4509
4510 template<int size, bool big_endian>
4511 void
4512 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4513 {
4514 unsigned int count = this->count_ + this->irelative_count_;
4515 unsigned int extra_size = 0;
4516 if (this->has_tlsdesc_entry())
4517 extra_size += this->get_plt_tlsdesc_entry_size();
4518 this->set_data_size(this->first_plt_entry_offset()
4519 + count * this->get_plt_entry_size()
4520 + extra_size);
4521 }
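// Illustration only: with the regions sketched above plus the optional
// reserved TLSDESC entry, the data size works out as, for example,
//   5 regular + 2 IRELATIVE entries, TLSDESC reserved:
//     32 + (5 + 2) * 16 + 32 == 176 bytes.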
4522
4523 template<int size, bool big_endian>
4524 class Output_data_plt_aarch64_standard :
4525 public Output_data_plt_aarch64<size, big_endian>
4526 {
4527 public:
4528 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4529 Output_data_plt_aarch64_standard(
4530 Layout* layout,
4531 Output_data_got_aarch64<size, big_endian>* got,
4532 Output_data_space* got_plt,
4533 Output_data_space* got_irelative)
4534 : Output_data_plt_aarch64<size, big_endian>(layout,
4535 size == 32 ? 4 : 8,
4536 got, got_plt,
4537 got_irelative)
4538 { }
4539
4540 protected:
4541 // Return the offset of the first non-reserved PLT entry.
4542 virtual unsigned int
4543 do_first_plt_entry_offset() const
4544 { return this->first_plt_entry_size; }
4545
4546 // Return the size of a PLT entry
4547 virtual unsigned int
4548 do_get_plt_entry_size() const
4549 { return this->plt_entry_size; }
4550
4551 // Return the size of a tlsdesc entry
4552 virtual unsigned int
4553 do_get_plt_tlsdesc_entry_size() const
4554 { return this->plt_tlsdesc_entry_size; }
4555
4556 virtual void
4557 do_fill_first_plt_entry(unsigned char* pov,
4558 Address got_address,
4559 Address plt_address);
4560
4561 virtual void
4562 do_fill_plt_entry(unsigned char* pov,
4563 Address got_address,
4564 Address plt_address,
4565 unsigned int got_offset,
4566 unsigned int plt_offset);
4567
4568 virtual void
4569 do_fill_tlsdesc_entry(unsigned char* pov,
4570 Address gotplt_address,
4571 Address plt_address,
4572 Address got_base,
4573 unsigned int tlsdesc_got_offset,
4574 unsigned int plt_offset);
4575
4576 private:
4577 // The size of the first PLT entry.
4578 static const int first_plt_entry_size = 32;
4579 // The size of a regular PLT entry.
4580 static const int plt_entry_size = 16;
4581 // The size of the reserved TLSDESC PLT entry.
4582 static const int plt_tlsdesc_entry_size = 32;
4583 // Template for the first PLT entry.
4584 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4585 // Template for subsequent PLT entries.
4586 static const uint32_t plt_entry[plt_entry_size / 4];
4587 // The reserved TLSDESC entry in the PLT for an executable.
4588 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4589 };
4590
4591 // The first entry in the PLT for an executable.
4592
4593 template<>
4594 const uint32_t
4595 Output_data_plt_aarch64_standard<32, false>::
4596 first_plt_entry[first_plt_entry_size / 4] =
4597 {
4598 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4599 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4600 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4601 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4602 0xd61f0220, /* br x17 */
4603 0xd503201f, /* nop */
4604 0xd503201f, /* nop */
4605 0xd503201f, /* nop */
4606 };
4607
4608
4609 template<>
4610 const uint32_t
4611 Output_data_plt_aarch64_standard<32, true>::
4612 first_plt_entry[first_plt_entry_size / 4] =
4613 {
4614 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4615 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4616 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4617 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4618 0xd61f0220, /* br x17 */
4619 0xd503201f, /* nop */
4620 0xd503201f, /* nop */
4621 0xd503201f, /* nop */
4622 };
4623
4624
4625 template<>
4626 const uint32_t
4627 Output_data_plt_aarch64_standard<64, false>::
4628 first_plt_entry[first_plt_entry_size / 4] =
4629 {
4630 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4631 0x90000010, /* adrp x16, PLT_GOT+16 */
4632 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4633 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4634 0xd61f0220, /* br x17 */
4635 0xd503201f, /* nop */
4636 0xd503201f, /* nop */
4637 0xd503201f, /* nop */
4638 };
4639
4640
4641 template<>
4642 const uint32_t
4643 Output_data_plt_aarch64_standard<64, true>::
4644 first_plt_entry[first_plt_entry_size / 4] =
4645 {
4646 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4647 0x90000010, /* adrp x16, PLT_GOT+16 */
4648 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4649 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4650 0xd61f0220, /* br x17 */
4651 0xd503201f, /* nop */
4652 0xd503201f, /* nop */
4653 0xd503201f, /* nop */
4654 };
4655
4656
4657 template<>
4658 const uint32_t
4659 Output_data_plt_aarch64_standard<32, false>::
4660 plt_entry[plt_entry_size / 4] =
4661 {
4662 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4663 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4664 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4665 0xd61f0220, /* br x17. */
4666 };
4667
4668
4669 template<>
4670 const uint32_t
4671 Output_data_plt_aarch64_standard<32, true>::
4672 plt_entry[plt_entry_size / 4] =
4673 {
4674 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4675 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4676 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4677 0xd61f0220, /* br x17. */
4678 };
4679
4680
4681 template<>
4682 const uint32_t
4683 Output_data_plt_aarch64_standard<64, false>::
4684 plt_entry[plt_entry_size / 4] =
4685 {
4686 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4687 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4688 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4689 0xd61f0220, /* br x17. */
4690 };
4691
4692
4693 template<>
4694 const uint32_t
4695 Output_data_plt_aarch64_standard<64, true>::
4696 plt_entry[plt_entry_size / 4] =
4697 {
4698 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4699 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4700 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4701 0xd61f0220, /* br x17. */
4702 };
4703
4704
4705 template<int size, bool big_endian>
4706 void
4707 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4708 unsigned char* pov,
4709 Address got_address,
4710 Address plt_address)
4711 {
4712 // PLT0 of the small PLT looks like this in ELF64 -
4713 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4714 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4715 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4716 // symbol resolver
4717 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4718 // GOTPLT entry for this.
4719 // br x17
4720 // PLT0 will be slightly different in ELF32 due to different got entry
4721 // size.
4722 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4723 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4724
4725 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4726 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4727 // FIXME: This only works for 64bit
4728 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4729 gotplt_2nd_ent, plt_address + 4);
4730
4731 // Fill in R_AARCH64_LDST64_ABS_LO12
4732 elfcpp::Swap<32, big_endian>::writeval(
4733 pov + 8,
4734 ((this->first_plt_entry[2] & 0xffc003ff)
4735 | ((gotplt_2nd_ent & 0xff8) << 7)));
4736
4737 // Fill in R_AARCH64_ADD_ABS_LO12
4738 elfcpp::Swap<32, big_endian>::writeval(
4739 pov + 12,
4740 ((this->first_plt_entry[3] & 0xffc003ff)
4741 | ((gotplt_2nd_ent & 0xfff) << 10)));
4742 }
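// Illustration only, not part of gold: why the two masks above look the way
// they do. The imm12 field of ADD and of load/store instructions lives in
// bits [21:10]. ADD takes the raw low 12 bits of the address, while a
// 64-bit LDR scales its immediate by 8, so only address bits [11:3] are
// encoded. Hypothetical helpers showing both packings:

static inline uint32_t
example_pack_add_lo12(uint32_t insn, uint64_t addr)
{ return (insn & 0xffc003ff) | (static_cast<uint32_t>(addr & 0xfff) << 10); }

static inline uint32_t
example_pack_ldr64_lo12(uint32_t insn, uint64_t addr)
{ return (insn & 0xffc003ff) | (static_cast<uint32_t>(addr & 0xff8) << 7); }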
4743
4744
4745 // Subsequent entries in the PLT for an executable.
4746 // FIXME: This only works for 64bit
4747
4748 template<int size, bool big_endian>
4749 void
4750 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4751 unsigned char* pov,
4752 Address got_address,
4753 Address plt_address,
4754 unsigned int got_offset,
4755 unsigned int plt_offset)
4756 {
4757 memcpy(pov, this->plt_entry, this->plt_entry_size);
4758
4759 Address gotplt_entry_address = got_address + got_offset;
4760 Address plt_entry_address = plt_address + plt_offset;
4761
4762 // Fill in R_AARCH64_ADR_PREL_PG_HI21
4763 AArch64_relocate_functions<size, big_endian>::adrp(
4764 pov,
4765 gotplt_entry_address,
4766 plt_entry_address);
4767
4768 // Fill in R_AARCH64_LDST64_ABS_LO12
4769 elfcpp::Swap<32, big_endian>::writeval(
4770 pov + 4,
4771 ((this->plt_entry[1] & 0xffc003ff)
4772 | ((gotplt_entry_address & 0xff8) << 7)));
4773
4774 // Fill in R_AARCH64_ADD_ABS_LO12
4775 elfcpp::Swap<32, big_endian>::writeval(
4776 pov + 8,
4777 ((this->plt_entry[2] & 0xffc003ff)
4778 | ((gotplt_entry_address & 0xfff) <<10)));
4779
4780 }
4781
4782
4783 template<>
4784 const uint32_t
4785 Output_data_plt_aarch64_standard<32, false>::
4786 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4787 {
4788 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4789 0x90000002, /* adrp x2, 0 */
4790 0x90000003, /* adrp x3, 0 */
4791 0xb9400042, /* ldr w2, [w2, #0] */
4792 0x11000063, /* add w3, w3, 0 */
4793 0xd61f0040, /* br x2 */
4794 0xd503201f, /* nop */
4795 0xd503201f, /* nop */
4796 };
4797
4798 template<>
4799 const uint32_t
4800 Output_data_plt_aarch64_standard<32, true>::
4801 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4802 {
4803 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4804 0x90000002, /* adrp x2, 0 */
4805 0x90000003, /* adrp x3, 0 */
4806 0xb9400042, /* ldr w2, [w2, #0] */
4807 0x11000063, /* add w3, w3, 0 */
4808 0xd61f0040, /* br x2 */
4809 0xd503201f, /* nop */
4810 0xd503201f, /* nop */
4811 };
4812
4813 template<>
4814 const uint32_t
4815 Output_data_plt_aarch64_standard<64, false>::
4816 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4817 {
4818 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4819 0x90000002, /* adrp x2, 0 */
4820 0x90000003, /* adrp x3, 0 */
4821 0xf9400042, /* ldr x2, [x2, #0] */
4822 0x91000063, /* add x3, x3, 0 */
4823 0xd61f0040, /* br x2 */
4824 0xd503201f, /* nop */
4825 0xd503201f, /* nop */
4826 };
4827
4828 template<>
4829 const uint32_t
4830 Output_data_plt_aarch64_standard<64, true>::
4831 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4832 {
4833 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4834 0x90000002, /* adrp x2, 0 */
4835 0x90000003, /* adrp x3, 0 */
4836 0xf9400042, /* ldr x2, [x2, #0] */
4837 0x91000063, /* add x3, x3, 0 */
4838 0xd61f0040, /* br x2 */
4839 0xd503201f, /* nop */
4840 0xd503201f, /* nop */
4841 };
4842
4843 template<int size, bool big_endian>
4844 void
4845 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4846 unsigned char* pov,
4847 Address gotplt_address,
4848 Address plt_address,
4849 Address got_base,
4850 unsigned int tlsdesc_got_offset,
4851 unsigned int plt_offset)
4852 {
4853 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4854
4855 // move DT_TLSDESC_GOT address into x2
4856 // move .got.plt address into x3
4857 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4858 Address plt_entry_address = plt_address + plt_offset;
4859
4860 // R_AARCH64_ADR_PREL_PG_HI21
4861 AArch64_relocate_functions<size, big_endian>::adrp(
4862 pov + 4,
4863 tlsdesc_got_entry,
4864 plt_entry_address + 4);
4865
4866 // R_AARCH64_ADR_PREL_PG_HI21
4867 AArch64_relocate_functions<size, big_endian>::adrp(
4868 pov + 8,
4869 gotplt_address,
4870 plt_entry_address + 8);
4871
4872 // R_AARCH64_LDST64_ABS_LO12
4873 elfcpp::Swap<32, big_endian>::writeval(
4874 pov + 12,
4875 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4876 | ((tlsdesc_got_entry & 0xff8) << 7)));
4877
4878 // R_AARCH64_ADD_ABS_LO12
4879 elfcpp::Swap<32, big_endian>::writeval(
4880 pov + 16,
4881 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4882 | ((gotplt_address & 0xfff) << 10)));
4883 }
4884
4885 // Write out the PLT. This uses the hand-coded instructions above,
4886 // and adjusts them as needed, following the standard AArch64 PLT layout.
4887
4888 template<int size, bool big_endian>
4889 void
4890 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4891 {
4892 const off_t offset = this->offset();
4893 const section_size_type oview_size =
4894 convert_to_section_size_type(this->data_size());
4895 unsigned char* const oview = of->get_output_view(offset, oview_size);
4896
4897 const off_t got_file_offset = this->got_plt_->offset();
4898 gold_assert(got_file_offset + this->got_plt_->data_size()
4899 == this->got_irelative_->offset());
4900
4901 const section_size_type got_size =
4902 convert_to_section_size_type(this->got_plt_->data_size()
4903 + this->got_irelative_->data_size());
4904 unsigned char* const got_view = of->get_output_view(got_file_offset,
4905 got_size);
4906
4907 unsigned char* pov = oview;
4908
4909 // The base address of the .plt section.
4910 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4911 // The base address of the PLT portion of the .got section.
4912 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4913 = this->got_plt_->address();
4914
4915 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4916 pov += this->first_plt_entry_offset();
4917
4918 // The first three entries in .got.plt are reserved.
4919 unsigned char* got_pov = got_view;
4920 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4921 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4922
4923 unsigned int plt_offset = this->first_plt_entry_offset();
4924 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4925 const unsigned int count = this->count_ + this->irelative_count_;
4926 for (unsigned int plt_index = 0;
4927 plt_index < count;
4928 ++plt_index,
4929 pov += this->get_plt_entry_size(),
4930 got_pov += size / 8,
4931 plt_offset += this->get_plt_entry_size(),
4932 got_offset += size / 8)
4933 {
4934 // Set and adjust the PLT entry itself.
4935 this->fill_plt_entry(pov, gotplt_address, plt_address,
4936 got_offset, plt_offset);
4937
4938 // Set the entry in the GOT, which points to plt0.
4939 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4940 }
4941
4942 if (this->has_tlsdesc_entry())
4943 {
4944 // Set and adjust the reserved TLSDESC PLT entry.
4945 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4946 // The base address of the .got section.
4947 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4948 this->got_->address();
4949 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4950 tlsdesc_got_offset, plt_offset);
4951 pov += this->get_plt_tlsdesc_entry_size();
4952 }
4953
4954 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4955 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4956
4957 of->write_output_view(offset, oview_size, oview);
4958 of->write_output_view(got_file_offset, got_size, got_view);
4959 }
4960
4961 // Describes how to update the immediate field of an instruction.
4962 struct AArch64_howto
4963 {
4964 // The immediate field mask.
4965 elfcpp::Elf_Xword dst_mask;
4966
4967 // The bit offset at which the relocation immediate is applied.
4968 int doffset;
4969
4970 // The second part offset, if the immediate field has two parts.
4971 // -1 if the immediate field has only one part.
4972 int doffset2;
4973 };
4974
4975 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
4976 {
4977 {0, -1, -1}, // DATA
4978 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
4979 {0xffffe0, 5, -1}, // LD [23:5]-imm19
4980 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
4981 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
4982 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
4983 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
4984 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
4985 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
4986 {0x3ffffff, 0, -1}, // B [25:0]-imm26
4987 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
4988 };
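// Illustration only, not part of gold: how the table above is typically
// consumed. For a single-part immediate the patched word is
//   new = (old & ~dst_mask) | (immed << doffset)
// and for ADR/ADRP the 21-bit immediate is split into immlo (bits [30:29])
// and immhi (bits [23:5]). Hypothetical helpers mirroring both cases:

static inline uint32_t
example_patch_simple(uint32_t insn, const AArch64_howto& howto, uint32_t immed)
{
  return ((insn & ~static_cast<uint32_t>(howto.dst_mask))
          | (immed << howto.doffset));
}

static inline uint32_t
example_patch_adr(uint32_t insn, uint32_t imm21)
{
  const uint32_t immlo = imm21 & 0x3;             // low 2 bits -> [30:29]
  const uint32_t immhi = (imm21 >> 2) & 0x7ffff;  // high 19 bits -> [23:5]
  return ((insn & ~((0x3u << 29) | (0x7ffffu << 5)))
          | (immlo << 29) | (immhi << 5));
}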
4989
4990 // AArch64 relocate function class
4991
4992 template<int size, bool big_endian>
4993 class AArch64_relocate_functions
4994 {
4995 public:
4996 typedef enum
4997 {
4998 STATUS_OKAY, // No error during relocation.
4999 STATUS_OVERFLOW, // Relocation overflow.
5000 STATUS_BAD_RELOC, // Relocation cannot be applied.
5001 } Status;
5002
5003 typedef AArch64_relocate_functions<size, big_endian> This;
5004 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5005 typedef Relocate_info<size, big_endian> The_relocate_info;
5006 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5007 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5008 typedef Stub_table<size, big_endian> The_stub_table;
5009 typedef elfcpp::Rela<size, big_endian> The_rela;
5010 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5011
5012 // Return the page address of the address.
5013 // Page(address) = address & ~0xFFF
5014
5015 static inline AArch64_valtype
5016 Page(Address address)
5017 {
5018 return (address & (~static_cast<Address>(0xFFF)));
5019 }
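// For example, Page(0x4007f8) == 0x400000 and Page(0x400fff) == 0x400000,
// so the ADRP input Page(S + A) - Page(P) is always a multiple of 0x1000.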
5020
5021 private:
5022 // Update the instruction (pointed to by view) with selected bits (immed).
5023 // val = (val & ~dst_mask) | (immed << doffset)
5024
5025 template<int valsize>
5026 static inline void
5027 update_view(unsigned char* view,
5028 AArch64_valtype immed,
5029 elfcpp::Elf_Xword doffset,
5030 elfcpp::Elf_Xword dst_mask)
5031 {
5032 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5033 Valtype* wv = reinterpret_cast<Valtype*>(view);
5034 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5035
5036 // Clear immediate fields.
5037 val &= ~dst_mask;
5038 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5039 static_cast<Valtype>(val | (immed << doffset)));
5040 }
5041
5042 // Update two parts of an instruction (pointed to by view) with selected
5043 // bits (immed1 and immed2).
5044 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5045
5046 template<int valsize>
5047 static inline void
5048 update_view_two_parts(
5049 unsigned char* view,
5050 AArch64_valtype immed1,
5051 AArch64_valtype immed2,
5052 elfcpp::Elf_Xword doffset1,
5053 elfcpp::Elf_Xword doffset2,
5054 elfcpp::Elf_Xword dst_mask)
5055 {
5056 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5057 Valtype* wv = reinterpret_cast<Valtype*>(view);
5058 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5059 val &= ~dst_mask;
5060 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5061 static_cast<Valtype>(val | (immed1 << doffset1) |
5062 (immed2 << doffset2)));
5063 }
5064
5065 // Update adr or adrp instruction with immed.
5066 // In adr and adrp: [30:29] immlo [23:5] immhi
5067
5068 static inline void
5069 update_adr(unsigned char* view, AArch64_valtype immed)
5070 {
5071 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5072 This::template update_view_two_parts<32>(
5073 view,
5074 immed & 0x3,
5075 (immed & 0x1ffffc) >> 2,
5076 29,
5077 5,
5078 dst_mask);
5079 }
5080
5081 // Update movz/movn instruction with bits immed.
5082 // Set instruction to movz if is_movz is true, otherwise set instruction
5083 // to movn.
5084
5085 static inline void
5086 update_movnz(unsigned char* view,
5087 AArch64_valtype immed,
5088 bool is_movz)
5089 {
5090 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5091 Valtype* wv = reinterpret_cast<Valtype*>(view);
5092 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5093
5094 const elfcpp::Elf_Xword doffset =
5095 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5096 const elfcpp::Elf_Xword dst_mask =
5097 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5098
5099 // Clear immediate fields and opc code.
5100 val &= ~(dst_mask | (0x3 << 29));
5101
5102 // Set instruction to movz or movn.
5103 // movz: [30:29] is 10 movn: [30:29] is 00
5104 if (is_movz)
5105 val |= (0x2 << 29);
5106
5107 elfcpp::Swap<32, big_endian>::writeval(wv,
5108 static_cast<Valtype>(val | (immed << doffset)));
5109 }
5110
5111 public:
5112
5113 // Update selected bits in text.
5114
5115 template<int valsize>
5116 static inline typename This::Status
5117 reloc_common(unsigned char* view, Address x,
5118 const AArch64_reloc_property* reloc_property)
5119 {
5120 // Select bits from X.
5121 Address immed = reloc_property->select_x_value(x);
5122
5123 // Update view.
5124 const AArch64_reloc_property::Reloc_inst inst =
5125 reloc_property->reloc_inst();
5126 // If it is a data relocation, or the instruction's immediate field has
5127 // two parts, this helper must not be used.
5128 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5129 aarch64_howto[inst].doffset != -1);
5130 This::template update_view<valsize>(view, immed,
5131 aarch64_howto[inst].doffset,
5132 aarch64_howto[inst].dst_mask);
5133
5134 // Check for overflow or misalignment if needed.
5135 return (reloc_property->checkup_x_value(x)
5136 ? This::STATUS_OKAY
5137 : This::STATUS_OVERFLOW);
5138 }
5139
5140 // Construct a B insn. Note that although we group it here with the other
5141 // relocation operations, no actual 'relocation' is involved.
5142 static inline void
5143 construct_b(unsigned char* view, unsigned int branch_offset)
5144 {
5145 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5146 26, 0, 0xffffffff);
5147 }
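// For example, a branch to the next-but-one instruction (branch_offset == 8)
// is encoded as 0x14000002: opcode 0b000101 in bits [31:26] and
// imm26 == 8 >> 2 == 2.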
5148
5149 // Do a simple rela relocation at unaligned addresses.
5150
5151 template<int valsize>
5152 static inline typename This::Status
5153 rela_ua(unsigned char* view,
5154 const Sized_relobj_file<size, big_endian>* object,
5155 const Symbol_value<size>* psymval,
5156 AArch64_valtype addend,
5157 const AArch64_reloc_property* reloc_property)
5158 {
5159 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5160 Valtype;
5161 typename elfcpp::Elf_types<size>::Elf_Addr x =
5162 psymval->value(object, addend);
5163 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5164 static_cast<Valtype>(x));
5165 return (reloc_property->checkup_x_value(x)
5166 ? This::STATUS_OKAY
5167 : This::STATUS_OVERFLOW);
5168 }
5169
5170 // Do a simple pc-relative relocation at unaligned addresses.
5171
5172 template<int valsize>
5173 static inline typename This::Status
5174 pcrela_ua(unsigned char* view,
5175 const Sized_relobj_file<size, big_endian>* object,
5176 const Symbol_value<size>* psymval,
5177 AArch64_valtype addend,
5178 Address address,
5179 const AArch64_reloc_property* reloc_property)
5180 {
5181 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5182 Valtype;
5183 Address x = psymval->value(object, addend) - address;
5184 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5185 static_cast<Valtype>(x));
5186 return (reloc_property->checkup_x_value(x)
5187 ? This::STATUS_OKAY
5188 : This::STATUS_OVERFLOW);
5189 }
5190
5191 // Do a simple rela relocation at aligned addresses.
5192
5193 template<int valsize>
5194 static inline typename This::Status
5195 rela(
5196 unsigned char* view,
5197 const Sized_relobj_file<size, big_endian>* object,
5198 const Symbol_value<size>* psymval,
5199 AArch64_valtype addend,
5200 const AArch64_reloc_property* reloc_property)
5201 {
5202 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5203 Valtype* wv = reinterpret_cast<Valtype*>(view);
5204 Address x = psymval->value(object, addend);
5205     elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5206 return (reloc_property->checkup_x_value(x)
5207 ? This::STATUS_OKAY
5208 : This::STATUS_OVERFLOW);
5209 }
5210
5211 // Do relocate. Update selected bits in text.
5212 // new_val = (val & ~dst_mask) | (immed << doffset)
5213
5214 template<int valsize>
5215 static inline typename This::Status
5216 rela_general(unsigned char* view,
5217 const Sized_relobj_file<size, big_endian>* object,
5218 const Symbol_value<size>* psymval,
5219 AArch64_valtype addend,
5220 const AArch64_reloc_property* reloc_property)
5221 {
5222 // Calculate relocation.
5223 Address x = psymval->value(object, addend);
5224 return This::template reloc_common<valsize>(view, x, reloc_property);
5225 }
5226
5227 // Do relocate. Update selected bits in text.
5228 // new val = (val & ~dst_mask) | (immed << doffset)
5229
5230 template<int valsize>
5231 static inline typename This::Status
5232 rela_general(
5233 unsigned char* view,
5234 AArch64_valtype s,
5235 AArch64_valtype addend,
5236 const AArch64_reloc_property* reloc_property)
5237 {
5238 // Calculate relocation.
5239 Address x = s + addend;
5240 return This::template reloc_common<valsize>(view, x, reloc_property);
5241 }
5242
5243 // Do address relative relocate. Update selected bits in text.
5244 // new val = (val & ~dst_mask) | (immed << doffset)
5245
5246 template<int valsize>
5247 static inline typename This::Status
5248 pcrela_general(
5249 unsigned char* view,
5250 const Sized_relobj_file<size, big_endian>* object,
5251 const Symbol_value<size>* psymval,
5252 AArch64_valtype addend,
5253 Address address,
5254 const AArch64_reloc_property* reloc_property)
5255 {
5256 // Calculate relocation.
5257 Address x = psymval->value(object, addend) - address;
5258 return This::template reloc_common<valsize>(view, x, reloc_property);
5259 }
5260
5261
5262 // Calculate (S + A) - address, update adr instruction.
5263
5264 static inline typename This::Status
5265 adr(unsigned char* view,
5266 const Sized_relobj_file<size, big_endian>* object,
5267 const Symbol_value<size>* psymval,
5268 Address addend,
5269 Address address,
5270 const AArch64_reloc_property* /* reloc_property */)
5271 {
5272 AArch64_valtype x = psymval->value(object, addend) - address;
5273 // Pick bits [20:0] of X.
5274 AArch64_valtype immed = x & 0x1fffff;
5275 update_adr(view, immed);
5276 // Check -2^20 <= X < 2^20
5277     return (size == 64 && Bits<21>::has_overflow(x)
5278 ? This::STATUS_OVERFLOW
5279 : This::STATUS_OKAY);
5280 }
5281
5282 // Calculate PG(S+A) - PG(address), update adrp instruction.
5283 // R_AARCH64_ADR_PREL_PG_HI21
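  // For example (assuming the usual 4KB-page definition PG(x) = x & ~0xfff):
  // with S+A = 0x412345 and address = 0x400010, PG(S+A) - PG(address) is
  // 0x412000 - 0x400000 = 0x12000, so the 21-bit adrp immediate is
  // 0x12000 >> 12 = 0x12.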
5284
5285 static inline typename This::Status
5286 adrp(
5287 unsigned char* view,
5288 Address sa,
5289 Address address)
5290 {
5291 AArch64_valtype x = This::Page(sa) - This::Page(address);
5292 // Pick [32:12] of X.
5293 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5294 update_adr(view, immed);
5295 // Check -2^32 <= X < 2^32
5296     return (size == 64 && Bits<33>::has_overflow(x)
5297 ? This::STATUS_OVERFLOW
5298 : This::STATUS_OKAY);
5299 }
5300
5301 // Calculate PG(S+A) - PG(address), update adrp instruction.
5302 // R_AARCH64_ADR_PREL_PG_HI21
5303
5304 static inline typename This::Status
5305 adrp(unsigned char* view,
5306 const Sized_relobj_file<size, big_endian>* object,
5307 const Symbol_value<size>* psymval,
5308 Address addend,
5309 Address address,
5310 const AArch64_reloc_property* reloc_property)
5311 {
5312 Address sa = psymval->value(object, addend);
5313 AArch64_valtype x = This::Page(sa) - This::Page(address);
5314 // Pick [32:12] of X.
5315 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5316 update_adr(view, immed);
5317 return (reloc_property->checkup_x_value(x)
5318 ? This::STATUS_OKAY
5319 : This::STATUS_OVERFLOW);
5320 }
5321
5322 // Update mov[n/z] instruction. Check overflow if needed.
5323   // If X >= 0, set the instruction to movz and set its immediate value to
5324   // the selected bits of X.
5325   // If X < 0, set the instruction to movn and set its immediate value to
5326   // NOT (the selected bits of X).
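  // For example, with a G0-style selection of bits [15:0]: X = 0x1234 becomes
  // a movz with immediate 0x1234, while X = -2 (all ones except bit 0) becomes
  // a movn with immediate (~X)[15:0] = 0x1.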
5327
5328 static inline typename This::Status
5329 movnz(unsigned char* view,
5330 AArch64_valtype x,
5331 const AArch64_reloc_property* reloc_property)
5332 {
5333 // Select bits from X.
5334 Address immed;
5335 bool is_movz;
5336 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5337 if (static_cast<SignedW>(x) >= 0)
5338 {
5339 immed = reloc_property->select_x_value(x);
5340 is_movz = true;
5341 }
5342 else
5343 {
5344 	immed = reloc_property->select_x_value(~x);
5345 is_movz = false;
5346 }
5347
5348 // Update movnz instruction.
5349 update_movnz(view, immed, is_movz);
5350
5351     // Check for overflow or misalignment if needed.
5352 return (reloc_property->checkup_x_value(x)
5353 ? This::STATUS_OKAY
5354 : This::STATUS_OVERFLOW);
5355 }
5356
5357 static inline bool
5358 maybe_apply_stub(unsigned int,
5359 const The_relocate_info*,
5360 const The_rela&,
5361 unsigned char*,
5362 Address,
5363 const Sized_symbol<size>*,
5364 const Symbol_value<size>*,
5365 const Sized_relobj_file<size, big_endian>*,
5366 section_size_type);
5367
5368 }; // End of AArch64_relocate_functions
5369
5370
5371 // For a certain relocation type (usually jump/branch), test to see if the
5372 // destination needs a stub to be reachable. If so, re-route the destination
5373 // of the original instruction to the stub. Note that at this point the stub
5374 // has already been generated.
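// The re-routing is done by re-applying the branch relocation with the stub's
// address as the target, so the original branch jumps to the stub and the
// stub transfers control to the real destination.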
5375
5376 template<int size, bool big_endian>
5377 bool
5378 AArch64_relocate_functions<size, big_endian>::
5379 maybe_apply_stub(unsigned int r_type,
5380 const The_relocate_info* relinfo,
5381 const The_rela& rela,
5382 unsigned char* view,
5383 Address address,
5384 const Sized_symbol<size>* gsym,
5385 const Symbol_value<size>* psymval,
5386 const Sized_relobj_file<size, big_endian>* object,
5387 section_size_type current_group_size)
5388 {
5389 if (parameters->options().relocatable())
5390 return false;
5391
5392 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5393 Address branch_target = psymval->value(object, 0) + addend;
5394 int stub_type =
5395 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5396 if (stub_type == ST_NONE)
5397 return false;
5398
5399 const The_aarch64_relobj* aarch64_relobj =
5400 static_cast<const The_aarch64_relobj*>(object);
5401 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5402 gold_assert(stub_table != NULL);
5403
5404 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5405 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5406 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5407 gold_assert(stub != NULL);
5408
5409 Address new_branch_target = stub_table->address() + stub->offset();
5410 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5411 new_branch_target - address;
5412 const AArch64_reloc_property* arp =
5413 aarch64_reloc_property_table->get_reloc_property(r_type);
5414 gold_assert(arp != NULL);
5415 typename This::Status status = This::template
5416 rela_general<32>(view, branch_offset, 0, arp);
5417 if (status != This::STATUS_OKAY)
5418 gold_error(_("Stub is too far away, try a smaller value "
5419 "for '--stub-group-size'. The current value is 0x%lx."),
5420 static_cast<unsigned long>(current_group_size));
5421 return true;
5422 }
5423
5424
5425 // Group input sections for stub generation.
5426 //
5427 // We group input sections in an output section so that the total size,
5428 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5429 // unless the only input section in a group is already bigger than GROUP_SIZE.
5430 // For each group a stub table is created and is placed right after the last
5431 // input section of the group. If STUBS_ALWAYS_AFTER_BRANCH is false, we
5432 // further extend the group past the stub table, so that branches that come
5433 // after the stub table can also use it.
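// For example, with --stub-group-size=0x2000000 (32MB), input sections of an
// output section are, roughly speaking, accumulated into a group until adding
// the next section would exceed 32MB, and a stub table is placed after the
// group so that branches inside the group can reach their stubs.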
5434
5435 template<int size, bool big_endian>
5436 void
5437 Target_aarch64<size, big_endian>::group_sections(
5438 Layout* layout,
5439 section_size_type group_size,
5440 bool stubs_always_after_branch,
5441 const Task* task)
5442 {
5443 // Group input sections and insert stub table
5444 Layout::Section_list section_list;
5445 layout->get_executable_sections(&section_list);
5446 for (Layout::Section_list::const_iterator p = section_list.begin();
5447 p != section_list.end();
5448 ++p)
5449 {
5450 AArch64_output_section<size, big_endian>* output_section =
5451 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5452 output_section->group_sections(group_size, stubs_always_after_branch,
5453 this, task);
5454 }
5455 }
5456
5457
5458 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5459 // section of RELOBJ.
5460
5461 template<int size, bool big_endian>
5462 AArch64_input_section<size, big_endian>*
5463 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5464 Relobj* relobj, unsigned int shndx) const
5465 {
5466 Section_id sid(relobj, shndx);
5467 typename AArch64_input_section_map::const_iterator p =
5468 this->aarch64_input_section_map_.find(sid);
5469 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5470 }
5471
5472
5473 // Make a new AArch64_input_section object.
5474
5475 template<int size, bool big_endian>
5476 AArch64_input_section<size, big_endian>*
5477 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5478 Relobj* relobj, unsigned int shndx)
5479 {
5480 Section_id sid(relobj, shndx);
5481
5482 AArch64_input_section<size, big_endian>* input_section =
5483 new AArch64_input_section<size, big_endian>(relobj, shndx);
5484 input_section->init();
5485
5486 // Register new AArch64_input_section in map for look-up.
5487 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5488 this->aarch64_input_section_map_.insert(
5489 std::make_pair(sid, input_section));
5490
5491   // Make sure that we have not already created another AArch64_input_section
5492   // for this input section.
5493 gold_assert(ins.second);
5494
5495 return input_section;
5496 }
5497
5498
5499 // Relaxation hook. This is where we do stub generation.
5500
5501 template<int size, bool big_endian>
5502 bool
5503 Target_aarch64<size, big_endian>::do_relax(
5504 int pass,
5505 const Input_objects* input_objects,
5506 Symbol_table* symtab,
5507 Layout* layout ,
5508 const Task* task)
5509 {
5510 gold_assert(!parameters->options().relocatable());
5511 if (pass == 1)
5512 {
5513 // We don't handle negative stub_group_size right now.
5514 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5515 if (this->stub_group_size_ == 1)
5516 {
5517 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5518 // will fail to link. The user will have to relink with an explicit
5519 // group size option.
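	  // In other words, the default group size is MAX_BRANCH_OFFSET minus
	  // 16KB (4096 entries * 4 bytes) of head room reserved for the stubs
	  // themselves.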
5520 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5521 4096 * 4;
5522 }
5523 group_sections(layout, this->stub_group_size_, true, task);
5524 }
5525 else
5526 {
5527 // If this is not the first pass, addresses and file offsets have
5528 // been reset at this point, set them here.
5529 for (Stub_table_iterator sp = this->stub_tables_.begin();
5530 sp != this->stub_tables_.end(); ++sp)
5531 {
5532 The_stub_table* stt = *sp;
5533 The_aarch64_input_section* owner = stt->owner();
5534 off_t off = align_address(owner->original_size(),
5535 stt->addralign());
5536 stt->set_address_and_file_offset(owner->address() + off,
5537 owner->offset() + off);
5538 }
5539 }
5540
5541 // Scan relocs for relocation stubs
5542 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5543 op != input_objects->relobj_end();
5544 ++op)
5545 {
5546 The_aarch64_relobj* aarch64_relobj =
5547 static_cast<The_aarch64_relobj*>(*op);
5548 // Lock the object so we can read from it. This is only called
5549 // single-threaded from Layout::finalize, so it is OK to lock.
5550 Task_lock_obj<Object> tl(task, aarch64_relobj);
5551 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5552 }
5553
5554 bool any_stub_table_changed = false;
5555 for (Stub_table_iterator siter = this->stub_tables_.begin();
5556 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5557 {
5558 The_stub_table* stub_table = *siter;
5559 if (stub_table->update_data_size_changed_p())
5560 {
5561 The_aarch64_input_section* owner = stub_table->owner();
5562 uint64_t address = owner->address();
5563 off_t offset = owner->offset();
5564 owner->reset_address_and_file_offset();
5565 owner->set_address_and_file_offset(address, offset);
5566
5567 any_stub_table_changed = true;
5568 }
5569 }
5570
5571   // Continue relaxation only if some stub table changed size; otherwise
5571   // finalize the stubs.
5572 bool continue_relaxation = any_stub_table_changed;
5573 if (!continue_relaxation)
5574 for (Stub_table_iterator sp = this->stub_tables_.begin();
5575 (sp != this->stub_tables_.end());
5576 ++sp)
5577 (*sp)->finalize_stubs();
5578
5579 return continue_relaxation;
5580 }
5581
5582
5583 // Make a new Stub_table.
5584
5585 template<int size, bool big_endian>
5586 Stub_table<size, big_endian>*
5587 Target_aarch64<size, big_endian>::new_stub_table(
5588 AArch64_input_section<size, big_endian>* owner)
5589 {
5590 Stub_table<size, big_endian>* stub_table =
5591 new Stub_table<size, big_endian>(owner);
5592 stub_table->set_address(align_address(
5593 owner->address() + owner->data_size(), 8));
5594 stub_table->set_file_offset(owner->offset() + owner->data_size());
5595 stub_table->finalize_data_size();
5596
5597 this->stub_tables_.push_back(stub_table);
5598
5599 return stub_table;
5600 }
5601
5602
5603 template<int size, bool big_endian>
5604 uint64_t
5605 Target_aarch64<size, big_endian>::do_reloc_addend(
5606 void* arg, unsigned int r_type, uint64_t) const
5607 {
5608 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5609 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5610 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5611 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5612 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5613 gold_assert(psymval->is_tls_symbol());
5614 // The value of a TLS symbol is the offset in the TLS segment.
5615 return psymval->value(ti.object, 0);
5616 }
5617
5618 // Return the number of entries in the PLT.
5619
5620 template<int size, bool big_endian>
5621 unsigned int
5622 Target_aarch64<size, big_endian>::plt_entry_count() const
5623 {
5624 if (this->plt_ == NULL)
5625 return 0;
5626 return this->plt_->entry_count();
5627 }
5628
5629 // Return the offset of the first non-reserved PLT entry.
5630
5631 template<int size, bool big_endian>
5632 unsigned int
5633 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5634 {
5635 return this->plt_->first_plt_entry_offset();
5636 }
5637
5638 // Return the size of each PLT entry.
5639
5640 template<int size, bool big_endian>
5641 unsigned int
5642 Target_aarch64<size, big_endian>::plt_entry_size() const
5643 {
5644 return this->plt_->get_plt_entry_size();
5645 }
5646
5647 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5648
5649 template<int size, bool big_endian>
5650 void
5651 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5652 Symbol_table* symtab, Layout* layout)
5653 {
5654 if (this->tls_base_symbol_defined_)
5655 return;
5656
5657 Output_segment* tls_segment = layout->tls_segment();
5658 if (tls_segment != NULL)
5659 {
5660       // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5661 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5662 Symbol_table::PREDEFINED,
5663 tls_segment, 0, 0,
5664 elfcpp::STT_TLS,
5665 elfcpp::STB_LOCAL,
5666 elfcpp::STV_HIDDEN, 0,
5667 Symbol::SEGMENT_START,
5668 true);
5669 }
5670 this->tls_base_symbol_defined_ = true;
5671 }
5672
5673 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5674
5675 template<int size, bool big_endian>
5676 void
5677 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5678 Symbol_table* symtab, Layout* layout)
5679 {
5680 if (this->plt_ == NULL)
5681 this->make_plt_section(symtab, layout);
5682
5683 if (!this->plt_->has_tlsdesc_entry())
5684 {
5685 // Allocate the TLSDESC_GOT entry.
5686 Output_data_got_aarch64<size, big_endian>* got =
5687 this->got_section(symtab, layout);
5688 unsigned int got_offset = got->add_constant(0);
5689
5690 // Allocate the TLSDESC_PLT entry.
5691 this->plt_->reserve_tlsdesc_entry(got_offset);
5692 }
5693 }
5694
5695 // Create a GOT entry for the TLS module index.
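// Only one such entry is needed per link; it is allocated lazily and its
// offset is cached in got_mod_index_offset_. The entry is a pair of GOT
// words: the first gets an R_AARCH64_TLS_DTPMOD64 dynamic relocation and the
// second is left as zero.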
5696
5697 template<int size, bool big_endian>
5698 unsigned int
5699 Target_aarch64<size, big_endian>::got_mod_index_entry(
5700 Symbol_table* symtab, Layout* layout,
5701 Sized_relobj_file<size, big_endian>* object)
5702 {
5703 if (this->got_mod_index_offset_ == -1U)
5704 {
5705 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5706 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5707 Output_data_got_aarch64<size, big_endian>* got =
5708 this->got_section(symtab, layout);
5709 unsigned int got_offset = got->add_constant(0);
5710 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5711 got_offset, 0);
5712 got->add_constant(0);
5713 this->got_mod_index_offset_ = got_offset;
5714 }
5715 return this->got_mod_index_offset_;
5716 }
5717
5718 // Optimize the TLS relocation type based on what we know about the
5719 // symbol. IS_FINAL is true if the final address of this symbol is
5720 // known at link time.
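// In outline: General-Dynamic and TLS-descriptor accesses can be relaxed to
// Initial-Exec, or to Local-Exec when IS_FINAL; Local-Dynamic can be relaxed
// to Local-Exec; Initial-Exec can be relaxed to Local-Exec when IS_FINAL; and
// Local-Exec is already optimal. No relaxation is attempted when producing a
// shared library.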
5721
5722 template<int size, bool big_endian>
5723 tls::Tls_optimization
5724 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5725 int r_type)
5726 {
5727 // If we are generating a shared library, then we can't do anything
5728   // in the linker.
5729 if (parameters->options().shared())
5730 return tls::TLSOPT_NONE;
5731
5732 switch (r_type)
5733 {
5734 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5735 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5736 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5737 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5738 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5739 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5740 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5741 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5742 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5743 case elfcpp::R_AARCH64_TLSDESC_LDR:
5744 case elfcpp::R_AARCH64_TLSDESC_ADD:
5745 case elfcpp::R_AARCH64_TLSDESC_CALL:
5746 // These are General-Dynamic which permits fully general TLS
5747 // access. Since we know that we are generating an executable,
5748 // we can convert this to Initial-Exec. If we also know that
5749 // this is a local symbol, we can further switch to Local-Exec.
5750 if (is_final)
5751 return tls::TLSOPT_TO_LE;
5752 return tls::TLSOPT_TO_IE;
5753
5754 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5755 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5756 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5757 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5758 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5759 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5760 // These are Local-Dynamic, which refer to local symbols in the
5761       // dynamic TLS block. Since we know that we are generating an
5762 // executable, we can switch to Local-Exec.
5763 return tls::TLSOPT_TO_LE;
5764
5765 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5766 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5767 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5768 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5769 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5770 // These are Initial-Exec relocs which get the thread offset
5771 // from the GOT. If we know that we are linking against the
5772 // local symbol, we can switch to Local-Exec, which links the
5773 // thread offset into the instruction.
5774 if (is_final)
5775 return tls::TLSOPT_TO_LE;
5776 return tls::TLSOPT_NONE;
5777
5778 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5779 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5780 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5781 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5782 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5783 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5784 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5785 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5786 // When we already have Local-Exec, there is nothing further we
5787 // can do.
5788 return tls::TLSOPT_NONE;
5789
5790 default:
5791 gold_unreachable();
5792 }
5793 }
5794
5795 // Returns true if this relocation type could be that of a function pointer.
5796
5797 template<int size, bool big_endian>
5798 inline bool
5799 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5800 unsigned int r_type)
5801 {
5802 switch (r_type)
5803 {
5804 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5805 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5806 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5807 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5808 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5809 {
5810 return true;
5811 }
5812 }
5813 return false;
5814 }
5815
5816 // For safe ICF, scan a relocation for a local symbol to check if it
5817 // corresponds to a function pointer being taken. In that case mark
5818 // the function whose pointer was taken as not foldable.
5819
5820 template<int size, bool big_endian>
5821 inline bool
5822 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5823 Symbol_table* ,
5824 Layout* ,
5825 Target_aarch64<size, big_endian>* ,
5826 Sized_relobj_file<size, big_endian>* ,
5827 unsigned int ,
5828 Output_section* ,
5829 const elfcpp::Rela<size, big_endian>& ,
5830 unsigned int r_type,
5831 const elfcpp::Sym<size, big_endian>&)
5832 {
5833 // When building a shared library, do not fold any local symbols.
5834 return (parameters->options().shared()
5835 || possible_function_pointer_reloc(r_type));
5836 }
5837
5838 // For safe ICF, scan a relocation for a global symbol to check if it
5839 // corresponds to a function pointer being taken. In that case mark
5840 // the function whose pointer was taken as not foldable.
5841
5842 template<int size, bool big_endian>
5843 inline bool
5844 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5845 Symbol_table* ,
5846 Layout* ,
5847 Target_aarch64<size, big_endian>* ,
5848 Sized_relobj_file<size, big_endian>* ,
5849 unsigned int ,
5850 Output_section* ,
5851 const elfcpp::Rela<size, big_endian>& ,
5852 unsigned int r_type,
5853 Symbol* gsym)
5854 {
5855 // When building a shared library, do not fold symbols whose visibility
5856 // is hidden, internal or protected.
5857 return ((parameters->options().shared()
5858 && (gsym->visibility() == elfcpp::STV_INTERNAL
5859 || gsym->visibility() == elfcpp::STV_PROTECTED
5860 || gsym->visibility() == elfcpp::STV_HIDDEN))
5861 || possible_function_pointer_reloc(r_type));
5862 }
5863
5864 // Report an unsupported relocation against a local symbol.
5865
5866 template<int size, bool big_endian>
5867 void
5868 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5869 Sized_relobj_file<size, big_endian>* object,
5870 unsigned int r_type)
5871 {
5872 gold_error(_("%s: unsupported reloc %u against local symbol"),
5873 object->name().c_str(), r_type);
5874 }
5875
5876 // We are about to emit a dynamic relocation of type R_TYPE. If the
5877 // dynamic linker does not support it, issue an error.
5878
5879 template<int size, bool big_endian>
5880 void
5881 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5882 unsigned int r_type)
5883 {
5884 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5885
5886 switch (r_type)
5887 {
5888 // These are the relocation types supported by glibc for AARCH64.
5889 case elfcpp::R_AARCH64_NONE:
5890 case elfcpp::R_AARCH64_COPY:
5891 case elfcpp::R_AARCH64_GLOB_DAT:
5892 case elfcpp::R_AARCH64_JUMP_SLOT:
5893 case elfcpp::R_AARCH64_RELATIVE:
5894 case elfcpp::R_AARCH64_TLS_DTPREL64:
5895 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5896 case elfcpp::R_AARCH64_TLS_TPREL64:
5897 case elfcpp::R_AARCH64_TLSDESC:
5898 case elfcpp::R_AARCH64_IRELATIVE:
5899 case elfcpp::R_AARCH64_ABS32:
5900 case elfcpp::R_AARCH64_ABS64:
5901 return;
5902
5903 default:
5904 break;
5905 }
5906
5907 // This prevents us from issuing more than one error per reloc
5908 // section. But we can still wind up issuing more than one
5909 // error per object file.
5910 if (this->issued_non_pic_error_)
5911 return;
5912 gold_assert(parameters->options().output_is_position_independent());
5913 object->error(_("requires unsupported dynamic reloc; "
5914 "recompile with -fPIC"));
5915 this->issued_non_pic_error_ = true;
5916 return;
5917 }
5918
5919 // Return whether we need to make a PLT entry for a relocation of the
5920 // given type against a STT_GNU_IFUNC symbol.
5921
5922 template<int size, bool big_endian>
5923 bool
5924 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5925 Sized_relobj_file<size, big_endian>* object,
5926 unsigned int r_type)
5927 {
5928 const AArch64_reloc_property* arp =
5929 aarch64_reloc_property_table->get_reloc_property(r_type);
5930 gold_assert(arp != NULL);
5931
5932 int flags = arp->reference_flags();
5933 if (flags & Symbol::TLS_REF)
5934 {
5935 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5936 object->name().c_str(), arp->name().c_str());
5937 return false;
5938 }
5939 return flags != 0;
5940 }
5941
5942 // Scan a relocation for a local symbol.
5943
5944 template<int size, bool big_endian>
5945 inline void
5946 Target_aarch64<size, big_endian>::Scan::local(
5947 Symbol_table* symtab,
5948 Layout* layout,
5949 Target_aarch64<size, big_endian>* target,
5950 Sized_relobj_file<size, big_endian>* object,
5951 unsigned int data_shndx,
5952 Output_section* output_section,
5953 const elfcpp::Rela<size, big_endian>& rela,
5954 unsigned int r_type,
5955 const elfcpp::Sym<size, big_endian>& lsym,
5956 bool is_discarded)
5957 {
5958 if (is_discarded)
5959 return;
5960
5961 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5962 Reloc_section;
5963 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5964
5965 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5966 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5967 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5968 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5969
5970 switch (r_type)
5971 {
5972 case elfcpp::R_AARCH64_NONE:
5973 break;
5974
5975 case elfcpp::R_AARCH64_ABS32:
5976 case elfcpp::R_AARCH64_ABS16:
5977 if (parameters->options().output_is_position_independent())
5978 {
5979 gold_error(_("%s: unsupported reloc %u in pos independent link."),
5980 object->name().c_str(), r_type);
5981 }
5982 break;
5983
5984 case elfcpp::R_AARCH64_ABS64:
5985       // If building a shared library or PIE, we need to mark this as a dynamic
5986       // relocation, so that the dynamic loader can relocate it.
5987 if (parameters->options().output_is_position_independent())
5988 {
5989 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5990 rela_dyn->add_local_relative(object, r_sym,
5991 elfcpp::R_AARCH64_RELATIVE,
5992 output_section,
5993 data_shndx,
5994 rela.get_r_offset(),
5995 rela.get_r_addend(),
5996 is_ifunc);
5997 }
5998 break;
5999
6000 case elfcpp::R_AARCH64_PREL64:
6001 case elfcpp::R_AARCH64_PREL32:
6002 case elfcpp::R_AARCH64_PREL16:
6003 break;
6004
6005 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6006 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6007 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6008 // The above relocations are used to access GOT entries.
6009 {
6010 Output_data_got_aarch64<size, big_endian>* got =
6011 target->got_section(symtab, layout);
6012 bool is_new = false;
6013 // This symbol requires a GOT entry.
6014 if (is_ifunc)
6015 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6016 else
6017 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6018 if (is_new && parameters->options().output_is_position_independent())
6019 target->rela_dyn_section(layout)->
6020 add_local_relative(object,
6021 r_sym,
6022 elfcpp::R_AARCH64_RELATIVE,
6023 got,
6024 object->local_got_offset(r_sym,
6025 GOT_TYPE_STANDARD),
6026 0,
6027 false);
6028 }
6029 break;
6030
6031 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6032 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6033 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6034 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6035 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6036 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6037 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6038 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6039 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6040 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6041 if (parameters->options().output_is_position_independent())
6042 {
6043 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6044 object->name().c_str(), r_type);
6045 }
6046 break;
6047
6048 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6049 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6050 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6051 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6052 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6053 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6054 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6055 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6056 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6057 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6058 break;
6059
6060 // Control flow, pc-relative. We don't need to do anything for a relative
6061 // addressing relocation against a local symbol if it does not reference
6062 // the GOT.
6063 case elfcpp::R_AARCH64_TSTBR14:
6064 case elfcpp::R_AARCH64_CONDBR19:
6065 case elfcpp::R_AARCH64_JUMP26:
6066 case elfcpp::R_AARCH64_CALL26:
6067 break;
6068
6069 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6070 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6071 {
6072 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6073 optimize_tls_reloc(!parameters->options().shared(), r_type);
6074 if (tlsopt == tls::TLSOPT_TO_LE)
6075 break;
6076
6077 layout->set_has_static_tls();
6078 // Create a GOT entry for the tp-relative offset.
6079 if (!parameters->doing_static_link())
6080 {
6081 Output_data_got_aarch64<size, big_endian>* got =
6082 target->got_section(symtab, layout);
6083 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6084 target->rela_dyn_section(layout),
6085 elfcpp::R_AARCH64_TLS_TPREL64);
6086 }
6087 else if (!object->local_has_got_offset(r_sym,
6088 GOT_TYPE_TLS_OFFSET))
6089 {
6090 Output_data_got_aarch64<size, big_endian>* got =
6091 target->got_section(symtab, layout);
6092 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6093 unsigned int got_offset =
6094 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6095 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6096 gold_assert(addend == 0);
6097 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6098 object, r_sym);
6099 }
6100 }
6101 break;
6102
6103 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6104 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6105 {
6106 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6107 optimize_tls_reloc(!parameters->options().shared(), r_type);
6108 if (tlsopt == tls::TLSOPT_TO_LE)
6109 {
6110 layout->set_has_static_tls();
6111 break;
6112 }
6113 gold_assert(tlsopt == tls::TLSOPT_NONE);
6114
6115 Output_data_got_aarch64<size, big_endian>* got =
6116 target->got_section(symtab, layout);
6117 	got->add_local_pair_with_rel(object, r_sym, data_shndx,
6118 GOT_TYPE_TLS_PAIR,
6119 target->rela_dyn_section(layout),
6120 elfcpp::R_AARCH64_TLS_DTPMOD64);
6121 }
6122 break;
6123
6124 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6125 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6126 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6127 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6128 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6129 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6130 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6131 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6132 {
6133 layout->set_has_static_tls();
6134 bool output_is_shared = parameters->options().shared();
6135 if (output_is_shared)
6136 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6137 object->name().c_str(), r_type);
6138 }
6139 break;
6140
6141 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6142 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6143 {
6144 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6145 optimize_tls_reloc(!parameters->options().shared(), r_type);
6146 if (tlsopt == tls::TLSOPT_NONE)
6147 {
6148 // Create a GOT entry for the module index.
6149 target->got_mod_index_entry(symtab, layout, object);
6150 }
6151 else if (tlsopt != tls::TLSOPT_TO_LE)
6152 unsupported_reloc_local(object, r_type);
6153 }
6154 break;
6155
6156 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6157 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6158 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6159 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6160 break;
6161
6162 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6163 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6164 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6165 {
6166 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6167 optimize_tls_reloc(!parameters->options().shared(), r_type);
6168 target->define_tls_base_symbol(symtab, layout);
6169 if (tlsopt == tls::TLSOPT_NONE)
6170 {
6171 // Create reserved PLT and GOT entries for the resolver.
6172 target->reserve_tlsdesc_entries(symtab, layout);
6173
6174 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6175 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6176 // entry needs to be in an area in .got.plt, not .got. Call
6177 // got_section to make sure the section has been created.
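	    // (The two consecutive GOT words form the TLS descriptor itself:
	    // the dynamic linker fills in a resolver function pointer and its
	    // argument when it processes the R_AARCH64_TLSDESC relocation.)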
6178 target->got_section(symtab, layout);
6179 Output_data_got<size, big_endian>* got =
6180 target->got_tlsdesc_section();
6181 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6182 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6183 {
6184 unsigned int got_offset = got->add_constant(0);
6185 got->add_constant(0);
6186 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6187 got_offset);
6188 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6189 // We store the arguments we need in a vector, and use
6190 // the index into the vector as the parameter to pass
6191 // to the target specific routines.
6192 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6193 void* arg = reinterpret_cast<void*>(intarg);
6194 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6195 got, got_offset, 0);
6196 }
6197 }
6198 else if (tlsopt != tls::TLSOPT_TO_LE)
6199 unsupported_reloc_local(object, r_type);
6200 }
6201 break;
6202
6203 case elfcpp::R_AARCH64_TLSDESC_CALL:
6204 break;
6205
6206 default:
6207 unsupported_reloc_local(object, r_type);
6208 }
6209 }
6210
6211
6212 // Report an unsupported relocation against a global symbol.
6213
6214 template<int size, bool big_endian>
6215 void
6216 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6217 Sized_relobj_file<size, big_endian>* object,
6218 unsigned int r_type,
6219 Symbol* gsym)
6220 {
6221 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6222 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6223 }
6224
6225 template<int size, bool big_endian>
6226 inline void
6227 Target_aarch64<size, big_endian>::Scan::global(
6228 Symbol_table* symtab,
6229 Layout* layout,
6230 Target_aarch64<size, big_endian>* target,
6231 Sized_relobj_file<size, big_endian> * object,
6232 unsigned int data_shndx,
6233 Output_section* output_section,
6234 const elfcpp::Rela<size, big_endian>& rela,
6235 unsigned int r_type,
6236 Symbol* gsym)
6237 {
6238 // A STT_GNU_IFUNC symbol may require a PLT entry.
6239 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6240 && this->reloc_needs_plt_for_ifunc(object, r_type))
6241 target->make_plt_entry(symtab, layout, gsym);
6242
6243 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6244 Reloc_section;
6245 const AArch64_reloc_property* arp =
6246 aarch64_reloc_property_table->get_reloc_property(r_type);
6247 gold_assert(arp != NULL);
6248
6249 switch (r_type)
6250 {
6251 case elfcpp::R_AARCH64_NONE:
6252 break;
6253
6254 case elfcpp::R_AARCH64_ABS16:
6255 case elfcpp::R_AARCH64_ABS32:
6256 case elfcpp::R_AARCH64_ABS64:
6257 {
6258 // Make a PLT entry if necessary.
6259 if (gsym->needs_plt_entry())
6260 {
6261 target->make_plt_entry(symtab, layout, gsym);
6262 // Since this is not a PC-relative relocation, we may be
6263 // taking the address of a function. In that case we need to
6264 // set the entry in the dynamic symbol table to the address of
6265 // the PLT entry.
6266 if (gsym->is_from_dynobj() && !parameters->options().shared())
6267 gsym->set_needs_dynsym_value();
6268 }
6269 // Make a dynamic relocation if necessary.
6270 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6271 {
6272 if (!parameters->options().output_is_position_independent()
6273 && gsym->may_need_copy_reloc())
6274 {
6275 target->copy_reloc(symtab, layout, object,
6276 data_shndx, output_section, gsym, rela);
6277 }
6278 else if (r_type == elfcpp::R_AARCH64_ABS64
6279 && gsym->type() == elfcpp::STT_GNU_IFUNC
6280 && gsym->can_use_relative_reloc(false)
6281 && !gsym->is_from_dynobj()
6282 && !gsym->is_undefined()
6283 && !gsym->is_preemptible())
6284 {
6285 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6286 // symbol. This makes a function address in a PIE executable
6287 // match the address in a shared library that it links against.
6288 Reloc_section* rela_dyn =
6289 target->rela_irelative_section(layout);
6290 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6291 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6292 output_section, object,
6293 data_shndx,
6294 rela.get_r_offset(),
6295 rela.get_r_addend());
6296 }
6297 else if (r_type == elfcpp::R_AARCH64_ABS64
6298 && gsym->can_use_relative_reloc(false))
6299 {
6300 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6301 rela_dyn->add_global_relative(gsym,
6302 elfcpp::R_AARCH64_RELATIVE,
6303 output_section,
6304 object,
6305 data_shndx,
6306 rela.get_r_offset(),
6307 rela.get_r_addend(),
6308 false);
6309 }
6310 else
6311 {
6312 check_non_pic(object, r_type);
6313 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6314 rela_dyn = target->rela_dyn_section(layout);
6315 rela_dyn->add_global(
6316 gsym, r_type, output_section, object,
6317 		data_shndx, rela.get_r_offset(), rela.get_r_addend());
6318 }
6319 }
6320 }
6321 break;
6322
6323 case elfcpp::R_AARCH64_PREL16:
6324 case elfcpp::R_AARCH64_PREL32:
6325 case elfcpp::R_AARCH64_PREL64:
6326 // This is used to fill the GOT absolute address.
6327 if (gsym->needs_plt_entry())
6328 {
6329 target->make_plt_entry(symtab, layout, gsym);
6330 }
6331 break;
6332
6333 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6334 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6335 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6336 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6337 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6338 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6339 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6340 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6341 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6342 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6343 if (parameters->options().output_is_position_independent())
6344 {
6345 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6346 object->name().c_str(), r_type);
6347 }
6348 break;
6349
6350 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6351 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6352 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6353 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6354 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6355 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6356 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6357 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6358 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6359 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6360 {
6361 if (gsym->needs_plt_entry())
6362 target->make_plt_entry(symtab, layout, gsym);
6363 // Make a dynamic relocation if necessary.
6364 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6365 {
6366 if (parameters->options().output_is_executable()
6367 && gsym->may_need_copy_reloc())
6368 {
6369 target->copy_reloc(symtab, layout, object,
6370 data_shndx, output_section, gsym, rela);
6371 }
6372 }
6373 break;
6374 }
6375
6376 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6377 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6378 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6379 {
6380 // The above relocations are used to access GOT entries.
6381 	// Note that a GOT entry holds the *address* of a symbol.
6382 	// The symbol requires a GOT entry.
6383 Output_data_got_aarch64<size, big_endian>* got =
6384 target->got_section(symtab, layout);
6385 if (gsym->final_value_is_known())
6386 {
6387 // For a STT_GNU_IFUNC symbol we want the PLT address.
6388 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6389 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6390 else
6391 got->add_global(gsym, GOT_TYPE_STANDARD);
6392 }
6393 else
6394 {
6395 // If this symbol is not fully resolved, we need to add a dynamic
6396 // relocation for it.
6397 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6398
6399 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6400 //
6401 // 1) The symbol may be defined in some other module.
6402 // 2) We are building a shared library and this is a protected
6403 // symbol; using GLOB_DAT means that the dynamic linker can use
6404 // the address of the PLT in the main executable when appropriate
6405 // so that function address comparisons work.
6406 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6407 // again so that function address comparisons work.
6408 if (gsym->is_from_dynobj()
6409 || gsym->is_undefined()
6410 || gsym->is_preemptible()
6411 || (gsym->visibility() == elfcpp::STV_PROTECTED
6412 && parameters->options().shared())
6413 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6414 && parameters->options().output_is_position_independent()))
6415 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6416 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6417 else
6418 {
6419 // For a STT_GNU_IFUNC symbol we want to write the PLT
6420 // offset into the GOT, so that function pointer
6421 // comparisons work correctly.
6422 bool is_new;
6423 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6424 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6425 else
6426 {
6427 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6428 // Tell the dynamic linker to use the PLT address
6429 // when resolving relocations.
6430 if (gsym->is_from_dynobj()
6431 && !parameters->options().shared())
6432 gsym->set_needs_dynsym_value();
6433 }
6434 if (is_new)
6435 {
6436 rela_dyn->add_global_relative(
6437 gsym, elfcpp::R_AARCH64_RELATIVE,
6438 got,
6439 gsym->got_offset(GOT_TYPE_STANDARD),
6440 0,
6441 false);
6442 }
6443 }
6444 }
6445 break;
6446 }
6447
6448 case elfcpp::R_AARCH64_TSTBR14:
6449 case elfcpp::R_AARCH64_CONDBR19:
6450 case elfcpp::R_AARCH64_JUMP26:
6451 case elfcpp::R_AARCH64_CALL26:
6452 {
6453 if (gsym->final_value_is_known())
6454 break;
6455
6456 if (gsym->is_defined() &&
6457 !gsym->is_from_dynobj() &&
6458 !gsym->is_preemptible())
6459 break;
6460
6461 // Make plt entry for function call.
6462 target->make_plt_entry(symtab, layout, gsym);
6463 break;
6464 }
6465
6466 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6467 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6468 {
6469 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6470 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6471 if (tlsopt == tls::TLSOPT_TO_LE)
6472 {
6473 layout->set_has_static_tls();
6474 break;
6475 }
6476 gold_assert(tlsopt == tls::TLSOPT_NONE);
6477
6478 // General dynamic.
6479 Output_data_got_aarch64<size, big_endian>* got =
6480 target->got_section(symtab, layout);
6481 // Create 2 consecutive entries for module index and offset.
6482 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6483 target->rela_dyn_section(layout),
6484 elfcpp::R_AARCH64_TLS_DTPMOD64,
6485 elfcpp::R_AARCH64_TLS_DTPREL64);
6486 }
6487 break;
6488
6489 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6490 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6491 {
6492 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6493 optimize_tls_reloc(!parameters->options().shared(), r_type);
6494 if (tlsopt == tls::TLSOPT_NONE)
6495 {
6496 // Create a GOT entry for the module index.
6497 target->got_mod_index_entry(symtab, layout, object);
6498 }
6499 else if (tlsopt != tls::TLSOPT_TO_LE)
6500 unsupported_reloc_local(object, r_type);
6501 }
6502 break;
6503
6504 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6505 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6506 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6507 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6508 break;
6509
6510 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6511 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6512 {
6513 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6514 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6515 if (tlsopt == tls::TLSOPT_TO_LE)
6516 break;
6517
6518 layout->set_has_static_tls();
6519 // Create a GOT entry for the tp-relative offset.
6520 Output_data_got_aarch64<size, big_endian>* got
6521 = target->got_section(symtab, layout);
6522 if (!parameters->doing_static_link())
6523 {
6524 got->add_global_with_rel(
6525 gsym, GOT_TYPE_TLS_OFFSET,
6526 target->rela_dyn_section(layout),
6527 elfcpp::R_AARCH64_TLS_TPREL64);
6528 }
6529 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6530 {
6531 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6532 unsigned int got_offset =
6533 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6534 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6535 gold_assert(addend == 0);
6536 got->add_static_reloc(got_offset,
6537 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6538 }
6539 }
6540 break;
6541
6542 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6543 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6544 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6545 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6546 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6547 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6548 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6549 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6550 layout->set_has_static_tls();
6551 if (parameters->options().shared())
6552 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6553 object->name().c_str(), r_type);
6554 break;
6555
6556 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6557 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6558 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6559 {
6560 target->define_tls_base_symbol(symtab, layout);
6561 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6562 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6563 if (tlsopt == tls::TLSOPT_NONE)
6564 {
6565 // Create reserved PLT and GOT entries for the resolver.
6566 target->reserve_tlsdesc_entries(symtab, layout);
6567
6568 // Create a double GOT entry with an R_AARCH64_TLSDESC
6569 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6570 // entry needs to be in an area in .got.plt, not .got. Call
6571 // got_section to make sure the section has been created.
6572 target->got_section(symtab, layout);
6573 Output_data_got<size, big_endian>* got =
6574 target->got_tlsdesc_section();
6575 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6576 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6577 elfcpp::R_AARCH64_TLSDESC, 0);
6578 }
6579 else if (tlsopt == tls::TLSOPT_TO_IE)
6580 {
6581 // Create a GOT entry for the tp-relative offset.
6582 Output_data_got<size, big_endian>* got
6583 = target->got_section(symtab, layout);
6584 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6585 target->rela_dyn_section(layout),
6586 elfcpp::R_AARCH64_TLS_TPREL64);
6587 }
6588 else if (tlsopt != tls::TLSOPT_TO_LE)
6589 unsupported_reloc_global(object, r_type, gsym);
6590 }
6591 break;
6592
6593 case elfcpp::R_AARCH64_TLSDESC_CALL:
6594 break;
6595
6596 default:
6597 gold_error(_("%s: unsupported reloc type in global scan"),
6598 aarch64_reloc_property_table->
6599 reloc_name_in_error_message(r_type).c_str());
6600 }
6601 return;
6602 } // End of Scan::global
6603
6604
6605 // Create the PLT section.
6606 template<int size, bool big_endian>
6607 void
6608 Target_aarch64<size, big_endian>::make_plt_section(
6609 Symbol_table* symtab, Layout* layout)
6610 {
6611 if (this->plt_ == NULL)
6612 {
6613 // Create the GOT section first.
6614 this->got_section(symtab, layout);
6615
6616 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6617 this->got_irelative_);
6618
6619 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6620 (elfcpp::SHF_ALLOC
6621 | elfcpp::SHF_EXECINSTR),
6622 this->plt_, ORDER_PLT, false);
6623
6624 // Make the sh_info field of .rela.plt point to .plt.
6625 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6626 rela_plt_os->set_info_section(this->plt_->output_section());
6627 }
6628 }
6629
6630 // Return the section for TLSDESC relocations.
6631
6632 template<int size, bool big_endian>
6633 typename Target_aarch64<size, big_endian>::Reloc_section*
6634 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6635 {
6636 return this->plt_section()->rela_tlsdesc(layout);
6637 }
6638
6639 // Create a PLT entry for a global symbol.
6640
6641 template<int size, bool big_endian>
6642 void
6643 Target_aarch64<size, big_endian>::make_plt_entry(
6644 Symbol_table* symtab,
6645 Layout* layout,
6646 Symbol* gsym)
6647 {
6648 if (gsym->has_plt_offset())
6649 return;
6650
6651 if (this->plt_ == NULL)
6652 this->make_plt_section(symtab, layout);
6653
6654 this->plt_->add_entry(symtab, layout, gsym);
6655 }
6656
6657 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6658
6659 template<int size, bool big_endian>
6660 void
6661 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6662 Symbol_table* symtab, Layout* layout,
6663 Sized_relobj_file<size, big_endian>* relobj,
6664 unsigned int local_sym_index)
6665 {
6666 if (relobj->local_has_plt_offset(local_sym_index))
6667 return;
6668 if (this->plt_ == NULL)
6669 this->make_plt_section(symtab, layout);
6670 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6671 relobj,
6672 local_sym_index);
6673 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6674 }
6675
6676 template<int size, bool big_endian>
6677 void
6678 Target_aarch64<size, big_endian>::gc_process_relocs(
6679 Symbol_table* symtab,
6680 Layout* layout,
6681 Sized_relobj_file<size, big_endian>* object,
6682 unsigned int data_shndx,
6683 unsigned int sh_type,
6684 const unsigned char* prelocs,
6685 size_t reloc_count,
6686 Output_section* output_section,
6687 bool needs_special_offset_handling,
6688 size_t local_symbol_count,
6689 const unsigned char* plocal_symbols)
6690 {
6691 typedef Target_aarch64<size, big_endian> Aarch64;
6692 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6693 Classify_reloc;
6694
6695 if (sh_type == elfcpp::SHT_REL)
6696 {
6697 return;
6698 }
6699
6700 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6701 symtab,
6702 layout,
6703 this,
6704 object,
6705 data_shndx,
6706 prelocs,
6707 reloc_count,
6708 output_section,
6709 needs_special_offset_handling,
6710 local_symbol_count,
6711 plocal_symbols);
6712 }
6713
6714 // Scan relocations for a section.
6715
6716 template<int size, bool big_endian>
6717 void
6718 Target_aarch64<size, big_endian>::scan_relocs(
6719 Symbol_table* symtab,
6720 Layout* layout,
6721 Sized_relobj_file<size, big_endian>* object,
6722 unsigned int data_shndx,
6723 unsigned int sh_type,
6724 const unsigned char* prelocs,
6725 size_t reloc_count,
6726 Output_section* output_section,
6727 bool needs_special_offset_handling,
6728 size_t local_symbol_count,
6729 const unsigned char* plocal_symbols)
6730 {
6731 typedef Target_aarch64<size, big_endian> Aarch64;
6732 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6733 Classify_reloc;
6734
6735 if (sh_type == elfcpp::SHT_REL)
6736 {
6737 gold_error(_("%s: unsupported REL reloc section"),
6738 object->name().c_str());
6739 return;
6740 }
6741
6742 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6743 symtab,
6744 layout,
6745 this,
6746 object,
6747 data_shndx,
6748 prelocs,
6749 reloc_count,
6750 output_section,
6751 needs_special_offset_handling,
6752 local_symbol_count,
6753 plocal_symbols);
6754 }
6755
6756 // Return the value to use for a dynamic symbol which requires special
6757 // treatment. This is how we support equality comparisons of function
6758 // pointers across shared library boundaries, as described in the
6759 // processor specific ABI supplement.
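// Concretely: for a function defined in a shared object but called from the
// executable through the PLT, the value recorded for the dynamic symbol is
// the address of its PLT entry, so taking the function's address yields the
// same value on both sides of the shared-library boundary.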
6760
6761 template<int size, bool big_endian>
6762 uint64_t
6763 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6764 {
6765 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6766 return this->plt_address_for_global(gsym);
6767 }
6768
6769
6770 // Finalize the sections.
6771
6772 template<int size, bool big_endian>
6773 void
6774 Target_aarch64<size, big_endian>::do_finalize_sections(
6775 Layout* layout,
6776 const Input_objects*,
6777 Symbol_table* symtab)
6778 {
6779 const Reloc_section* rel_plt = (this->plt_ == NULL
6780 ? NULL
6781 : this->plt_->rela_plt());
6782 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6783 this->rela_dyn_, true, false);
6784
6785 // Emit any relocs we saved in an attempt to avoid generating COPY
6786 // relocs.
6787 if (this->copy_relocs_.any_saved_relocs())
6788 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6789
6790 // Fill in some more dynamic tags.
6791 Output_data_dynamic* const odyn = layout->dynamic_data();
6792 if (odyn != NULL)
6793 {
6794 if (this->plt_ != NULL
6795 && this->plt_->output_section() != NULL
6796 && this->plt_ ->has_tlsdesc_entry())
6797 {
6798 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6799 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6800 this->got_->finalize_data_size();
6801 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6802 this->plt_, plt_offset);
6803 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6804 this->got_, got_offset);
6805 }
6806 }
6807
6808 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6809 // the .got.plt section.
6810 Symbol* sym = this->global_offset_table_;
6811 if (sym != NULL)
6812 {
6813 uint64_t data_size = this->got_plt_->current_data_size();
6814 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6815
6816 // If the .got section is more than 0x8000 bytes, we add
6817 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6818 // bit relocations have a greater chance of working.
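      // (A signed 16-bit offset covers -0x8000..0x7fff; biasing the symbol by
      // 0x8000 lets such offsets address roughly 64KB of GOT data instead of
      // 32KB.)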
6819 if (data_size >= 0x8000)
6820 symtab->get_sized_symbol<size>(sym)->set_value(
6821 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6822 }
6823
6824 if (parameters->doing_static_link()
6825 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6826 {
6827 // If linking statically, make sure that the __rela_iplt symbols
6828 // were defined if necessary, even if we didn't create a PLT.
6829 static const Define_symbol_in_segment syms[] =
6830 {
6831 {
6832 "__rela_iplt_start", // name
6833 elfcpp::PT_LOAD, // segment_type
6834 elfcpp::PF_W, // segment_flags_set
6835 elfcpp::PF(0), // segment_flags_clear
6836 0, // value
6837 0, // size
6838 elfcpp::STT_NOTYPE, // type
6839 elfcpp::STB_GLOBAL, // binding
6840 elfcpp::STV_HIDDEN, // visibility
6841 0, // nonvis
6842 Symbol::SEGMENT_START, // offset_from_base
6843 true // only_if_ref
6844 },
6845 {
6846 "__rela_iplt_end", // name
6847 elfcpp::PT_LOAD, // segment_type
6848 elfcpp::PF_W, // segment_flags_set
6849 elfcpp::PF(0), // segment_flags_clear
6850 0, // value
6851 0, // size
6852 elfcpp::STT_NOTYPE, // type
6853 elfcpp::STB_GLOBAL, // binding
6854 elfcpp::STV_HIDDEN, // visibility
6855 0, // nonvis
6856 Symbol::SEGMENT_START, // offset_from_base
6857 true // only_if_ref
6858 }
6859 };
6860
6861 symtab->define_symbols(layout, 2, syms,
6862 layout->script_options()->saw_sections_clause());
6863 }
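// (Illustrative sketch only: a statically linked startup (e.g. glibc-style
// csu code) is expected to walk this array itself and apply the
// R_AARCH64_IRELATIVE relocations, roughly:
//   extern const Elf64_Rela __rela_iplt_start[], __rela_iplt_end[];
//   for (const Elf64_Rela* r = __rela_iplt_start; r != __rela_iplt_end; ++r)
//     *(Elf64_Addr*) r->r_offset = ((Elf64_Addr (*)(void)) r->r_addend)();
// which is why the two symbols must exist even when no PLT was created.)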
6864
6865 return;
6866 }
6867
6868 // Perform a relocation.
6869
6870 template<int size, bool big_endian>
6871 inline bool
6872 Target_aarch64<size, big_endian>::Relocate::relocate(
6873 const Relocate_info<size, big_endian>* relinfo,
6874 unsigned int,
6875 Target_aarch64<size, big_endian>* target,
6876 Output_section* ,
6877 size_t relnum,
6878 const unsigned char* preloc,
6879 const Sized_symbol<size>* gsym,
6880 const Symbol_value<size>* psymval,
6881 unsigned char* view,
6882 typename elfcpp::Elf_types<size>::Elf_Addr address,
6883 section_size_type /* view_size */)
6884 {
6885 if (view == NULL)
6886 return true;
6887
6888 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6889
6890 const elfcpp::Rela<size, big_endian> rela(preloc);
6891 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
6892 const AArch64_reloc_property* reloc_property =
6893 aarch64_reloc_property_table->get_reloc_property(r_type);
6894
6895 if (reloc_property == NULL)
6896 {
6897 std::string reloc_name =
6898 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6899 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6900 _("cannot relocate %s in object file"),
6901 reloc_name.c_str());
6902 return true;
6903 }
6904
6905 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6906
6907 // Pick the value to use for symbols defined in the PLT.
6908 Symbol_value<size> symval;
6909 if (gsym != NULL
6910 && gsym->use_plt_offset(reloc_property->reference_flags()))
6911 {
6912 symval.set_output_value(target->plt_address_for_global(gsym));
6913 psymval = &symval;
6914 }
6915 else if (gsym == NULL && psymval->is_ifunc_symbol())
6916 {
6917 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6918 if (object->local_has_plt_offset(r_sym))
6919 {
6920 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6921 psymval = &symval;
6922 }
6923 }
6924
6925 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6926
6927 // Get the GOT offset if needed.
6928 // For aarch64, the GOT pointer points to the start of the GOT section.
6929 bool have_got_offset = false;
6930 int got_offset = 0;
6931 int got_base = (target->got_ != NULL
6932 ? (target->got_->current_data_size() >= 0x8000
6933 ? 0x8000 : 0)
6934 : 0);
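// (got_base is subtracted from the raw GOT offset here and added back where
// the GOT entry address is formed below; the 0x8000 rebase for large GOTs
// parallels the _GLOBAL_OFFSET_TABLE_ adjustment in do_finalize_sections.)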
6935 switch (r_type)
6936 {
6937 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6938 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6939 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6940 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6941 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6942 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6943 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6944 case elfcpp::R_AARCH64_GOTREL64:
6945 case elfcpp::R_AARCH64_GOTREL32:
6946 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6947 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6948 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6949 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6950 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6951 if (gsym != NULL)
6952 {
6953 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6954 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
6955 }
6956 else
6957 {
6958 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6959 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6960 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6961 - got_base);
6962 }
6963 have_got_offset = true;
6964 break;
6965
6966 default:
6967 break;
6968 }
6969
6970 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
6971 typename elfcpp::Elf_types<size>::Elf_Addr value;
6972 switch (r_type)
6973 {
6974 case elfcpp::R_AARCH64_NONE:
6975 break;
6976
6977 case elfcpp::R_AARCH64_ABS64:
6978 if (!parameters->options().apply_dynamic_relocs()
6979 && parameters->options().output_is_position_independent()
6980 && gsym != NULL
6981 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
6982 && !gsym->can_use_relative_reloc(false))
6983 // We have generated an absolute dynamic relocation, so do not
6984 // apply the relocation statically. (Works around bugs in older
6985 // Android dynamic linkers.)
6986 break;
6987 reloc_status = Reloc::template rela_ua<64>(
6988 view, object, psymval, addend, reloc_property);
6989 break;
6990
6991 case elfcpp::R_AARCH64_ABS32:
6992 if (!parameters->options().apply_dynamic_relocs()
6993 && parameters->options().output_is_position_independent()
6994 && gsym != NULL
6995 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
6996 // We have generated an absolute dynamic relocation, so do not
6997 // apply the relocation statically. (Works around bugs in older
6998 // Android dynamic linkers.)
6999 break;
7000 reloc_status = Reloc::template rela_ua<32>(
7001 view, object, psymval, addend, reloc_property);
7002 break;
7003
7004 case elfcpp::R_AARCH64_ABS16:
7005 if (!parameters->options().apply_dynamic_relocs()
7006 && parameters->options().output_is_position_independent()
7007 && gsym != NULL
7008 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7009 // We have generated an absolute dynamic relocation, so do not
7010 // apply the relocation statically. (Works around bugs in older
7011 // Android dynamic linkers.)
7012 break;
7013 reloc_status = Reloc::template rela_ua<16>(
7014 view, object, psymval, addend, reloc_property);
7015 break;
7016
7017 case elfcpp::R_AARCH64_PREL64:
7018 reloc_status = Reloc::template pcrela_ua<64>(
7019 view, object, psymval, addend, address, reloc_property);
7020 break;
7021
7022 case elfcpp::R_AARCH64_PREL32:
7023 reloc_status = Reloc::template pcrela_ua<32>(
7024 view, object, psymval, addend, address, reloc_property);
7025 break;
7026
7027 case elfcpp::R_AARCH64_PREL16:
7028 reloc_status = Reloc::template pcrela_ua<16>(
7029 view, object, psymval, addend, address, reloc_property);
7030 break;
7031
7032 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7033 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7034 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7035 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7036 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7037 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7038 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7039 reloc_status = Reloc::template rela_general<32>(
7040 view, object, psymval, addend, reloc_property);
7041 break;
7042 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7043 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7044 case elfcpp::R_AARCH64_MOVW_SABS_G2:
7045 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7046 reloc_property);
7047 break;
7048
7049 case elfcpp::R_AARCH64_LD_PREL_LO19:
7050 reloc_status = Reloc::template pcrela_general<32>(
7051 view, object, psymval, addend, address, reloc_property);
7052 break;
7053
7054 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7055 reloc_status = Reloc::adr(view, object, psymval, addend,
7056 address, reloc_property);
7057 break;
7058
7059 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7060 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7061 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7062 reloc_property);
7063 break;
7064
7065 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7066 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7067 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7068 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7069 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7070 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7071 reloc_status = Reloc::template rela_general<32>(
7072 view, object, psymval, addend, reloc_property);
7073 break;
7074
7075 case elfcpp::R_AARCH64_CALL26:
7076 if (this->skip_call_tls_get_addr_)
7077 {
7078 // Double check that the TLSGD insn has been optimized away.
7079 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7080 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7081 reinterpret_cast<Insntype*>(view));
7082 gold_assert((insn & 0xff000000) == 0x91000000);
7083
7084 reloc_status = Reloc::STATUS_OKAY;
7085 this->skip_call_tls_get_addr_ = false;
7086 // Return false to stop further processing this reloc.
7087 return false;
7088 }
7089 // Fall through.
7090 case elfcpp::R_AARCH64_JUMP26:
7091 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7092 gsym, psymval, object,
7093 target->stub_group_size_))
7094 break;
7095 // Fall through.
7096 case elfcpp::R_AARCH64_TSTBR14:
7097 case elfcpp::R_AARCH64_CONDBR19:
7098 reloc_status = Reloc::template pcrela_general<32>(
7099 view, object, psymval, addend, address, reloc_property);
7100 break;
7101
7102 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7103 gold_assert(have_got_offset);
7104 value = target->got_->address() + got_base + got_offset;
7105 reloc_status = Reloc::adrp(view, value + addend, address);
7106 break;
7107
7108 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7109 gold_assert(have_got_offset);
7110 value = target->got_->address() + got_base + got_offset;
7111 reloc_status = Reloc::template rela_general<32>(
7112 view, value, addend, reloc_property);
7113 break;
7114
7115 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7116 {
7117 gold_assert(have_got_offset);
7118 value = target->got_->address() + got_base + got_offset + addend -
7119 Reloc::Page(target->got_->address() + got_base);
7120 if ((value & 7) != 0)
7121 reloc_status = Reloc::STATUS_OVERFLOW;
7122 else
7123 reloc_status = Reloc::template reloc_common<32>(
7124 view, value, reloc_property);
7125 break;
7126 }
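// (For reference, and assuming the standard AAELF64 definition: the
// LD64_GOTPAGE_LO15 value is the GOT entry address minus Page(GOT origin),
// an 8-byte-aligned offset of at most 15 bits -- hence the "(value & 7)"
// alignment check above.)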
7127
7128 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7129 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7130 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7131 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7132 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7133 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7134 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7135 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7136 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7137 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7138 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7139 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7140 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7141 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7142 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7143 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7144 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7145 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7146 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7147 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7148 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7149 case elfcpp::R_AARCH64_TLSDESC_CALL:
7150 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7151 gsym, psymval, view, address);
7152 break;
7153
7154 // These are dynamic relocations, which are unexpected when linking.
7155 case elfcpp::R_AARCH64_COPY:
7156 case elfcpp::R_AARCH64_GLOB_DAT:
7157 case elfcpp::R_AARCH64_JUMP_SLOT:
7158 case elfcpp::R_AARCH64_RELATIVE:
7159 case elfcpp::R_AARCH64_IRELATIVE:
7160 case elfcpp::R_AARCH64_TLS_DTPREL64:
7161 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7162 case elfcpp::R_AARCH64_TLS_TPREL64:
7163 case elfcpp::R_AARCH64_TLSDESC:
7164 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7165 _("unexpected reloc %u in object file"),
7166 r_type);
7167 break;
7168
7169 default:
7170 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7171 _("unsupported reloc %s"),
7172 reloc_property->name().c_str());
7173 break;
7174 }
7175
7176 // Report any errors.
7177 switch (reloc_status)
7178 {
7179 case Reloc::STATUS_OKAY:
7180 break;
7181 case Reloc::STATUS_OVERFLOW:
7182 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7183 _("relocation overflow in %s"),
7184 reloc_property->name().c_str());
7185 break;
7186 case Reloc::STATUS_BAD_RELOC:
7187 gold_error_at_location(
7188 relinfo,
7189 relnum,
7190 rela.get_r_offset(),
7191 _("unexpected opcode while processing relocation %s"),
7192 reloc_property->name().c_str());
7193 break;
7194 default:
7195 gold_unreachable();
7196 }
7197
7198 return true;
7199 }
7200
7201
7202 template<int size, bool big_endian>
7203 inline
7204 typename AArch64_relocate_functions<size, big_endian>::Status
7205 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7206 const Relocate_info<size, big_endian>* relinfo,
7207 Target_aarch64<size, big_endian>* target,
7208 size_t relnum,
7209 const elfcpp::Rela<size, big_endian>& rela,
7210 unsigned int r_type, const Sized_symbol<size>* gsym,
7211 const Symbol_value<size>* psymval,
7212 unsigned char* view,
7213 typename elfcpp::Elf_types<size>::Elf_Addr address)
7214 {
7215 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7216 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7217
7218 Output_segment* tls_segment = relinfo->layout->tls_segment();
7219 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7220 const AArch64_reloc_property* reloc_property =
7221 aarch64_reloc_property_table->get_reloc_property(r_type);
7222 gold_assert(reloc_property != NULL);
7223
7224 const bool is_final = (gsym == NULL
7225 ? !parameters->options().shared()
7226 : gsym->final_value_is_known());
7227 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7228 optimize_tls_reloc(is_final, r_type);
7229
7230 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7231 int tls_got_offset_type;
7232 switch (r_type)
7233 {
7234 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7235 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7236 {
7237 if (tlsopt == tls::TLSOPT_TO_LE)
7238 {
7239 if (tls_segment == NULL)
7240 {
7241 gold_assert(parameters->errors()->error_count() > 0
7242 || issue_undefined_symbol_error(gsym));
7243 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7244 }
7245 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7246 psymval);
7247 }
7248 else if (tlsopt == tls::TLSOPT_NONE)
7249 {
7250 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7251 // First, get the address of the GOT entry.
7252 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7253 if (gsym != NULL)
7254 {
7255 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7256 got_entry_address = target->got_->address() +
7257 gsym->got_offset(tls_got_offset_type);
7258 }
7259 else
7260 {
7261 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7262 gold_assert(
7263 object->local_has_got_offset(r_sym, tls_got_offset_type));
7264 got_entry_address = target->got_->address() +
7265 object->local_got_offset(r_sym, tls_got_offset_type);
7266 }
7267
7268 // Relocate the address into the adrp/ld or adrp/add pair.
7269 switch (r_type)
7270 {
7271 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7272 return aarch64_reloc_funcs::adrp(
7273 view, got_entry_address + addend, address);
7274
7275 break;
7276
7277 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7278 return aarch64_reloc_funcs::template rela_general<32>(
7279 view, got_entry_address, addend, reloc_property);
7280 break;
7281
7282 default:
7283 gold_unreachable();
7284 }
7285 }
7286 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7287 _("unsupported gd_to_ie relaxation on %u"),
7288 r_type);
7289 }
7290 break;
7291
7292 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7293 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7294 {
7295 if (tlsopt == tls::TLSOPT_TO_LE)
7296 {
7297 if (tls_segment == NULL)
7298 {
7299 gold_assert(parameters->errors()->error_count() > 0
7300 || issue_undefined_symbol_error(gsym));
7301 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7302 }
7303 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7304 psymval);
7305 }
7306
7307 gold_assert(tlsopt == tls::TLSOPT_NONE);
7308 // Relocate the field with the offset of the GOT entry for
7309 // the module index.
7310 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7311 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7312 target->got_->address());
7313
7314 switch (r_type)
7315 {
7316 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7317 return aarch64_reloc_funcs::adrp(
7318 view, got_entry_address + addend, address);
7319 break;
7320
7321 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7322 return aarch64_reloc_funcs::template rela_general<32>(
7323 view, got_entry_address, addend, reloc_property);
7324 break;
7325
7326 default:
7327 gold_unreachable();
7328 }
7329 }
7330 break;
7331
7332 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7333 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7334 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7335 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7336 {
7337 AArch64_address value = psymval->value(object, 0);
7338 if (tlsopt == tls::TLSOPT_TO_LE)
7339 {
7340 if (tls_segment == NULL)
7341 {
7342 gold_assert(parameters->errors()->error_count() > 0
7343 || issue_undefined_symbol_error(gsym));
7344 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7345 }
7346 }
7347 switch (r_type)
7348 {
7349 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7350 return aarch64_reloc_funcs::movnz(view, value + addend,
7351 reloc_property);
7352 break;
7353
7354 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7355 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7356 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7357 return aarch64_reloc_funcs::template rela_general<32>(
7358 view, value, addend, reloc_property);
7359 break;
7360
7361 default:
7362 gold_unreachable();
7363 }
7364 // We should never reach here.
7365 }
7366 break;
7367
7368 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7369 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7370 {
7371 if (tlsopt == tls::TLSOPT_TO_LE)
7372 {
7373 if (tls_segment == NULL)
7374 {
7375 gold_assert(parameters->errors()->error_count() > 0
7376 || issue_undefined_symbol_error(gsym));
7377 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7378 }
7379 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7380 psymval);
7381 }
7382 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7383
7384 // First, get the address of the GOT entry.
7385 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7386 if (gsym != NULL)
7387 {
7388 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7389 got_entry_address = target->got_->address() +
7390 gsym->got_offset(tls_got_offset_type);
7391 }
7392 else
7393 {
7394 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7395 gold_assert(
7396 object->local_has_got_offset(r_sym, tls_got_offset_type));
7397 got_entry_address = target->got_->address() +
7398 object->local_got_offset(r_sym, tls_got_offset_type);
7399 }
7400 // Relocate the address into the adrp/ld or adrp/add pair.
7401 switch (r_type)
7402 {
7403 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7404 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7405 address);
7406 break;
7407 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7408 return aarch64_reloc_funcs::template rela_general<32>(
7409 view, got_entry_address, addend, reloc_property);
7410 default:
7411 gold_unreachable();
7412 }
7413 }
7414 // We shall never reach here.
7415 break;
7416
7417 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7418 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7419 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7420 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7421 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7422 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7423 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7424 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7425 {
7426 gold_assert(tls_segment != NULL);
7427 AArch64_address value = psymval->value(object, 0);
7428
7429 if (!parameters->options().shared())
7430 {
7431 AArch64_address aligned_tcb_size =
7432 align_address(target->tcb_size(),
7433 tls_segment->maximum_alignment());
7434 value += aligned_tcb_size;
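// (AArch64 uses TLS variant 1: TP points at the TCB, so a local-exec
// offset is the symbol's offset within the TLS block plus the TCB size
// (16 bytes for LP64) rounded up to the TLS segment alignment, as
// computed above.)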
7435 switch (r_type)
7436 {
7437 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7438 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7439 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7440 return aarch64_reloc_funcs::movnz(view, value + addend,
7441 reloc_property);
7442 default:
7443 return aarch64_reloc_funcs::template
7444 rela_general<32>(view,
7445 value,
7446 addend,
7447 reloc_property);
7448 }
7449 }
7450 else
7451 gold_error(_("%s: unsupported reloc %u "
7452 "in non-static TLSLE mode."),
7453 object->name().c_str(), r_type);
7454 }
7455 break;
7456
7457 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7458 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7459 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7460 case elfcpp::R_AARCH64_TLSDESC_CALL:
7461 {
7462 if (tlsopt == tls::TLSOPT_TO_LE)
7463 {
7464 if (tls_segment == NULL)
7465 {
7466 gold_assert(parameters->errors()->error_count() > 0
7467 || issue_undefined_symbol_error(gsym));
7468 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7469 }
7470 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7471 view, psymval);
7472 }
7473 else
7474 {
7475 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7476 ? GOT_TYPE_TLS_OFFSET
7477 : GOT_TYPE_TLS_DESC);
7478 unsigned int got_tlsdesc_offset = 0;
7479 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7480 && tlsopt == tls::TLSOPT_NONE)
7481 {
7482 // We created GOT entries in the .got.tlsdesc portion of the
7483 // .got.plt section, but the offset stored in the symbol is the
7484 // offset within .got.tlsdesc.
7485 got_tlsdesc_offset = (target->got_->data_size()
7486 + target->got_plt_section()->data_size());
7487 }
7488 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7489 if (gsym != NULL)
7490 {
7491 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7492 got_entry_address = target->got_->address()
7493 + got_tlsdesc_offset
7494 + gsym->got_offset(tls_got_offset_type);
7495 }
7496 else
7497 {
7498 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7499 gold_assert(
7500 object->local_has_got_offset(r_sym, tls_got_offset_type));
7501 got_entry_address = target->got_->address() +
7502 got_tlsdesc_offset +
7503 object->local_got_offset(r_sym, tls_got_offset_type);
7504 }
7505 if (tlsopt == tls::TLSOPT_TO_IE)
7506 {
7507 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7508 view, psymval, got_entry_address,
7509 address);
7510 }
7511
7512 // Now do tlsdesc relocation.
7513 switch (r_type)
7514 {
7515 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7516 return aarch64_reloc_funcs::adrp(view,
7517 got_entry_address + addend,
7518 address);
7519 break;
7520 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7521 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7522 return aarch64_reloc_funcs::template rela_general<32>(
7523 view, got_entry_address, addend, reloc_property);
7524 break;
7525 case elfcpp::R_AARCH64_TLSDESC_CALL:
7526 return aarch64_reloc_funcs::STATUS_OKAY;
7527 break;
7528 default:
7529 gold_unreachable();
7530 }
7531 }
7532 }
7533 break;
7534
7535 default:
7536 gold_error(_("%s: unsupported TLS reloc %u."),
7537 object->name().c_str(), r_type);
7538 }
7539 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7540 } // End of relocate_tls.
7541
7542
7543 template<int size, bool big_endian>
7544 inline
7545 typename AArch64_relocate_functions<size, big_endian>::Status
7546 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7547 const Relocate_info<size, big_endian>* relinfo,
7548 Target_aarch64<size, big_endian>* target,
7549 const elfcpp::Rela<size, big_endian>& rela,
7550 unsigned int r_type,
7551 unsigned char* view,
7552 const Symbol_value<size>* psymval)
7553 {
7554 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7555 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7556 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7557
7558 Insntype* ip = reinterpret_cast<Insntype*>(view);
7559 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7560 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7561 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7562
7563 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7564 {
7565 // This is the 2nd reloc; the optimization should already have been
7566 // done.
7567 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7568 return aarch64_reloc_funcs::STATUS_OKAY;
7569 }
7570
7571 // The original sequence is -
7572 // 90000000 adrp x0, 0 <main>
7573 // 91000000 add x0, x0, #0x0
7574 // 94000000 bl 0 <__tls_get_addr>
7575 // optimized to sequence -
7576 // d53bd040 mrs x0, tpidr_el0
7577 // 91400000 add x0, x0, #0x0, lsl #12
7578 // 91000000 add x0, x0, #0x0
7579
7580 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7581 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21", because we
7582 // have to change "bl __tls_get_addr", which does not have a corresponding
7583 // tls relocation type. So before proceeding, we need to make sure the
7584 // compiler has not changed the sequence.
7585 if (!(insn1 == 0x90000000 // adrp x0,0
7586 && insn2 == 0x91000000 // add x0, x0, #0x0
7587 && insn3 == 0x94000000)) // bl 0
7588 {
7589 // Ideally we should give up gd_to_le relaxation and do gd access.
7590 // However the gd_to_le relaxation decision has been made early
7591 // in the scan stage, where we did not allocate any GOT entry for
7592 // this symbol. Therefore we have to exit and report an error now.
7593 gold_error(_("unexpected reloc insn sequence while relaxing "
7594 "tls gd to le for reloc %u."), r_type);
7595 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7596 }
7597
7598 // Write new insns.
7599 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7600 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7601 insn3 = 0x91000000; // add x0, x0, #0x0
7602 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7603 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7604 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7605
7606 // Calculate tprel value.
7607 Output_segment* tls_segment = relinfo->layout->tls_segment();
7608 gold_assert(tls_segment != NULL);
7609 AArch64_address value = psymval->value(relinfo->object, 0);
7610 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7611 AArch64_address aligned_tcb_size =
7612 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7613 AArch64_address x = value + aligned_tcb_size;
7614
7615 // After new insns are written, apply TLSLE relocs.
7616 const AArch64_reloc_property* rp1 =
7617 aarch64_reloc_property_table->get_reloc_property(
7618 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7619 const AArch64_reloc_property* rp2 =
7620 aarch64_reloc_property_table->get_reloc_property(
7621 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7622 gold_assert(rp1 != NULL && rp2 != NULL);
7623
7624 typename aarch64_reloc_funcs::Status s1 =
7625 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7626 x,
7627 addend,
7628 rp1);
7629 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7630 return s1;
7631
7632 typename aarch64_reloc_funcs::Status s2 =
7633 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7634 x,
7635 addend,
7636 rp2);
7637
7638 this->skip_call_tls_get_addr_ = true;
7639 return s2;
7640 } // End of tls_gd_to_le
7641
7642
7643 template<int size, bool big_endian>
7644 inline
7645 typename AArch64_relocate_functions<size, big_endian>::Status
7646 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7647 const Relocate_info<size, big_endian>* relinfo,
7648 Target_aarch64<size, big_endian>* target,
7649 const elfcpp::Rela<size, big_endian>& rela,
7650 unsigned int r_type,
7651 unsigned char* view,
7652 const Symbol_value<size>* psymval)
7653 {
7654 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7655 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7656 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7657
7658 Insntype* ip = reinterpret_cast<Insntype*>(view);
7659 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7660 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7661 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7662
7663 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7664 {
7665 // This is the 2nd reloc; the optimization should already have been
7666 // done.
7667 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7668 return aarch64_reloc_funcs::STATUS_OKAY;
7669 }
7670
7671 // The original sequence is -
7672 // 90000000 adrp x0, 0 <main>
7673 // 91000000 add x0, x0, #0x0
7674 // 94000000 bl 0 <__tls_get_addr>
7675 // optimized to sequence -
7676 // d53bd040 mrs x0, tpidr_el0
7677 // 91400000 add x0, x0, #0x0, lsl #12
7678 // 91000000 add x0, x0, #0x0
7679
7680 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7681 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21", because we
7682 // have to change "bl __tls_get_addr", which does not have a corresponding
7683 // tls relocation type. So before proceeding, we need to make sure the
7684 // compiler has not changed the sequence.
7685 if (!(insn1 == 0x90000000 // adrp x0,0
7686 && insn2 == 0x91000000 // add x0, x0, #0x0
7687 && insn3 == 0x94000000)) // bl 0
7688 {
7689 // Ideally we should give up ld_to_le relaxation and do ld access.
7690 // However the ld_to_le relaxation decision has been made early
7691 // in the scan stage, where we did not allocate any GOT entry for
7692 // this symbol. Therefore we have to exit and report an error now.
7693 gold_error(_("unexpected reloc insn sequence while relaxing "
7694 "tls ld to le for reloc %u."), r_type);
7695 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7696 }
7697
7698 // Write new insns.
7699 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7700 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7701 insn3 = 0x91000000; // add x0, x0, #0x0
7702 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7703 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7704 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7705
7706 // Calculate tprel value.
7707 Output_segment* tls_segment = relinfo->layout->tls_segment();
7708 gold_assert(tls_segment != NULL);
7709 AArch64_address value = psymval->value(relinfo->object, 0);
7710 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7711 AArch64_address aligned_tcb_size =
7712 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7713 AArch64_address x = value + aligned_tcb_size;
7714
7715 // After new insns are written, apply TLSLE relocs.
7716 const AArch64_reloc_property* rp1 =
7717 aarch64_reloc_property_table->get_reloc_property(
7718 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7719 const AArch64_reloc_property* rp2 =
7720 aarch64_reloc_property_table->get_reloc_property(
7721 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7722 gold_assert(rp1 != NULL && rp2 != NULL);
7723
7724 typename aarch64_reloc_funcs::Status s1 =
7725 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7726 x,
7727 addend,
7728 rp1);
7729 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7730 return s1;
7731
7732 typename aarch64_reloc_funcs::Status s2 =
7733 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7734 x,
7735 addend,
7736 rp2);
7737
7738 this->skip_call_tls_get_addr_ = true;
7739 return s2;
7740
7741 } // End of tls_ld_to_le
7742
7743 template<int size, bool big_endian>
7744 inline
7745 typename AArch64_relocate_functions<size, big_endian>::Status
7746 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7747 const Relocate_info<size, big_endian>* relinfo,
7748 Target_aarch64<size, big_endian>* target,
7749 const elfcpp::Rela<size, big_endian>& rela,
7750 unsigned int r_type,
7751 unsigned char* view,
7752 const Symbol_value<size>* psymval)
7753 {
7754 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7755 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7756 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7757
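// Sketch of the relaxation performed here, assuming the usual IE sequence
// (this mirrors what the rewrites below actually emit):
//   adrp x0, :gottprel:var             -->  movz x0, #hi16(tprel), lsl #16
//   ldr  x0, [x0, #:gottprel_lo12:var] -->  movk x0, #lo16(tprel)
// i.e. the GOT load of the TP-relative offset becomes an immediate move.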
7758 AArch64_address value = psymval->value(relinfo->object, 0);
7759 Output_segment* tls_segment = relinfo->layout->tls_segment();
7760 AArch64_address aligned_tcb_address =
7761 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7762 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7763 AArch64_address x = value + addend + aligned_tcb_address;
7764 // "x" is the offset to tp, we can only do this if x is within
7765 // range [0, 2^32-1]
7766 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7767 {
7768 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7769 r_type);
7770 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7771 }
7772
7773 Insntype* ip = reinterpret_cast<Insntype*>(view);
7774 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7775 unsigned int regno;
7776 Insntype newinsn;
7777 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7778 {
7779 // Generate movz.
7780 regno = (insn & 0x1f);
7781 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7782 }
7783 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7784 {
7785 // Generate movk.
7786 regno = (insn & 0x1f);
7787 gold_assert(regno == ((insn >> 5) & 0x1f));
7788 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7789 }
7790 else
7791 gold_unreachable();
7792
7793 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7794 return aarch64_reloc_funcs::STATUS_OKAY;
7795 } // End of tls_ie_to_le
7796
7797
7798 template<int size, bool big_endian>
7799 inline
7800 typename AArch64_relocate_functions<size, big_endian>::Status
7801 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7802 const Relocate_info<size, big_endian>* relinfo,
7803 Target_aarch64<size, big_endian>* target,
7804 const elfcpp::Rela<size, big_endian>& rela,
7805 unsigned int r_type,
7806 unsigned char* view,
7807 const Symbol_value<size>* psymval)
7808 {
7809 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7810 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7811 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7812
7813 // TLSDESC-GD sequence is like:
7814 // adrp x0, :tlsdesc:v1
7815 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7816 // add x0, x0, :tlsdesc_lo12:v1
7817 // .tlsdesccall v1
7818 // blr x1
7819 // After desc_gd_to_le optimization, the sequence will be like:
7820 // movz x0, #0x0, lsl #16
7821 // movk x0, #0x10
7822 // nop
7823 // nop
7824
7825 // Calculate tprel value.
7826 Output_segment* tls_segment = relinfo->layout->tls_segment();
7827 gold_assert(tls_segment != NULL);
7828 Insntype* ip = reinterpret_cast<Insntype*>(view);
7829 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7830 AArch64_address value = psymval->value(relinfo->object, addend);
7831 AArch64_address aligned_tcb_size =
7832 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7833 AArch64_address x = value + aligned_tcb_size;
7834 // x is the offset from tp; we can only do this if x is within the range
7835 // [0, 2^32-1]. If x is out of range, fail and exit.
7836 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7837 {
7838 gold_error(_("TLS variable referred to by reloc %u is too far from TP. "
7839 "We can't do gd_to_le relaxation.\n"), r_type);
7840 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7841 }
7842 Insntype newinsn;
7843 switch (r_type)
7844 {
7845 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7846 case elfcpp::R_AARCH64_TLSDESC_CALL:
7847 // Change to nop
7848 newinsn = 0xd503201f;
7849 break;
7850
7851 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7852 // Change to movz.
7853 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7854 break;
7855
7856 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7857 // Change to movk.
7858 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7859 break;
7860
7861 default:
7862 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7863 r_type);
7864 gold_unreachable();
7865 }
7866 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7867 return aarch64_reloc_funcs::STATUS_OKAY;
7868 } // End of tls_desc_gd_to_le
7869
7870
7871 template<int size, bool big_endian>
7872 inline
7873 typename AArch64_relocate_functions<size, big_endian>::Status
7874 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7875 const Relocate_info<size, big_endian>* /* relinfo */,
7876 Target_aarch64<size, big_endian>* /* target */,
7877 const elfcpp::Rela<size, big_endian>& rela,
7878 unsigned int r_type,
7879 unsigned char* view,
7880 const Symbol_value<size>* /* psymval */,
7881 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7882 typename elfcpp::Elf_types<size>::Elf_Addr address)
7883 {
7884 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7885 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7886
7887 // TLSDESC-GD sequence is like:
7888 // adrp x0, :tlsdesc:v1
7889 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7890 // add x0, x0, :tlsdesc_lo12:v1
7891 // .tlsdesccall v1
7892 // blr x1
7893 // After desc_gd_to_ie optimization, the sequence will be like:
7894 // adrp x0, :tlsie:v1
7895 // ldr x0, [x0, :tlsie_lo12:v1]
7896 // nop
7897 // nop
7898
7899 Insntype* ip = reinterpret_cast<Insntype*>(view);
7900 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7901 Insntype newinsn;
7902 switch (r_type)
7903 {
7904 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7905 case elfcpp::R_AARCH64_TLSDESC_CALL:
7906 // Change to nop
7907 newinsn = 0xd503201f;
7908 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7909 break;
7910
7911 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7912 {
7913 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7914 address);
7915 }
7916 break;
7917
7918 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7919 {
7920 // Set ldr target register to be x0.
7921 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7922 insn &= 0xffffffe0;
7923 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7924 // Do relocation.
7925 const AArch64_reloc_property* reloc_property =
7926 aarch64_reloc_property_table->get_reloc_property(
7927 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7928 return aarch64_reloc_funcs::template rela_general<32>(
7929 view, got_entry_address, addend, reloc_property);
7930 }
7931 break;
7932
7933 default:
7934 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
7935 r_type);
7936 gold_unreachable();
7937 }
7938 return aarch64_reloc_funcs::STATUS_OKAY;
7939 } // End of tls_desc_gd_to_ie
7940
7941 // Relocate section data.
7942
7943 template<int size, bool big_endian>
7944 void
7945 Target_aarch64<size, big_endian>::relocate_section(
7946 const Relocate_info<size, big_endian>* relinfo,
7947 unsigned int sh_type,
7948 const unsigned char* prelocs,
7949 size_t reloc_count,
7950 Output_section* output_section,
7951 bool needs_special_offset_handling,
7952 unsigned char* view,
7953 typename elfcpp::Elf_types<size>::Elf_Addr address,
7954 section_size_type view_size,
7955 const Reloc_symbol_changes* reloc_symbol_changes)
7956 {
7957 typedef Target_aarch64<size, big_endian> Aarch64;
7958 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
7959 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7960 Classify_reloc;
7961
7962 gold_assert(sh_type == elfcpp::SHT_RELA);
7963
7964 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
7965 gold::Default_comdat_behavior, Classify_reloc>(
7966 relinfo,
7967 this,
7968 prelocs,
7969 reloc_count,
7970 output_section,
7971 needs_special_offset_handling,
7972 view,
7973 address,
7974 view_size,
7975 reloc_symbol_changes);
7976 }
7977
7978 // Scan the relocs during a relocatable link.
7979
7980 template<int size, bool big_endian>
7981 void
7982 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
7983 Symbol_table* symtab,
7984 Layout* layout,
7985 Sized_relobj_file<size, big_endian>* object,
7986 unsigned int data_shndx,
7987 unsigned int sh_type,
7988 const unsigned char* prelocs,
7989 size_t reloc_count,
7990 Output_section* output_section,
7991 bool needs_special_offset_handling,
7992 size_t local_symbol_count,
7993 const unsigned char* plocal_symbols,
7994 Relocatable_relocs* rr)
7995 {
7996 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7997 Classify_reloc;
7998 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
7999 Scan_relocatable_relocs;
8000
8001 gold_assert(sh_type == elfcpp::SHT_RELA);
8002
8003 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8004 symtab,
8005 layout,
8006 object,
8007 data_shndx,
8008 prelocs,
8009 reloc_count,
8010 output_section,
8011 needs_special_offset_handling,
8012 local_symbol_count,
8013 plocal_symbols,
8014 rr);
8015 }
8016
8017 // Scan the relocs for --emit-relocs.
8018
8019 template<int size, bool big_endian>
8020 void
8021 Target_aarch64<size, big_endian>::emit_relocs_scan(
8022 Symbol_table* symtab,
8023 Layout* layout,
8024 Sized_relobj_file<size, big_endian>* object,
8025 unsigned int data_shndx,
8026 unsigned int sh_type,
8027 const unsigned char* prelocs,
8028 size_t reloc_count,
8029 Output_section* output_section,
8030 bool needs_special_offset_handling,
8031 size_t local_symbol_count,
8032 const unsigned char* plocal_syms,
8033 Relocatable_relocs* rr)
8034 {
8035 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8036 Classify_reloc;
8037 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8038 Emit_relocs_strategy;
8039
8040 gold_assert(sh_type == elfcpp::SHT_RELA);
8041
8042 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8043 symtab,
8044 layout,
8045 object,
8046 data_shndx,
8047 prelocs,
8048 reloc_count,
8049 output_section,
8050 needs_special_offset_handling,
8051 local_symbol_count,
8052 plocal_syms,
8053 rr);
8054 }
8055
8056 // Relocate a section during a relocatable link.
8057
8058 template<int size, bool big_endian>
8059 void
8060 Target_aarch64<size, big_endian>::relocate_relocs(
8061 const Relocate_info<size, big_endian>* relinfo,
8062 unsigned int sh_type,
8063 const unsigned char* prelocs,
8064 size_t reloc_count,
8065 Output_section* output_section,
8066 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8067 unsigned char* view,
8068 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8069 section_size_type view_size,
8070 unsigned char* reloc_view,
8071 section_size_type reloc_view_size)
8072 {
8073 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8074 Classify_reloc;
8075
8076 gold_assert(sh_type == elfcpp::SHT_RELA);
8077
8078 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8079 relinfo,
8080 prelocs,
8081 reloc_count,
8082 output_section,
8083 offset_in_output_section,
8084 view,
8085 view_address,
8086 view_size,
8087 reloc_view,
8088 reloc_view_size);
8089 }
8090
8091
8092 // Return whether this is a 3-insn erratum sequence.
8093
8094 template<int size, bool big_endian>
8095 bool
8096 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8097 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8098 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8099 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8100 {
8101 unsigned rt1, rt2;
8102 bool load, pair;
8103
8104 // The 2nd insn is a single register load or store; or register pair
8105 // store.
8106 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8107 && (!pair || (pair && !load)))
8108 {
8109 // The 3rd insn is a load or store instruction from the "Load/store
8110 // register (unsigned immediate)" encoding class, using Rn as the
8111 // base address register.
8112 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8113 && (Insn_utilities::aarch64_rn(insn3)
8114 == Insn_utilities::aarch64_rd(insn1)))
8115 return true;
8116 }
8117 return false;
8118 }
8119
8120
8121 // Return whether this is a 835769 sequence.
8122 // (Similarly implemented as in elfnn-aarch64.c.)
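// An illustrative shape of the sequence (hypothetical registers):
//   ldr  x1, [x3]             // insn1, a memory operation
//   madd x0, x4, x5, x6       // insn2, a 64-bit multiply-accumulate
// A load whose result feeds the multiply-accumulate (a true RAW dependency)
// is safe; everything else is conservatively treated as an erratum sequence.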
8123
8124 template<int size, bool big_endian>
8125 bool
8126 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8127 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8128 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8129 {
8130 uint32_t rt;
8131 uint32_t rt2 = 0;
8132 uint32_t rn;
8133 uint32_t rm;
8134 uint32_t ra;
8135 bool pair;
8136 bool load;
8137
8138 if (Insn_utilities::aarch64_mlxl(insn2)
8139 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8140 {
8141 /* Any SIMD memory op is independent of the subsequent MLA
8142 by definition of the erratum. */
8143 if (Insn_utilities::aarch64_bit(insn1, 26))
8144 return true;
8145
8146 /* If not SIMD, check for integer memory ops and MLA relationship. */
8147 rn = Insn_utilities::aarch64_rn(insn2);
8148 ra = Insn_utilities::aarch64_ra(insn2);
8149 rm = Insn_utilities::aarch64_rm(insn2);
8150
8151 /* If this is a load and there's a true(RAW) dependency, we are safe
8152 and this is not an erratum sequence. */
8153 if (load &&
8154 (rt == rn || rt == rm || rt == ra
8155 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8156 return false;
8157
8158 /* We conservatively put out stubs for all other cases (including
8159 writebacks). */
8160 return true;
8161 }
8162
8163 return false;
8164 }
8165
8166
8167 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8168
8169 template<int size, bool big_endian>
8170 void
8171 Target_aarch64<size, big_endian>::create_erratum_stub(
8172 AArch64_relobj<size, big_endian>* relobj,
8173 unsigned int shndx,
8174 section_size_type erratum_insn_offset,
8175 Address erratum_address,
8176 typename Insn_utilities::Insntype erratum_insn,
8177 int erratum_type,
8178 unsigned int e843419_adrp_offset)
8179 {
8180 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8181 The_stub_table* stub_table = relobj->stub_table(shndx);
8182 gold_assert(stub_table != NULL);
8183 if (stub_table->find_erratum_stub(relobj,
8184 shndx,
8185 erratum_insn_offset) == NULL)
8186 {
8187 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8188 The_erratum_stub* stub;
8189 if (erratum_type == ST_E_835769)
8190 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8191 erratum_insn_offset);
8192 else if (erratum_type == ST_E_843419)
8193 stub = new E843419_stub<size, big_endian>(
8194 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8195 else
8196 gold_unreachable();
8197 stub->set_erratum_insn(erratum_insn);
8198 stub->set_erratum_address(erratum_address);
8199 // For errata ST_E_843419 and ST_E_835769, the destination address is
8200 // always the next insn after the erratum insn.
8201 stub->set_destination_address(erratum_address + BPI);
8202 stub_table->add_erratum_stub(stub);
8203 }
8204 }
8205
8206
8207 // Scan erratum for section SHNDX range [output_address + span_start,
8208 // output_address + span_end). Note here we do not share the code with
8209 // scan_erratum_843419_span function, because for 843419 we optimize by only
8210 // scanning the last few insns of a page, whereas for 835769, we need to scan
8211 // every insn.
8212
8213 template<int size, bool big_endian>
8214 void
8215 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8216 AArch64_relobj<size, big_endian>* relobj,
8217 unsigned int shndx,
8218 const section_size_type span_start,
8219 const section_size_type span_end,
8220 unsigned char* input_view,
8221 Address output_address)
8222 {
8223 typedef typename Insn_utilities::Insntype Insntype;
8224
8225 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8226
8227 // Adjust output_address and view to the start of span.
8228 output_address += span_start;
8229 input_view += span_start;
8230
8231 section_size_type span_length = span_end - span_start;
8232 section_size_type offset = 0;
8233 for (offset = 0; offset + BPI < span_length; offset += BPI)
8234 {
8235 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8236 Insntype insn1 = ip[0];
8237 Insntype insn2 = ip[1];
8238 if (is_erratum_835769_sequence(insn1, insn2))
8239 {
8240 Insntype erratum_insn = insn2;
8241 // "span_start + offset" is the offset for insn1. So for insn2, it is
8242 // "span_start + offset + BPI".
8243 section_size_type erratum_insn_offset = span_start + offset + BPI;
8244 Address erratum_address = output_address + offset + BPI;
8245 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8246 "section %d, offset 0x%08x."),
8247 relobj->name().c_str(), shndx,
8248 (unsigned int)(span_start + offset));
8249
8250 this->create_erratum_stub(relobj, shndx,
8251 erratum_insn_offset, erratum_address,
8252 erratum_insn, ST_E_835769);
8253 offset += BPI; // Skip mac insn.
8254 }
8255 }
8256 } // End of "Target_aarch64::scan_erratum_835769_span".
8257
8258
8259 // Scan erratum for section SHNDX range
8260 // [output_address + span_start, output_address + span_end).
8261
8262 template<int size, bool big_endian>
8263 void
8264 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8265 AArch64_relobj<size, big_endian>* relobj,
8266 unsigned int shndx,
8267 const section_size_type span_start,
8268 const section_size_type span_end,
8269 unsigned char* input_view,
8270 Address output_address)
8271 {
8272 typedef typename Insn_utilities::Insntype Insntype;
8273
8274 // Adjust output_address and view to the start of span.
8275 output_address += span_start;
8276 input_view += span_start;
8277
8278 if ((output_address & 0x03) != 0)
8279 return;
8280
8281 section_size_type offset = 0;
8282 section_size_type span_length = span_end - span_start;
8283 // The first instruction must start at page offset 0xFF8 or 0xFFC.
8284 unsigned int page_offset = output_address & 0xFFF;
8285 // Make sure the starting position, that is "output_address + offset",
8286 // is at page offset 0xff8 or 0xffc.
8287 if (page_offset < 0xff8)
8288 offset = 0xff8 - page_offset;
8289 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8290 {
8291 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8292 Insntype insn1 = ip[0];
8293 if (Insn_utilities::is_adrp(insn1))
8294 {
8295 Insntype insn2 = ip[1];
8296 Insntype insn3 = ip[2];
8297 Insntype erratum_insn;
8298 unsigned insn_offset;
8299 bool do_report = false;
8300 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8301 {
8302 do_report = true;
8303 erratum_insn = insn3;
8304 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8305 }
8306 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8307 {
8308 // Optionally we can have an insn between insn2 and insn3.
8309 Insntype insn_opt = ip[2];
8310 // And insn_opt must not be a branch.
8311 if (!Insn_utilities::aarch64_b(insn_opt)
8312 && !Insn_utilities::aarch64_bl(insn_opt)
8313 && !Insn_utilities::aarch64_blr(insn_opt)
8314 && !Insn_utilities::aarch64_br(insn_opt))
8315 {
8316 // And insn_opt must not write to dest reg in insn1. However
8317 // we do a conservative scan, which means we may fix/report
8318 // more than necessary, but it doesn't hurt.
8319
8320 Insntype insn4 = ip[3];
8321 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8322 {
8323 do_report = true;
8324 erratum_insn = insn4;
8325 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8326 }
8327 }
8328 }
8329 if (do_report)
8330 {
8331 unsigned int erratum_insn_offset =
8332 span_start + offset + insn_offset;
8333 Address erratum_address =
8334 output_address + offset + insn_offset;
8335 create_erratum_stub(relobj, shndx,
8336 erratum_insn_offset, erratum_address,
8337 erratum_insn, ST_E_843419,
8338 span_start + offset);
8339 }
8340 }
8341
8342 // Advance to next candidate instruction. We only consider instruction
8343 // sequences starting at a page offset of 0xff8 or 0xffc.
8344 page_offset = (output_address + offset) & 0xfff;
8345 if (page_offset == 0xff8)
8346 offset += 4;
8347 else // (page_offset == 0xffc), we move to next page's 0xff8.
8348 offset += 0xffc;
8349 }
8350 } // End of "Target_aarch64::scan_erratum_843419_span".
8351
8352
8353 // The selector for aarch64 object files.
8354
8355 template<int size, bool big_endian>
8356 class Target_selector_aarch64 : public Target_selector
8357 {
8358 public:
8359 Target_selector_aarch64();
8360
8361 virtual Target*
8362 do_instantiate_target()
8363 { return new Target_aarch64<size, big_endian>(); }
8364 };
8365
8366 template<>
8367 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8368 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8369 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8370 { }
8371
8372 template<>
8373 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8374 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8375 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8376 { }
8377
8378 template<>
8379 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8380 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8381 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8382 { }
8383
8384 template<>
8385 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8386 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8387 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8388 { }
8389
8390 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8391 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8392 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8393 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8394
8395 } // End anonymous namespace.