[AArch64] Fix extern protected data handling
bfd/elfnn-aarch64.c
1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
  23    Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
  51    traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
  62    global and local TLS symbols.  Note that this is different from
  63    non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
  68    on the module entry.  The loader will subsequently fix up this
  69    relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
  72    R_AARCH64_TLS_DTPREL relocation on the offset entry.  The loader
  73    will subsequently fix up the offset.  For local TLS symbols the
  74    static linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
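   As a rough, purely illustrative sketch of the two double-slot GOT
   layouts just described (the slot labels are informal and are not
   code identifiers):

     traditional TLS    GOT[n]   : module id   <- R_AARCH64_TLS_DTPMOD
                        GOT[n+1] : dtp offset  <- R_AARCH64_TLS_DTPREL
                                                  (globals only; fixed up
                                                  statically for locals)

     TLS descriptor     GOT[n]   : descriptor  <- R_AARCH64_TLSDESC
                        GOT[n+1] : (second half of the descriptor)
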
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
 101    spotted.  The local symbol data structures are created once, when
 102    the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
 109    For each global symbol with a positive reference count we allocate
 110    a double GOT slot.  For a traditional TLS symbol we allocate space
 111    for two relocation entries on the GOT; for a TLS descriptor symbol
 112    we allocate space for one relocation on the slot.  Record the GOT
 113    offset for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
 117    Iterate over all input BFDs, looking in the local symbol data
 118    structures constructed earlier for local TLS symbols, and allocate
 119    them double GOT slots along with space for a single GOT relocation.
 120    Update the local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128    relocations are emitted only once, the first time a TLS symbol is
 129    encountered.  The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
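/* A minimal, purely illustrative sketch of the GOT-offset LSB convention
   described in the comment above: bit 0 of a recorded GOT offset flags
   "GOT relocations already emitted" and must be masked out before the
   offset is used.  These helper names are invented for the example and
   are not used elsewhere in this file.  */
#define EXAMPLE_GOT_RELOC_EMITTED_P(off) (((off) & (bfd_vma) 1) != 0)
#define EXAMPLE_GOT_OFFSET(off)          ((off) & ~(bfd_vma) 1)
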
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
189 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
190
191 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
192 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
204
205 #define ELIMINATE_COPY_RELOCS 0
206
 207 /* Return size of a relocation entry.  HTAB is the bfd's
 208    elf_aarch64_link_hash_table.  */
209 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
210
211 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
212 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
213 #define PLT_ENTRY_SIZE (32)
214 #define PLT_SMALL_ENTRY_SIZE (16)
215 #define PLT_TLSDESC_ENTRY_SIZE (32)
216
 217 /* Encoding of the nop instruction.  */
218 #define INSN_NOP 0xd503201f
219
220 #define aarch64_compute_jump_table_size(htab) \
221 (((htab)->root.srelplt == NULL) ? 0 \
222 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
223
 224 /* The first entry in a procedure linkage table looks like this.
 225    These PLT entries are used when the distance between the PLTGOT
 226    and the PLT is < 4GB.  Note that the dynamic linker gets &PLTGOT[2]
 227    in x16 and needs to work out PLTGOT[1] by using an address of
 228    [x16,#-GOT_ENTRY_SIZE].  */
229 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
230 {
231 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
232 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
233 #if ARCH_SIZE == 64
234 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
235 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
236 #else
237 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
238 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
239 #endif
240 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
244 };
245
 246 /* A per-function entry in a procedure linkage table looks like this.
 247    These PLT entries are used when the distance between the PLTGOT
 248    and the PLT is < 4GB.  */
249 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
250 {
251 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
252 #if ARCH_SIZE == 64
253 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
254 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
255 #else
256 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
257 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
258 #endif
259 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
260 };
261
262 static const bfd_byte
263 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
264 {
265 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
266 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
267 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
268 #if ARCH_SIZE == 64
269 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
270 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
271 #else
272 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
274 #endif
275 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 0x1f, 0x20, 0x03, 0xd5, /* nop */
278 };
279
280 #define elf_info_to_howto elfNN_aarch64_info_to_howto
281 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
282
283 #define AARCH64_ELF_ABI_VERSION 0
284
285 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
286 #define ALL_ONES (~ (bfd_vma) 0)
287
 288 /* Indexed by the bfd internal reloc enumerators.
289 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
290 in reloc.c. */
291
292 static reloc_howto_type elfNN_aarch64_howto_table[] =
293 {
294 EMPTY_HOWTO (0),
295
296 /* Basic data relocations. */
297
298 #if ARCH_SIZE == 64
299 HOWTO (R_AARCH64_NULL, /* type */
300 0, /* rightshift */
301 3, /* size (0 = byte, 1 = short, 2 = long) */
302 0, /* bitsize */
303 FALSE, /* pc_relative */
304 0, /* bitpos */
305 complain_overflow_dont, /* complain_on_overflow */
306 bfd_elf_generic_reloc, /* special_function */
307 "R_AARCH64_NULL", /* name */
308 FALSE, /* partial_inplace */
309 0, /* src_mask */
310 0, /* dst_mask */
311 FALSE), /* pcrel_offset */
312 #else
313 HOWTO (R_AARCH64_NONE, /* type */
314 0, /* rightshift */
315 3, /* size (0 = byte, 1 = short, 2 = long) */
316 0, /* bitsize */
317 FALSE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_dont, /* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_AARCH64_NONE", /* name */
322 FALSE, /* partial_inplace */
323 0, /* src_mask */
324 0, /* dst_mask */
325 FALSE), /* pcrel_offset */
326 #endif
327
328 /* .xword: (S+A) */
329 HOWTO64 (AARCH64_R (ABS64), /* type */
330 0, /* rightshift */
331 4, /* size (4 = long long) */
332 64, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_unsigned, /* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 AARCH64_R_STR (ABS64), /* name */
338 FALSE, /* partial_inplace */
339 ALL_ONES, /* src_mask */
340 ALL_ONES, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 /* .word: (S+A) */
344 HOWTO (AARCH64_R (ABS32), /* type */
345 0, /* rightshift */
346 2, /* size (0 = byte, 1 = short, 2 = long) */
347 32, /* bitsize */
348 FALSE, /* pc_relative */
349 0, /* bitpos */
350 complain_overflow_unsigned, /* complain_on_overflow */
351 bfd_elf_generic_reloc, /* special_function */
352 AARCH64_R_STR (ABS32), /* name */
353 FALSE, /* partial_inplace */
354 0xffffffff, /* src_mask */
355 0xffffffff, /* dst_mask */
356 FALSE), /* pcrel_offset */
357
358 /* .half: (S+A) */
359 HOWTO (AARCH64_R (ABS16), /* type */
360 0, /* rightshift */
361 1, /* size (0 = byte, 1 = short, 2 = long) */
362 16, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_unsigned, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 AARCH64_R_STR (ABS16), /* name */
368 FALSE, /* partial_inplace */
369 0xffff, /* src_mask */
370 0xffff, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 /* .xword: (S+A-P) */
374 HOWTO64 (AARCH64_R (PREL64), /* type */
375 0, /* rightshift */
376 4, /* size (4 = long long) */
377 64, /* bitsize */
378 TRUE, /* pc_relative */
379 0, /* bitpos */
380 complain_overflow_signed, /* complain_on_overflow */
381 bfd_elf_generic_reloc, /* special_function */
382 AARCH64_R_STR (PREL64), /* name */
383 FALSE, /* partial_inplace */
384 ALL_ONES, /* src_mask */
385 ALL_ONES, /* dst_mask */
386 TRUE), /* pcrel_offset */
387
388 /* .word: (S+A-P) */
389 HOWTO (AARCH64_R (PREL32), /* type */
390 0, /* rightshift */
391 2, /* size (0 = byte, 1 = short, 2 = long) */
392 32, /* bitsize */
393 TRUE, /* pc_relative */
394 0, /* bitpos */
395 complain_overflow_signed, /* complain_on_overflow */
396 bfd_elf_generic_reloc, /* special_function */
397 AARCH64_R_STR (PREL32), /* name */
398 FALSE, /* partial_inplace */
399 0xffffffff, /* src_mask */
400 0xffffffff, /* dst_mask */
401 TRUE), /* pcrel_offset */
402
403 /* .half: (S+A-P) */
404 HOWTO (AARCH64_R (PREL16), /* type */
405 0, /* rightshift */
406 1, /* size (0 = byte, 1 = short, 2 = long) */
407 16, /* bitsize */
408 TRUE, /* pc_relative */
409 0, /* bitpos */
410 complain_overflow_signed, /* complain_on_overflow */
411 bfd_elf_generic_reloc, /* special_function */
412 AARCH64_R_STR (PREL16), /* name */
413 FALSE, /* partial_inplace */
414 0xffff, /* src_mask */
415 0xffff, /* dst_mask */
416 TRUE), /* pcrel_offset */
417
418 /* Group relocations to create a 16, 32, 48 or 64 bit
419 unsigned data or abs address inline. */
420
421 /* MOVZ: ((S+A) >> 0) & 0xffff */
422 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
423 0, /* rightshift */
424 2, /* size (0 = byte, 1 = short, 2 = long) */
425 16, /* bitsize */
426 FALSE, /* pc_relative */
427 0, /* bitpos */
428 complain_overflow_unsigned, /* complain_on_overflow */
429 bfd_elf_generic_reloc, /* special_function */
430 AARCH64_R_STR (MOVW_UABS_G0), /* name */
431 FALSE, /* partial_inplace */
432 0xffff, /* src_mask */
433 0xffff, /* dst_mask */
434 FALSE), /* pcrel_offset */
435
436 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
437 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
438 0, /* rightshift */
439 2, /* size (0 = byte, 1 = short, 2 = long) */
440 16, /* bitsize */
441 FALSE, /* pc_relative */
442 0, /* bitpos */
443 complain_overflow_dont, /* complain_on_overflow */
444 bfd_elf_generic_reloc, /* special_function */
445 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
446 FALSE, /* partial_inplace */
447 0xffff, /* src_mask */
448 0xffff, /* dst_mask */
449 FALSE), /* pcrel_offset */
450
451 /* MOVZ: ((S+A) >> 16) & 0xffff */
452 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
453 16, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 16, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_unsigned, /* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 AARCH64_R_STR (MOVW_UABS_G1), /* name */
461 FALSE, /* partial_inplace */
462 0xffff, /* src_mask */
463 0xffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
467 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
468 16, /* rightshift */
469 2, /* size (0 = byte, 1 = short, 2 = long) */
470 16, /* bitsize */
471 FALSE, /* pc_relative */
472 0, /* bitpos */
473 complain_overflow_dont, /* complain_on_overflow */
474 bfd_elf_generic_reloc, /* special_function */
475 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
476 FALSE, /* partial_inplace */
477 0xffff, /* src_mask */
478 0xffff, /* dst_mask */
479 FALSE), /* pcrel_offset */
480
481 /* MOVZ: ((S+A) >> 32) & 0xffff */
482 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
483 32, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 16, /* bitsize */
486 FALSE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_unsigned, /* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 AARCH64_R_STR (MOVW_UABS_G2), /* name */
491 FALSE, /* partial_inplace */
492 0xffff, /* src_mask */
493 0xffff, /* dst_mask */
494 FALSE), /* pcrel_offset */
495
496 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
497 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
498 32, /* rightshift */
499 2, /* size (0 = byte, 1 = short, 2 = long) */
500 16, /* bitsize */
501 FALSE, /* pc_relative */
502 0, /* bitpos */
503 complain_overflow_dont, /* complain_on_overflow */
504 bfd_elf_generic_reloc, /* special_function */
505 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
506 FALSE, /* partial_inplace */
507 0xffff, /* src_mask */
508 0xffff, /* dst_mask */
509 FALSE), /* pcrel_offset */
510
511 /* MOVZ: ((S+A) >> 48) & 0xffff */
512 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
513 48, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 16, /* bitsize */
516 FALSE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_unsigned, /* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 AARCH64_R_STR (MOVW_UABS_G3), /* name */
521 FALSE, /* partial_inplace */
522 0xffff, /* src_mask */
523 0xffff, /* dst_mask */
524 FALSE), /* pcrel_offset */
525
526 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
527 signed data or abs address inline. Will change instruction
528 to MOVN or MOVZ depending on sign of calculated value. */
529
530 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
531 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
532 0, /* rightshift */
533 2, /* size (0 = byte, 1 = short, 2 = long) */
534 16, /* bitsize */
535 FALSE, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (MOVW_SABS_G0), /* name */
540 FALSE, /* partial_inplace */
541 0xffff, /* src_mask */
542 0xffff, /* dst_mask */
543 FALSE), /* pcrel_offset */
544
545 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
546 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
547 16, /* rightshift */
548 2, /* size (0 = byte, 1 = short, 2 = long) */
549 16, /* bitsize */
550 FALSE, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (MOVW_SABS_G1), /* name */
555 FALSE, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 FALSE), /* pcrel_offset */
559
560 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
561 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
562 32, /* rightshift */
563 2, /* size (0 = byte, 1 = short, 2 = long) */
564 16, /* bitsize */
565 FALSE, /* pc_relative */
566 0, /* bitpos */
567 complain_overflow_signed, /* complain_on_overflow */
568 bfd_elf_generic_reloc, /* special_function */
569 AARCH64_R_STR (MOVW_SABS_G2), /* name */
570 FALSE, /* partial_inplace */
571 0xffff, /* src_mask */
572 0xffff, /* dst_mask */
573 FALSE), /* pcrel_offset */
574
575 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
576 addresses: PG(x) is (x & ~0xfff). */
577
578 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
579 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
580 2, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 19, /* bitsize */
583 TRUE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_signed, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (LD_PREL_LO19), /* name */
588 FALSE, /* partial_inplace */
589 0x7ffff, /* src_mask */
590 0x7ffff, /* dst_mask */
591 TRUE), /* pcrel_offset */
592
593 /* ADR: (S+A-P) & 0x1fffff */
594 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 21, /* bitsize */
598 TRUE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_signed, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (ADR_PREL_LO21), /* name */
603 FALSE, /* partial_inplace */
604 0x1fffff, /* src_mask */
605 0x1fffff, /* dst_mask */
606 TRUE), /* pcrel_offset */
607
608 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
609 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
610 12, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 21, /* bitsize */
613 TRUE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_signed, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
618 FALSE, /* partial_inplace */
619 0x1fffff, /* src_mask */
620 0x1fffff, /* dst_mask */
621 TRUE), /* pcrel_offset */
622
623 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
624 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
625 12, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 21, /* bitsize */
628 TRUE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
633 FALSE, /* partial_inplace */
634 0x1fffff, /* src_mask */
635 0x1fffff, /* dst_mask */
636 TRUE), /* pcrel_offset */
637
638 /* ADD: (S+A) & 0xfff [no overflow check] */
639 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 12, /* bitsize */
643 FALSE, /* pc_relative */
644 10, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
648 FALSE, /* partial_inplace */
649 0x3ffc00, /* src_mask */
650 0x3ffc00, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 /* LD/ST8: (S+A) & 0xfff */
654 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
655 0, /* rightshift */
656 2, /* size (0 = byte, 1 = short, 2 = long) */
657 12, /* bitsize */
658 FALSE, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_dont, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
663 FALSE, /* partial_inplace */
664 0xfff, /* src_mask */
665 0xfff, /* dst_mask */
666 FALSE), /* pcrel_offset */
667
668 /* Relocations for control-flow instructions. */
669
670 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
671 HOWTO (AARCH64_R (TSTBR14), /* type */
672 2, /* rightshift */
673 2, /* size (0 = byte, 1 = short, 2 = long) */
674 14, /* bitsize */
675 TRUE, /* pc_relative */
676 0, /* bitpos */
677 complain_overflow_signed, /* complain_on_overflow */
678 bfd_elf_generic_reloc, /* special_function */
679 AARCH64_R_STR (TSTBR14), /* name */
680 FALSE, /* partial_inplace */
681 0x3fff, /* src_mask */
682 0x3fff, /* dst_mask */
683 TRUE), /* pcrel_offset */
684
685 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
686 HOWTO (AARCH64_R (CONDBR19), /* type */
687 2, /* rightshift */
688 2, /* size (0 = byte, 1 = short, 2 = long) */
689 19, /* bitsize */
690 TRUE, /* pc_relative */
691 0, /* bitpos */
692 complain_overflow_signed, /* complain_on_overflow */
693 bfd_elf_generic_reloc, /* special_function */
694 AARCH64_R_STR (CONDBR19), /* name */
695 FALSE, /* partial_inplace */
696 0x7ffff, /* src_mask */
697 0x7ffff, /* dst_mask */
698 TRUE), /* pcrel_offset */
699
700 /* B: ((S+A-P) >> 2) & 0x3ffffff */
701 HOWTO (AARCH64_R (JUMP26), /* type */
702 2, /* rightshift */
703 2, /* size (0 = byte, 1 = short, 2 = long) */
704 26, /* bitsize */
705 TRUE, /* pc_relative */
706 0, /* bitpos */
707 complain_overflow_signed, /* complain_on_overflow */
708 bfd_elf_generic_reloc, /* special_function */
709 AARCH64_R_STR (JUMP26), /* name */
710 FALSE, /* partial_inplace */
711 0x3ffffff, /* src_mask */
712 0x3ffffff, /* dst_mask */
713 TRUE), /* pcrel_offset */
714
715 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
716 HOWTO (AARCH64_R (CALL26), /* type */
717 2, /* rightshift */
718 2, /* size (0 = byte, 1 = short, 2 = long) */
719 26, /* bitsize */
720 TRUE, /* pc_relative */
721 0, /* bitpos */
722 complain_overflow_signed, /* complain_on_overflow */
723 bfd_elf_generic_reloc, /* special_function */
724 AARCH64_R_STR (CALL26), /* name */
725 FALSE, /* partial_inplace */
726 0x3ffffff, /* src_mask */
727 0x3ffffff, /* dst_mask */
728 TRUE), /* pcrel_offset */
729
730 /* LD/ST16: (S+A) & 0xffe */
731 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
732 1, /* rightshift */
733 2, /* size (0 = byte, 1 = short, 2 = long) */
734 12, /* bitsize */
735 FALSE, /* pc_relative */
736 0, /* bitpos */
737 complain_overflow_dont, /* complain_on_overflow */
738 bfd_elf_generic_reloc, /* special_function */
739 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
740 FALSE, /* partial_inplace */
741 0xffe, /* src_mask */
742 0xffe, /* dst_mask */
743 FALSE), /* pcrel_offset */
744
745 /* LD/ST32: (S+A) & 0xffc */
746 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
747 2, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 12, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont, /* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
755 FALSE, /* partial_inplace */
756 0xffc, /* src_mask */
757 0xffc, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 /* LD/ST64: (S+A) & 0xff8 */
761 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
762 3, /* rightshift */
763 2, /* size (0 = byte, 1 = short, 2 = long) */
764 12, /* bitsize */
765 FALSE, /* pc_relative */
766 0, /* bitpos */
767 complain_overflow_dont, /* complain_on_overflow */
768 bfd_elf_generic_reloc, /* special_function */
769 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
770 FALSE, /* partial_inplace */
771 0xff8, /* src_mask */
772 0xff8, /* dst_mask */
773 FALSE), /* pcrel_offset */
774
775 /* LD/ST128: (S+A) & 0xff0 */
776 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
777 4, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 12, /* bitsize */
780 FALSE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont, /* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
785 FALSE, /* partial_inplace */
786 0xff0, /* src_mask */
787 0xff0, /* dst_mask */
788 FALSE), /* pcrel_offset */
789
790 /* Set a load-literal immediate field to bits
791 0x1FFFFC of G(S)-P */
792 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
793 2, /* rightshift */
 794          2,                     /* size (0 = byte, 1 = short, 2 = long) */
795 19, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_signed, /* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 AARCH64_R_STR (GOT_LD_PREL19), /* name */
801 FALSE, /* partial_inplace */
802 0xffffe0, /* src_mask */
803 0xffffe0, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 /* Get to the page for the GOT entry for the symbol
807 (G(S) - P) using an ADRP instruction. */
808 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
809 12, /* rightshift */
810 2, /* size (0 = byte, 1 = short, 2 = long) */
811 21, /* bitsize */
812 TRUE, /* pc_relative */
813 0, /* bitpos */
814 complain_overflow_dont, /* complain_on_overflow */
815 bfd_elf_generic_reloc, /* special_function */
816 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
817 FALSE, /* partial_inplace */
818 0x1fffff, /* src_mask */
819 0x1fffff, /* dst_mask */
820 TRUE), /* pcrel_offset */
821
822 /* LD64: GOT offset G(S) & 0xff8 */
823 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
824 3, /* rightshift */
825 2, /* size (0 = byte, 1 = short, 2 = long) */
826 12, /* bitsize */
827 FALSE, /* pc_relative */
828 0, /* bitpos */
829 complain_overflow_dont, /* complain_on_overflow */
830 bfd_elf_generic_reloc, /* special_function */
831 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
832 FALSE, /* partial_inplace */
833 0xff8, /* src_mask */
834 0xff8, /* dst_mask */
835 FALSE), /* pcrel_offset */
836
837 /* LD32: GOT offset G(S) & 0xffc */
838 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
839 2, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 12, /* bitsize */
842 FALSE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont, /* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
847 FALSE, /* partial_inplace */
848 0xffc, /* src_mask */
849 0xffc, /* dst_mask */
850 FALSE), /* pcrel_offset */
851
852 /* LD64: GOT offset for the symbol. */
853 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
854 3, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 12, /* bitsize */
857 FALSE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_unsigned, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
862 FALSE, /* partial_inplace */
863 0x7ff8, /* src_mask */
864 0x7ff8, /* dst_mask */
865 FALSE), /* pcrel_offset */
866
867 /* LD32: GOT offset to the page address of GOT table.
868 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
869 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
870 2, /* rightshift */
871 2, /* size (0 = byte, 1 = short, 2 = long) */
872 12, /* bitsize */
873 FALSE, /* pc_relative */
874 0, /* bitpos */
875 complain_overflow_unsigned, /* complain_on_overflow */
876 bfd_elf_generic_reloc, /* special_function */
877 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
878 FALSE, /* partial_inplace */
879 0x5ffc, /* src_mask */
880 0x5ffc, /* dst_mask */
881 FALSE), /* pcrel_offset */
882
883 /* LD64: GOT offset to the page address of GOT table.
884 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
885 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
886 3, /* rightshift */
887 2, /* size (0 = byte, 1 = short, 2 = long) */
888 12, /* bitsize */
889 FALSE, /* pc_relative */
890 0, /* bitpos */
891 complain_overflow_unsigned, /* complain_on_overflow */
892 bfd_elf_generic_reloc, /* special_function */
893 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
894 FALSE, /* partial_inplace */
895 0x7ff8, /* src_mask */
896 0x7ff8, /* dst_mask */
897 FALSE), /* pcrel_offset */
898
899 /* Get to the page for the GOT entry for the symbol
900 (G(S) - P) using an ADRP instruction. */
901 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
902 12, /* rightshift */
903 2, /* size (0 = byte, 1 = short, 2 = long) */
904 21, /* bitsize */
905 TRUE, /* pc_relative */
906 0, /* bitpos */
907 complain_overflow_dont, /* complain_on_overflow */
908 bfd_elf_generic_reloc, /* special_function */
909 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
910 FALSE, /* partial_inplace */
911 0x1fffff, /* src_mask */
912 0x1fffff, /* dst_mask */
913 TRUE), /* pcrel_offset */
914
915 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
916 0, /* rightshift */
917 2, /* size (0 = byte, 1 = short, 2 = long) */
918 21, /* bitsize */
919 TRUE, /* pc_relative */
920 0, /* bitpos */
921 complain_overflow_dont, /* complain_on_overflow */
922 bfd_elf_generic_reloc, /* special_function */
923 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
924 FALSE, /* partial_inplace */
925 0x1fffff, /* src_mask */
926 0x1fffff, /* dst_mask */
927 TRUE), /* pcrel_offset */
928
929 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
930 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
931 0, /* rightshift */
932 2, /* size (0 = byte, 1 = short, 2 = long) */
933 12, /* bitsize */
934 FALSE, /* pc_relative */
935 0, /* bitpos */
936 complain_overflow_dont, /* complain_on_overflow */
937 bfd_elf_generic_reloc, /* special_function */
938 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
939 FALSE, /* partial_inplace */
940 0xfff, /* src_mask */
941 0xfff, /* dst_mask */
942 FALSE), /* pcrel_offset */
943
944 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
945 16, /* rightshift */
946 2, /* size (0 = byte, 1 = short, 2 = long) */
947 16, /* bitsize */
948 FALSE, /* pc_relative */
949 0, /* bitpos */
950 complain_overflow_dont, /* complain_on_overflow */
951 bfd_elf_generic_reloc, /* special_function */
952 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
953 FALSE, /* partial_inplace */
954 0xffff, /* src_mask */
955 0xffff, /* dst_mask */
956 FALSE), /* pcrel_offset */
957
958 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
959 0, /* rightshift */
960 2, /* size (0 = byte, 1 = short, 2 = long) */
961 16, /* bitsize */
962 FALSE, /* pc_relative */
963 0, /* bitpos */
964 complain_overflow_dont, /* complain_on_overflow */
965 bfd_elf_generic_reloc, /* special_function */
966 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
967 FALSE, /* partial_inplace */
968 0xffff, /* src_mask */
969 0xffff, /* dst_mask */
970 FALSE), /* pcrel_offset */
971
972 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
973 12, /* rightshift */
974 2, /* size (0 = byte, 1 = short, 2 = long) */
975 21, /* bitsize */
976 FALSE, /* pc_relative */
977 0, /* bitpos */
978 complain_overflow_dont, /* complain_on_overflow */
979 bfd_elf_generic_reloc, /* special_function */
980 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
981 FALSE, /* partial_inplace */
982 0x1fffff, /* src_mask */
983 0x1fffff, /* dst_mask */
984 FALSE), /* pcrel_offset */
985
986 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
987 3, /* rightshift */
988 2, /* size (0 = byte, 1 = short, 2 = long) */
989 12, /* bitsize */
990 FALSE, /* pc_relative */
991 0, /* bitpos */
992 complain_overflow_dont, /* complain_on_overflow */
993 bfd_elf_generic_reloc, /* special_function */
994 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
995 FALSE, /* partial_inplace */
996 0xff8, /* src_mask */
997 0xff8, /* dst_mask */
998 FALSE), /* pcrel_offset */
999
1000 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1001 2, /* rightshift */
1002 2, /* size (0 = byte, 1 = short, 2 = long) */
1003 12, /* bitsize */
1004 FALSE, /* pc_relative */
1005 0, /* bitpos */
1006 complain_overflow_dont, /* complain_on_overflow */
1007 bfd_elf_generic_reloc, /* special_function */
1008 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1009 FALSE, /* partial_inplace */
1010 0xffc, /* src_mask */
1011 0xffc, /* dst_mask */
1012 FALSE), /* pcrel_offset */
1013
1014 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1015 2, /* rightshift */
1016 2, /* size (0 = byte, 1 = short, 2 = long) */
1017 19, /* bitsize */
1018 FALSE, /* pc_relative */
1019 0, /* bitpos */
1020 complain_overflow_dont, /* complain_on_overflow */
1021 bfd_elf_generic_reloc, /* special_function */
1022 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1023 FALSE, /* partial_inplace */
1024 0x1ffffc, /* src_mask */
1025 0x1ffffc, /* dst_mask */
1026 FALSE), /* pcrel_offset */
1027
1028 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1029 0, /* rightshift */
1030 2, /* size (0 = byte, 1 = short, 2 = long) */
1031 21, /* bitsize */
1032 TRUE, /* pc_relative */
1033 0, /* bitpos */
1034 complain_overflow_signed, /* complain_on_overflow */
1035 bfd_elf_generic_reloc, /* special_function */
1036 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1037 FALSE, /* partial_inplace */
1038 0x1fffff, /* src_mask */
1039 0x1fffff, /* dst_mask */
1040 TRUE), /* pcrel_offset */
1041
1042 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1043 32, /* rightshift */
1044 2, /* size (0 = byte, 1 = short, 2 = long) */
1045 16, /* bitsize */
1046 FALSE, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_unsigned, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1051 FALSE, /* partial_inplace */
1052 0xffff, /* src_mask */
1053 0xffff, /* dst_mask */
1054 FALSE), /* pcrel_offset */
1055
1056 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1057 16, /* rightshift */
1058 2, /* size (0 = byte, 1 = short, 2 = long) */
1059 16, /* bitsize */
1060 FALSE, /* pc_relative */
1061 0, /* bitpos */
1062 complain_overflow_dont, /* complain_on_overflow */
1063 bfd_elf_generic_reloc, /* special_function */
1064 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1065 FALSE, /* partial_inplace */
1066 0xffff, /* src_mask */
1067 0xffff, /* dst_mask */
1068 FALSE), /* pcrel_offset */
1069
1070 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1071 16, /* rightshift */
1072 2, /* size (0 = byte, 1 = short, 2 = long) */
1073 16, /* bitsize */
1074 FALSE, /* pc_relative */
1075 0, /* bitpos */
1076 complain_overflow_dont, /* complain_on_overflow */
1077 bfd_elf_generic_reloc, /* special_function */
1078 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1079 FALSE, /* partial_inplace */
1080 0xffff, /* src_mask */
1081 0xffff, /* dst_mask */
1082 FALSE), /* pcrel_offset */
1083
1084 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1085 0, /* rightshift */
1086 2, /* size (0 = byte, 1 = short, 2 = long) */
1087 16, /* bitsize */
1088 FALSE, /* pc_relative */
1089 0, /* bitpos */
1090 complain_overflow_dont, /* complain_on_overflow */
1091 bfd_elf_generic_reloc, /* special_function */
1092 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1093 FALSE, /* partial_inplace */
1094 0xffff, /* src_mask */
1095 0xffff, /* dst_mask */
1096 FALSE), /* pcrel_offset */
1097
1098 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1099 0, /* rightshift */
1100 2, /* size (0 = byte, 1 = short, 2 = long) */
1101 16, /* bitsize */
1102 FALSE, /* pc_relative */
1103 0, /* bitpos */
1104 complain_overflow_dont, /* complain_on_overflow */
1105 bfd_elf_generic_reloc, /* special_function */
1106 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1107 FALSE, /* partial_inplace */
1108 0xffff, /* src_mask */
1109 0xffff, /* dst_mask */
1110 FALSE), /* pcrel_offset */
1111
1112 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1113 12, /* rightshift */
1114 2, /* size (0 = byte, 1 = short, 2 = long) */
1115 12, /* bitsize */
1116 FALSE, /* pc_relative */
1117 0, /* bitpos */
1118 complain_overflow_unsigned, /* complain_on_overflow */
1119 bfd_elf_generic_reloc, /* special_function */
1120 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1121 FALSE, /* partial_inplace */
1122 0xfff, /* src_mask */
1123 0xfff, /* dst_mask */
1124 FALSE), /* pcrel_offset */
1125
1126 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1127 0, /* rightshift */
1128 2, /* size (0 = byte, 1 = short, 2 = long) */
1129 12, /* bitsize */
1130 FALSE, /* pc_relative */
1131 0, /* bitpos */
1132 complain_overflow_unsigned, /* complain_on_overflow */
1133 bfd_elf_generic_reloc, /* special_function */
1134 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1135 FALSE, /* partial_inplace */
1136 0xfff, /* src_mask */
1137 0xfff, /* dst_mask */
1138 FALSE), /* pcrel_offset */
1139
1140 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1141 0, /* rightshift */
1142 2, /* size (0 = byte, 1 = short, 2 = long) */
1143 12, /* bitsize */
1144 FALSE, /* pc_relative */
1145 0, /* bitpos */
1146 complain_overflow_dont, /* complain_on_overflow */
1147 bfd_elf_generic_reloc, /* special_function */
1148 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1149 FALSE, /* partial_inplace */
1150 0xfff, /* src_mask */
1151 0xfff, /* dst_mask */
1152 FALSE), /* pcrel_offset */
1153
1154 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1155 2, /* rightshift */
1156 2, /* size (0 = byte, 1 = short, 2 = long) */
1157 19, /* bitsize */
1158 TRUE, /* pc_relative */
1159 0, /* bitpos */
1160 complain_overflow_dont, /* complain_on_overflow */
1161 bfd_elf_generic_reloc, /* special_function */
1162 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1163 FALSE, /* partial_inplace */
1164 0x0ffffe0, /* src_mask */
1165 0x0ffffe0, /* dst_mask */
1166 TRUE), /* pcrel_offset */
1167
1168 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1169 0, /* rightshift */
1170 2, /* size (0 = byte, 1 = short, 2 = long) */
1171 21, /* bitsize */
1172 TRUE, /* pc_relative */
1173 0, /* bitpos */
1174 complain_overflow_dont, /* complain_on_overflow */
1175 bfd_elf_generic_reloc, /* special_function */
1176 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1177 FALSE, /* partial_inplace */
1178 0x1fffff, /* src_mask */
1179 0x1fffff, /* dst_mask */
1180 TRUE), /* pcrel_offset */
1181
1182 /* Get to the page for the GOT entry for the symbol
1183 (G(S) - P) using an ADRP instruction. */
1184 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1185 12, /* rightshift */
1186 2, /* size (0 = byte, 1 = short, 2 = long) */
1187 21, /* bitsize */
1188 TRUE, /* pc_relative */
1189 0, /* bitpos */
1190 complain_overflow_dont, /* complain_on_overflow */
1191 bfd_elf_generic_reloc, /* special_function */
1192 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1193 FALSE, /* partial_inplace */
1194 0x1fffff, /* src_mask */
1195 0x1fffff, /* dst_mask */
1196 TRUE), /* pcrel_offset */
1197
1198 /* LD64: GOT offset G(S) & 0xff8. */
1199 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1200 3, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 12, /* bitsize */
1203 FALSE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont, /* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1208 FALSE, /* partial_inplace */
1209 0xff8, /* src_mask */
1210 0xff8, /* dst_mask */
1211 FALSE), /* pcrel_offset */
1212
1213 /* LD32: GOT offset G(S) & 0xffc. */
1214 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1215 2, /* rightshift */
1216 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 12, /* bitsize */
1218 FALSE, /* pc_relative */
1219 0, /* bitpos */
1220 complain_overflow_dont, /* complain_on_overflow */
1221 bfd_elf_generic_reloc, /* special_function */
1222 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1223 FALSE, /* partial_inplace */
1224 0xffc, /* src_mask */
1225 0xffc, /* dst_mask */
1226 FALSE), /* pcrel_offset */
1227
1228 /* ADD: GOT offset G(S) & 0xfff. */
1229 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 12, /* bitsize */
1233 FALSE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont, /* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1238 FALSE, /* partial_inplace */
1239 0xfff, /* src_mask */
1240 0xfff, /* dst_mask */
1241 FALSE), /* pcrel_offset */
1242
1243 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1244 16, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 12, /* bitsize */
1247 FALSE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont, /* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1252 FALSE, /* partial_inplace */
1253 0xffff, /* src_mask */
1254 0xffff, /* dst_mask */
1255 FALSE), /* pcrel_offset */
1256
1257 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 12, /* bitsize */
1261 FALSE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont, /* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1266 FALSE, /* partial_inplace */
1267 0xffff, /* src_mask */
1268 0xffff, /* dst_mask */
1269 FALSE), /* pcrel_offset */
1270
1271 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 12, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont, /* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 AARCH64_R_STR (TLSDESC_LDR), /* name */
1280 FALSE, /* partial_inplace */
1281 0x0, /* src_mask */
1282 0x0, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 12, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_dont, /* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 AARCH64_R_STR (TLSDESC_ADD), /* name */
1294 FALSE, /* partial_inplace */
1295 0x0, /* src_mask */
1296 0x0, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 0, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont, /* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 AARCH64_R_STR (TLSDESC_CALL), /* name */
1308 FALSE, /* partial_inplace */
1309 0x0, /* src_mask */
1310 0x0, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (AARCH64_R (COPY), /* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 64, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_bitfield, /* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 AARCH64_R_STR (COPY), /* name */
1322 TRUE, /* partial_inplace */
1323 0xffffffff, /* src_mask */
1324 0xffffffff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 64, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield, /* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 AARCH64_R_STR (GLOB_DAT), /* name */
1336 TRUE, /* partial_inplace */
1337 0xffffffff, /* src_mask */
1338 0xffffffff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 64, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_bitfield, /* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 AARCH64_R_STR (JUMP_SLOT), /* name */
1350 TRUE, /* partial_inplace */
1351 0xffffffff, /* src_mask */
1352 0xffffffff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 HOWTO (AARCH64_R (RELATIVE), /* type */
1356 0, /* rightshift */
1357 2, /* size (0 = byte, 1 = short, 2 = long) */
1358 64, /* bitsize */
1359 FALSE, /* pc_relative */
1360 0, /* bitpos */
1361 complain_overflow_bitfield, /* complain_on_overflow */
1362 bfd_elf_generic_reloc, /* special_function */
1363 AARCH64_R_STR (RELATIVE), /* name */
1364 TRUE, /* partial_inplace */
1365 ALL_ONES, /* src_mask */
1366 ALL_ONES, /* dst_mask */
1367 FALSE), /* pcrel_offset */
1368
1369 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1370 0, /* rightshift */
1371 2, /* size (0 = byte, 1 = short, 2 = long) */
1372 64, /* bitsize */
1373 FALSE, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 #if ARCH_SIZE == 64
1378 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1379 #else
1380 AARCH64_R_STR (TLS_DTPMOD), /* name */
1381 #endif
1382 FALSE, /* partial_inplace */
1383 0, /* src_mask */
1384 ALL_ONES, /* dst_mask */
1385          FALSE),                /* pcrel_offset */
1386
1387 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 64, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_dont, /* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 #if ARCH_SIZE == 64
1396 AARCH64_R_STR (TLS_DTPREL64), /* name */
1397 #else
1398 AARCH64_R_STR (TLS_DTPREL), /* name */
1399 #endif
1400 FALSE, /* partial_inplace */
1401 0, /* src_mask */
1402 ALL_ONES, /* dst_mask */
1403 FALSE), /* pcrel_offset */
1404
1405 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1406 0, /* rightshift */
1407 2, /* size (0 = byte, 1 = short, 2 = long) */
1408 64, /* bitsize */
1409 FALSE, /* pc_relative */
1410 0, /* bitpos */
1411 complain_overflow_dont, /* complain_on_overflow */
1412 bfd_elf_generic_reloc, /* special_function */
1413 #if ARCH_SIZE == 64
1414 AARCH64_R_STR (TLS_TPREL64), /* name */
1415 #else
1416 AARCH64_R_STR (TLS_TPREL), /* name */
1417 #endif
1418 FALSE, /* partial_inplace */
1419 0, /* src_mask */
1420 ALL_ONES, /* dst_mask */
1421 FALSE), /* pcrel_offset */
1422
1423 HOWTO (AARCH64_R (TLSDESC), /* type */
1424 0, /* rightshift */
1425 2, /* size (0 = byte, 1 = short, 2 = long) */
1426 64, /* bitsize */
1427 FALSE, /* pc_relative */
1428 0, /* bitpos */
1429 complain_overflow_dont, /* complain_on_overflow */
1430 bfd_elf_generic_reloc, /* special_function */
1431 AARCH64_R_STR (TLSDESC), /* name */
1432 FALSE, /* partial_inplace */
1433 0, /* src_mask */
1434 ALL_ONES, /* dst_mask */
1435 FALSE), /* pcrel_offset */
1436
1437 HOWTO (AARCH64_R (IRELATIVE), /* type */
1438 0, /* rightshift */
1439 2, /* size (0 = byte, 1 = short, 2 = long) */
1440 64, /* bitsize */
1441 FALSE, /* pc_relative */
1442 0, /* bitpos */
1443 complain_overflow_bitfield, /* complain_on_overflow */
1444 bfd_elf_generic_reloc, /* special_function */
1445 AARCH64_R_STR (IRELATIVE), /* name */
1446 FALSE, /* partial_inplace */
1447 0, /* src_mask */
1448 ALL_ONES, /* dst_mask */
1449 FALSE), /* pcrel_offset */
1450
1451 EMPTY_HOWTO (0),
1452 };
1453
1454 static reloc_howto_type elfNN_aarch64_howto_none =
1455 HOWTO (R_AARCH64_NONE, /* type */
1456 0, /* rightshift */
1457 3, /* size (0 = byte, 1 = short, 2 = long) */
1458 0, /* bitsize */
1459 FALSE, /* pc_relative */
1460 0, /* bitpos */
1461 complain_overflow_dont,/* complain_on_overflow */
1462 bfd_elf_generic_reloc, /* special_function */
1463 "R_AARCH64_NONE", /* name */
1464 FALSE, /* partial_inplace */
1465 0, /* src_mask */
1466 0, /* dst_mask */
1467 FALSE); /* pcrel_offset */
1468
1469 /* Given HOWTO, return the bfd internal relocation enumerator. */
1470
1471 static bfd_reloc_code_real_type
1472 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1473 {
1474 const int size
1475 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1476 const ptrdiff_t offset
1477 = howto - elfNN_aarch64_howto_table;
1478
1479 if (offset > 0 && offset < size - 1)
1480 return BFD_RELOC_AARCH64_RELOC_START + offset;
1481
1482 if (howto == &elfNN_aarch64_howto_none)
1483 return BFD_RELOC_AARCH64_NONE;
1484
1485 return BFD_RELOC_AARCH64_RELOC_START;
1486 }
1487
1488 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1489
1490 static bfd_reloc_code_real_type
1491 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1492 {
1493 static bfd_boolean initialized_p = FALSE;
1494 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1495 static unsigned int offsets[R_AARCH64_end];
1496
1497 if (initialized_p == FALSE)
1498 {
1499 unsigned int i;
1500
1501 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1502 if (elfNN_aarch64_howto_table[i].type != 0)
1503 offsets[elfNN_aarch64_howto_table[i].type] = i;
1504
1505 initialized_p = TRUE;
1506 }
1507
1508 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1509 return BFD_RELOC_AARCH64_NONE;
1510
1511 /* PR 17512: file: b371e70a. */
1512 if (r_type >= R_AARCH64_end)
1513 {
1514 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1515 bfd_set_error (bfd_error_bad_value);
1516 return BFD_RELOC_AARCH64_NONE;
1517 }
1518
1519 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1520 }
1521
1522 struct elf_aarch64_reloc_map
1523 {
1524 bfd_reloc_code_real_type from;
1525 bfd_reloc_code_real_type to;
1526 };
1527
1528 /* Map bfd generic reloc to AArch64-specific reloc. */
1529 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1530 {
1531 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1532
1533 /* Basic data relocations. */
1534 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1535 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1536 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1537 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1538 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1539 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1540 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1541 };
1542
1543 /* Given the bfd internal relocation enumerator in CODE, return the
1544 corresponding howto entry. */
1545
1546 static reloc_howto_type *
1547 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1548 {
1549 unsigned int i;
1550
1551 /* Convert bfd generic reloc to AArch64-specific reloc. */
1552 if (code < BFD_RELOC_AARCH64_RELOC_START
1553 || code > BFD_RELOC_AARCH64_RELOC_END)
1554 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1555 if (elf_aarch64_reloc_map[i].from == code)
1556 {
1557 code = elf_aarch64_reloc_map[i].to;
1558 break;
1559 }
1560
1561 if (code > BFD_RELOC_AARCH64_RELOC_START
1562 && code < BFD_RELOC_AARCH64_RELOC_END)
1563 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1564 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1565
1566 if (code == BFD_RELOC_AARCH64_NONE)
1567 return &elfNN_aarch64_howto_none;
1568
1569 return NULL;
1570 }
1571
1572 static reloc_howto_type *
1573 elfNN_aarch64_howto_from_type (unsigned int r_type)
1574 {
1575 bfd_reloc_code_real_type val;
1576 reloc_howto_type *howto;
1577
1578 #if ARCH_SIZE == 32
1579 if (r_type > 256)
1580 {
1581 bfd_set_error (bfd_error_bad_value);
1582 return NULL;
1583 }
1584 #endif
1585
1586 if (r_type == R_AARCH64_NONE)
1587 return &elfNN_aarch64_howto_none;
1588
1589 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1590 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1591
1592 if (howto != NULL)
1593 return howto;
1594
1595 bfd_set_error (bfd_error_bad_value);
1596 return NULL;
1597 }
1598
1599 static void
1600 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1601 Elf_Internal_Rela *elf_reloc)
1602 {
1603 unsigned int r_type;
1604
1605 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1606 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1607 }
1608
1609 static reloc_howto_type *
1610 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1611 bfd_reloc_code_real_type code)
1612 {
1613 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1614
1615 if (howto != NULL)
1616 return howto;
1617
1618 bfd_set_error (bfd_error_bad_value);
1619 return NULL;
1620 }
1621
1622 static reloc_howto_type *
1623 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1624 const char *r_name)
1625 {
1626 unsigned int i;
1627
1628 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1629 if (elfNN_aarch64_howto_table[i].name != NULL
1630 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1631 return &elfNN_aarch64_howto_table[i];
1632
1633 return NULL;
1634 }
1635
1636 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1637 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1638 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1639 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1640
1641 /* The linker script knows the section names for placement.
1642 The entry_names are used to do simple name mangling on the stubs.
1643 Given a function name, and its type, the stub can be found. The
1644 name can be changed. The only requirement is that the %s be present. */
1645 #define STUB_ENTRY_NAME "__%s_veneer"
1646
1647 /* The name of the dynamic interpreter. This is put in the .interp
1648 section. */
1649 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1650
1651 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1652 (((1 << 25) - 1) << 2)
1653 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1654 (-((1 << 25) << 2))
1655
1656 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1657 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1658
1659 static int
1660 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1661 {
1662 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1663 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1664 }
1665
1666 static int
1667 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1668 {
1669 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1670 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1671 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1672 }
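/* Worked example (illustrative only, not compiled): the limits above
   reflect the signed 26-bit, word-scaled immediate of B/BL, giving a
   reach of [-0x8000000, +0x7fffffc] bytes (roughly +/-128MB), and the
   signed 21-bit page immediate of ADRP (roughly +/-4GB in 4KB pages).  */
#if 0
  /* A call from 0x400000 can directly reach 0x83ffffc but not
     0x8400000, which is one word beyond the forward limit.  */
  BFD_ASSERT (aarch64_valid_branch_p (0x83ffffc, 0x400000));
  BFD_ASSERT (! aarch64_valid_branch_p (0x8400000, 0x400000));
#endif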
1673
1674 static const uint32_t aarch64_adrp_branch_stub [] =
1675 {
1676 0x90000010, /* adrp ip0, X */
1677 /* R_AARCH64_ADR_HI21_PCREL(X) */
1678 0x91000210, /* add ip0, ip0, :lo12:X */
1679 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1680 0xd61f0200, /* br ip0 */
1681 };
1682
1683 static const uint32_t aarch64_long_branch_stub[] =
1684 {
1685 #if ARCH_SIZE == 64
1686 0x58000090, /* ldr ip0, 1f */
1687 #else
1688 0x18000090, /* ldr wip0, 1f */
1689 #endif
1690 0x10000011, /* adr ip1, #0 */
1691 0x8b110210, /* add ip0, ip0, ip1 */
1692 0xd61f0200, /* br ip0 */
1693 0x00000000, /* 1: .xword or .word
1694 R_AARCH64_PRELNN(X) + 12
1695 */
1696 0x00000000,
1697 };
1698
1699 static const uint32_t aarch64_erratum_835769_stub[] =
1700 {
1701 0x00000000, /* Placeholder for multiply accumulate. */
1702 0x14000000, /* b <label> */
1703 };
1704
1705 static const uint32_t aarch64_erratum_843419_stub[] =
1706 {
1707 0x00000000, /* Placeholder for LDR instruction. */
1708 0x14000000, /* b <label> */
1709 };
1710
1711 /* Section name for stubs is the associated section name plus this
1712 string. */
1713 #define STUB_SUFFIX ".stub"
1714
1715 enum elf_aarch64_stub_type
1716 {
1717 aarch64_stub_none,
1718 aarch64_stub_adrp_branch,
1719 aarch64_stub_long_branch,
1720 aarch64_stub_erratum_835769_veneer,
1721 aarch64_stub_erratum_843419_veneer,
1722 };
1723
1724 struct elf_aarch64_stub_hash_entry
1725 {
1726 /* Base hash table entry structure. */
1727 struct bfd_hash_entry root;
1728
1729 /* The stub section. */
1730 asection *stub_sec;
1731
1732 /* Offset within stub_sec of the beginning of this stub. */
1733 bfd_vma stub_offset;
1734
1735 /* Given the symbol's value and its section we can determine its final
1736 value when building the stubs (so the stub knows where to jump). */
1737 bfd_vma target_value;
1738 asection *target_section;
1739
1740 enum elf_aarch64_stub_type stub_type;
1741
1742 /* The symbol table entry, if any, that this was derived from. */
1743 struct elf_aarch64_link_hash_entry *h;
1744
1745 /* Destination symbol type. */
1746 unsigned char st_type;
1747
1748 /* Where this stub is being called from, or, in the case of combined
1749 stub sections, the first input section in the group. */
1750 asection *id_sec;
1751
1752 /* The name for the local symbol at the start of this stub. The
1753 stub name in the hash table has to be unique; this does not, so
1754 it can be friendlier. */
1755 char *output_name;
1756
1757 /* The instruction which caused this stub to be generated (only valid for
1758 erratum 835769 workaround stubs at present). */
1759 uint32_t veneered_insn;
1760
1761 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1762 bfd_vma adrp_offset;
1763 };
1764
1765 /* Used to build a map of a section. This is required for mixed-endian
1766 code/data. */
1767
1768 typedef struct elf_elf_section_map
1769 {
1770 bfd_vma vma;
1771 char type;
1772 }
1773 elf_aarch64_section_map;
1774
1775
1776 typedef struct _aarch64_elf_section_data
1777 {
1778 struct bfd_elf_section_data elf;
1779 unsigned int mapcount;
1780 unsigned int mapsize;
1781 elf_aarch64_section_map *map;
1782 }
1783 _aarch64_elf_section_data;
1784
1785 #define elf_aarch64_section_data(sec) \
1786 ((_aarch64_elf_section_data *) elf_section_data (sec))
1787
1788 /* The size of the thread control block which is defined to be two pointers. */
1789 #define TCB_SIZE ((ARCH_SIZE/8)*2)
1790
1791 struct elf_aarch64_local_symbol
1792 {
1793 unsigned int got_type;
1794 bfd_signed_vma got_refcount;
1795 bfd_vma got_offset;
1796
1797 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1798 offset is from the end of the jump table and reserved entries
1799 within the PLTGOT.
1800
1801 The magic value (bfd_vma) -1 indicates that an offset has not been
1802 allocated. */
1803 bfd_vma tlsdesc_got_jump_table_offset;
1804 };
1805
1806 struct elf_aarch64_obj_tdata
1807 {
1808 struct elf_obj_tdata root;
1809
1810 /* local symbol descriptors */
1811 struct elf_aarch64_local_symbol *locals;
1812
1813 /* Zero to warn when linking objects with incompatible enum sizes. */
1814 int no_enum_size_warning;
1815
1816 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1817 int no_wchar_size_warning;
1818 };
1819
1820 #define elf_aarch64_tdata(bfd) \
1821 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1822
1823 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1824
1825 #define is_aarch64_elf(bfd) \
1826 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1827 && elf_tdata (bfd) != NULL \
1828 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1829
1830 static bfd_boolean
1831 elfNN_aarch64_mkobject (bfd *abfd)
1832 {
1833 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1834 AARCH64_ELF_DATA);
1835 }
1836
1837 #define elf_aarch64_hash_entry(ent) \
1838 ((struct elf_aarch64_link_hash_entry *)(ent))
1839
1840 #define GOT_UNKNOWN 0
1841 #define GOT_NORMAL 1
1842 #define GOT_TLS_GD 2
1843 #define GOT_TLS_IE 4
1844 #define GOT_TLSDESC_GD 8
1845
1846 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
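/* Illustrative values (not compiled): got_type is a bit mask, so a
   symbol referenced through both a TLS descriptor sequence and a
   traditional GD sequence can carry both bits at once.  */
#if 0
  BFD_ASSERT (GOT_TLS_GD_ANY_P (GOT_TLS_GD));
  BFD_ASSERT (GOT_TLS_GD_ANY_P (GOT_TLS_GD | GOT_TLSDESC_GD));
  BFD_ASSERT (! GOT_TLS_GD_ANY_P (GOT_TLS_IE));
#endif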
1847
1848 /* AArch64 ELF linker hash entry. */
1849 struct elf_aarch64_link_hash_entry
1850 {
1851 struct elf_link_hash_entry root;
1852
1853 /* Track dynamic relocs copied for this symbol. */
1854 struct elf_dyn_relocs *dyn_relocs;
1855
1856 /* Since PLT entries have variable size, we need to record the
1857 index into .got.plt instead of recomputing it from the PLT
1858 offset. */
1859 bfd_signed_vma plt_got_offset;
1860
1861 /* Bit mask representing the type of GOT entry(s), if any, required by
1862 this symbol. */
1863 unsigned int got_type;
1864
1865 /* A pointer to the most recently used stub hash entry against this
1866 symbol. */
1867 struct elf_aarch64_stub_hash_entry *stub_cache;
1868
1869 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1870 is from the end of the jump table and reserved entries within the PLTGOT.
1871
1872 The magic value (bfd_vma) -1 indicates that an offset has not
1873 been allocated. */
1874 bfd_vma tlsdesc_got_jump_table_offset;
1875 };
1876
1877 static unsigned int
1878 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1879 bfd *abfd,
1880 unsigned long r_symndx)
1881 {
1882 if (h)
1883 return elf_aarch64_hash_entry (h)->got_type;
1884
1885 if (! elf_aarch64_locals (abfd))
1886 return GOT_UNKNOWN;
1887
1888 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1889 }
1890
1891 /* Get the AArch64 elf linker hash table from a link_info structure. */
1892 #define elf_aarch64_hash_table(info) \
1893 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1894
1895 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1896 ((struct elf_aarch64_stub_hash_entry *) \
1897 bfd_hash_lookup ((table), (string), (create), (copy)))
1898
1899 /* AArch64 ELF linker hash table. */
1900 struct elf_aarch64_link_hash_table
1901 {
1902 /* The main hash table. */
1903 struct elf_link_hash_table root;
1904
1905 /* Nonzero to force PIC branch veneers. */
1906 int pic_veneer;
1907
1908 /* Fix erratum 835769. */
1909 int fix_erratum_835769;
1910
1911 /* Fix erratum 843419. */
1912 int fix_erratum_843419;
1913
1914 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1915 int fix_erratum_843419_adr;
1916
1917 /* The number of bytes in the initial entry in the PLT. */
1918 bfd_size_type plt_header_size;
1919
1920 /* The number of bytes in the subsequent PLT entries. */
1921 bfd_size_type plt_entry_size;
1922
1923 /* Short-cuts to get to dynamic linker sections. */
1924 asection *sdynbss;
1925 asection *srelbss;
1926
1927 /* Small local sym cache. */
1928 struct sym_cache sym_cache;
1929
1930 /* For convenience in allocate_dynrelocs. */
1931 bfd *obfd;
1932
1933 /* The amount of space used by the reserved portion of the sgotplt
1934 section, plus whatever space is used by the jump slots. */
1935 bfd_vma sgotplt_jump_table_size;
1936
1937 /* The stub hash table. */
1938 struct bfd_hash_table stub_hash_table;
1939
1940 /* Linker stub bfd. */
1941 bfd *stub_bfd;
1942
1943 /* Linker call-backs. */
1944 asection *(*add_stub_section) (const char *, asection *);
1945 void (*layout_sections_again) (void);
1946
1947 /* Array to keep track of which stub sections have been created, and
1948 information on stub grouping. */
1949 struct map_stub
1950 {
1951 /* This is the section to which stubs in the group will be
1952 attached. */
1953 asection *link_sec;
1954 /* The stub section. */
1955 asection *stub_sec;
1956 } *stub_group;
1957
1958 /* Assorted information used by elfNN_aarch64_size_stubs. */
1959 unsigned int bfd_count;
1960 int top_index;
1961 asection **input_list;
1962
1963 /* The offset into splt of the PLT entry for the TLS descriptor
1964 resolver. Special values are 0, if not necessary (or not found
1965 to be necessary yet), and -1 if needed but not determined
1966 yet. */
1967 bfd_vma tlsdesc_plt;
1968
1969 /* The GOT offset for the lazy trampoline. Communicated to the
1970 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1971 indicates an offset is not allocated. */
1972 bfd_vma dt_tlsdesc_got;
1973
1974 /* Used by local STT_GNU_IFUNC symbols. */
1975 htab_t loc_hash_table;
1976 void * loc_hash_memory;
1977 };
1978
1979 /* Create an entry in an AArch64 ELF linker hash table. */
1980
1981 static struct bfd_hash_entry *
1982 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1983 struct bfd_hash_table *table,
1984 const char *string)
1985 {
1986 struct elf_aarch64_link_hash_entry *ret =
1987 (struct elf_aarch64_link_hash_entry *) entry;
1988
1989 /* Allocate the structure if it has not already been allocated by a
1990 subclass. */
1991 if (ret == NULL)
1992 ret = bfd_hash_allocate (table,
1993 sizeof (struct elf_aarch64_link_hash_entry));
1994 if (ret == NULL)
1995 return (struct bfd_hash_entry *) ret;
1996
1997 /* Call the allocation method of the superclass. */
1998 ret = ((struct elf_aarch64_link_hash_entry *)
1999 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2000 table, string));
2001 if (ret != NULL)
2002 {
2003 ret->dyn_relocs = NULL;
2004 ret->got_type = GOT_UNKNOWN;
2005 ret->plt_got_offset = (bfd_vma) - 1;
2006 ret->stub_cache = NULL;
2007 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2008 }
2009
2010 return (struct bfd_hash_entry *) ret;
2011 }
2012
2013 /* Initialize an entry in the stub hash table. */
2014
2015 static struct bfd_hash_entry *
2016 stub_hash_newfunc (struct bfd_hash_entry *entry,
2017 struct bfd_hash_table *table, const char *string)
2018 {
2019 /* Allocate the structure if it has not already been allocated by a
2020 subclass. */
2021 if (entry == NULL)
2022 {
2023 entry = bfd_hash_allocate (table,
2024 sizeof (struct
2025 elf_aarch64_stub_hash_entry));
2026 if (entry == NULL)
2027 return entry;
2028 }
2029
2030 /* Call the allocation method of the superclass. */
2031 entry = bfd_hash_newfunc (entry, table, string);
2032 if (entry != NULL)
2033 {
2034 struct elf_aarch64_stub_hash_entry *eh;
2035
2036 /* Initialize the local fields. */
2037 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2038 eh->adrp_offset = 0;
2039 eh->stub_sec = NULL;
2040 eh->stub_offset = 0;
2041 eh->target_value = 0;
2042 eh->target_section = NULL;
2043 eh->stub_type = aarch64_stub_none;
2044 eh->h = NULL;
2045 eh->id_sec = NULL;
2046 }
2047
2048 return entry;
2049 }
2050
2051 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2052 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2053 as global symbols. We reuse indx and dynstr_index for the local
2054 symbol hash since they aren't used by global symbols in this backend. */
2055
2056 static hashval_t
2057 elfNN_aarch64_local_htab_hash (const void *ptr)
2058 {
2059 struct elf_link_hash_entry *h
2060 = (struct elf_link_hash_entry *) ptr;
2061 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2062 }
2063
2064 /* Compare local hash entries. */
2065
2066 static int
2067 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2068 {
2069 struct elf_link_hash_entry *h1
2070 = (struct elf_link_hash_entry *) ptr1;
2071 struct elf_link_hash_entry *h2
2072 = (struct elf_link_hash_entry *) ptr2;
2073
2074 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2075 }
2076
2077 /* Find and/or create a hash entry for a local symbol. */
2078
2079 static struct elf_link_hash_entry *
2080 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2081 bfd *abfd, const Elf_Internal_Rela *rel,
2082 bfd_boolean create)
2083 {
2084 struct elf_aarch64_link_hash_entry e, *ret;
2085 asection *sec = abfd->sections;
2086 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2087 ELFNN_R_SYM (rel->r_info));
2088 void **slot;
2089
2090 e.root.indx = sec->id;
2091 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2092 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2093 create ? INSERT : NO_INSERT);
2094
2095 if (!slot)
2096 return NULL;
2097
2098 if (*slot)
2099 {
2100 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2101 return &ret->root;
2102 }
2103
2104 ret = (struct elf_aarch64_link_hash_entry *)
2105 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2106 sizeof (struct elf_aarch64_link_hash_entry));
2107 if (ret)
2108 {
2109 memset (ret, 0, sizeof (*ret));
2110 ret->root.indx = sec->id;
2111 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2112 ret->root.dynindx = -1;
2113 *slot = ret;
2114 }
2115 return &ret->root;
2116 }
2117
2118 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2119
2120 static void
2121 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2122 struct elf_link_hash_entry *dir,
2123 struct elf_link_hash_entry *ind)
2124 {
2125 struct elf_aarch64_link_hash_entry *edir, *eind;
2126
2127 edir = (struct elf_aarch64_link_hash_entry *) dir;
2128 eind = (struct elf_aarch64_link_hash_entry *) ind;
2129
2130 if (eind->dyn_relocs != NULL)
2131 {
2132 if (edir->dyn_relocs != NULL)
2133 {
2134 struct elf_dyn_relocs **pp;
2135 struct elf_dyn_relocs *p;
2136
2137 /* Add reloc counts against the indirect sym to the direct sym
2138 list. Merge any entries against the same section. */
2139 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2140 {
2141 struct elf_dyn_relocs *q;
2142
2143 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2144 if (q->sec == p->sec)
2145 {
2146 q->pc_count += p->pc_count;
2147 q->count += p->count;
2148 *pp = p->next;
2149 break;
2150 }
2151 if (q == NULL)
2152 pp = &p->next;
2153 }
2154 *pp = edir->dyn_relocs;
2155 }
2156
2157 edir->dyn_relocs = eind->dyn_relocs;
2158 eind->dyn_relocs = NULL;
2159 }
2160
2161 if (ind->root.type == bfd_link_hash_indirect)
2162 {
2163 /* Copy over PLT info. */
2164 if (dir->got.refcount <= 0)
2165 {
2166 edir->got_type = eind->got_type;
2167 eind->got_type = GOT_UNKNOWN;
2168 }
2169 }
2170
2171 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2172 }
2173
2174 /* Destroy an AArch64 elf linker hash table. */
2175
2176 static void
2177 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2178 {
2179 struct elf_aarch64_link_hash_table *ret
2180 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2181
2182 if (ret->loc_hash_table)
2183 htab_delete (ret->loc_hash_table);
2184 if (ret->loc_hash_memory)
2185 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2186
2187 bfd_hash_table_free (&ret->stub_hash_table);
2188 _bfd_elf_link_hash_table_free (obfd);
2189 }
2190
2191 /* Create an AArch64 elf linker hash table. */
2192
2193 static struct bfd_link_hash_table *
2194 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2195 {
2196 struct elf_aarch64_link_hash_table *ret;
2197 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2198
2199 ret = bfd_zmalloc (amt);
2200 if (ret == NULL)
2201 return NULL;
2202
2203 if (!_bfd_elf_link_hash_table_init
2204 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2205 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2206 {
2207 free (ret);
2208 return NULL;
2209 }
2210
2211 ret->plt_header_size = PLT_ENTRY_SIZE;
2212 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2213 ret->obfd = abfd;
2214 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2215
2216 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2217 sizeof (struct elf_aarch64_stub_hash_entry)))
2218 {
2219 _bfd_elf_link_hash_table_free (abfd);
2220 return NULL;
2221 }
2222
2223 ret->loc_hash_table = htab_try_create (1024,
2224 elfNN_aarch64_local_htab_hash,
2225 elfNN_aarch64_local_htab_eq,
2226 NULL);
2227 ret->loc_hash_memory = objalloc_create ();
2228 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2229 {
2230 elfNN_aarch64_link_hash_table_free (abfd);
2231 return NULL;
2232 }
2233 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2234
2235 return &ret->root.root;
2236 }
2237
2238 static bfd_boolean
2239 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2240 bfd_vma offset, bfd_vma value)
2241 {
2242 reloc_howto_type *howto;
2243 bfd_vma place;
2244
2245 howto = elfNN_aarch64_howto_from_type (r_type);
2246 place = (input_section->output_section->vma + input_section->output_offset
2247 + offset);
2248
2249 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2250 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2251 return _bfd_aarch64_elf_put_addend (input_bfd,
2252 input_section->contents + offset, r_type,
2253 howto, value);
2254 }
2255
2256 static enum elf_aarch64_stub_type
2257 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2258 {
2259 if (aarch64_valid_for_adrp_p (value, place))
2260 return aarch64_stub_adrp_branch;
2261 return aarch64_stub_long_branch;
2262 }
2263
2264 /* Determine the type of stub needed, if any, for a call. */
2265
2266 static enum elf_aarch64_stub_type
2267 aarch64_type_of_stub (struct bfd_link_info *info,
2268 asection *input_sec,
2269 const Elf_Internal_Rela *rel,
2270 unsigned char st_type,
2271 struct elf_aarch64_link_hash_entry *hash,
2272 bfd_vma destination)
2273 {
2274 bfd_vma location;
2275 bfd_signed_vma branch_offset;
2276 unsigned int r_type;
2277 struct elf_aarch64_link_hash_table *globals;
2278 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2279 bfd_boolean via_plt_p;
2280
2281 if (st_type != STT_FUNC)
2282 return stub_type;
2283
2284 globals = elf_aarch64_hash_table (info);
2285 via_plt_p = (globals->root.splt != NULL && hash != NULL
2286 && hash->root.plt.offset != (bfd_vma) - 1);
2287
2288 if (via_plt_p)
2289 return stub_type;
2290
2291 /* Determine where the call point is. */
2292 location = (input_sec->output_offset
2293 + input_sec->output_section->vma + rel->r_offset);
2294
2295 branch_offset = (bfd_signed_vma) (destination - location);
2296
2297 r_type = ELFNN_R_TYPE (rel->r_info);
2298
2299 /* We don't want to redirect any old unconditional jump in this way,
2300 only one which is being used for a sibcall, where it is
2301 acceptable for the IP0 and IP1 registers to be clobbered. */
2302 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2303 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2304 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2305 {
2306 stub_type = aarch64_stub_long_branch;
2307 }
2308
2309 return stub_type;
2310 }
2311
2312 /* Build a name for an entry in the stub hash table. */
2313
2314 static char *
2315 elfNN_aarch64_stub_name (const asection *input_section,
2316 const asection *sym_sec,
2317 const struct elf_aarch64_link_hash_entry *hash,
2318 const Elf_Internal_Rela *rel)
2319 {
2320 char *stub_name;
2321 bfd_size_type len;
2322
2323 if (hash)
2324 {
2325 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2326 stub_name = bfd_malloc (len);
2327 if (stub_name != NULL)
2328 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2329 (unsigned int) input_section->id,
2330 hash->root.root.root.string,
2331 rel->r_addend);
2332 }
2333 else
2334 {
2335 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2336 stub_name = bfd_malloc (len);
2337 if (stub_name != NULL)
2338 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2339 (unsigned int) input_section->id,
2340 (unsigned int) sym_sec->id,
2341 (unsigned int) ELFNN_R_SYM (rel->r_info),
2342 rel->r_addend);
2343 }
2344
2345 return stub_name;
2346 }
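/* For illustration (hypothetical values): a call to a global symbol
   "foo" from the input section with id 0x2a and addend zero yields the
   hash-table name "0000002a_foo+0", while a local symbol uses the form
   "<caller section id>_<symbol section id>:<symbol index>+<addend>",
   e.g. "0000002a_5:c+0".  The STUB_ENTRY_NAME pattern is used
   separately to build the local label naming the stub itself,
   e.g. "__foo_veneer".  */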
2347
2348 /* Look up an entry in the stub hash. Stub entries are cached because
2349 creating the stub name takes a bit of time. */
2350
2351 static struct elf_aarch64_stub_hash_entry *
2352 elfNN_aarch64_get_stub_entry (const asection *input_section,
2353 const asection *sym_sec,
2354 struct elf_link_hash_entry *hash,
2355 const Elf_Internal_Rela *rel,
2356 struct elf_aarch64_link_hash_table *htab)
2357 {
2358 struct elf_aarch64_stub_hash_entry *stub_entry;
2359 struct elf_aarch64_link_hash_entry *h =
2360 (struct elf_aarch64_link_hash_entry *) hash;
2361 const asection *id_sec;
2362
2363 if ((input_section->flags & SEC_CODE) == 0)
2364 return NULL;
2365
2366 /* If this input section is part of a group of sections sharing one
2367 stub section, then use the id of the first section in the group.
2368 Stub names need to include a section id, as there may well be
2369 more than one stub used to reach say, printf, and we need to
2370 distinguish between them. */
2371 id_sec = htab->stub_group[input_section->id].link_sec;
2372
2373 if (h != NULL && h->stub_cache != NULL
2374 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2375 {
2376 stub_entry = h->stub_cache;
2377 }
2378 else
2379 {
2380 char *stub_name;
2381
2382 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2383 if (stub_name == NULL)
2384 return NULL;
2385
2386 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2387 stub_name, FALSE, FALSE);
2388 if (h != NULL)
2389 h->stub_cache = stub_entry;
2390
2391 free (stub_name);
2392 }
2393
2394 return stub_entry;
2395 }
2396
2397
2398 /* Create a stub section. */
2399
2400 static asection *
2401 _bfd_aarch64_create_stub_section (asection *section,
2402 struct elf_aarch64_link_hash_table *htab)
2403 {
2404 size_t namelen;
2405 bfd_size_type len;
2406 char *s_name;
2407
2408 namelen = strlen (section->name);
2409 len = namelen + sizeof (STUB_SUFFIX);
2410 s_name = bfd_alloc (htab->stub_bfd, len);
2411 if (s_name == NULL)
2412 return NULL;
2413
2414 memcpy (s_name, section->name, namelen);
2415 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2416 return (*htab->add_stub_section) (s_name, section);
2417 }
2418
2419
2420 /* Find or create a stub section for a link section.
2421
2422 Find or create the stub section used to collect stubs attached to
2423 the specified link section. */
2424
2425 static asection *
2426 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2427 struct elf_aarch64_link_hash_table *htab)
2428 {
2429 if (htab->stub_group[link_section->id].stub_sec == NULL)
2430 htab->stub_group[link_section->id].stub_sec
2431 = _bfd_aarch64_create_stub_section (link_section, htab);
2432 return htab->stub_group[link_section->id].stub_sec;
2433 }
2434
2435
2436 /* Find or create a stub section in the stub group for an input
2437 section. */
2438
2439 static asection *
2440 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2441 struct elf_aarch64_link_hash_table *htab)
2442 {
2443 asection *link_sec = htab->stub_group[section->id].link_sec;
2444 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2445 }
2446
2447
2448 /* Add a new stub entry in the stub group associated with an input
2449 section to the stub hash. Not all fields of the new stub entry are
2450 initialised. */
2451
2452 static struct elf_aarch64_stub_hash_entry *
2453 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2454 asection *section,
2455 struct elf_aarch64_link_hash_table *htab)
2456 {
2457 asection *link_sec;
2458 asection *stub_sec;
2459 struct elf_aarch64_stub_hash_entry *stub_entry;
2460
2461 link_sec = htab->stub_group[section->id].link_sec;
2462 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2463
2464 /* Enter this entry into the linker stub hash table. */
2465 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2466 TRUE, FALSE);
2467 if (stub_entry == NULL)
2468 {
2469 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2470 section->owner, stub_name);
2471 return NULL;
2472 }
2473
2474 stub_entry->stub_sec = stub_sec;
2475 stub_entry->stub_offset = 0;
2476 stub_entry->id_sec = link_sec;
2477
2478 return stub_entry;
2479 }
2480
2481 /* Add a new stub entry in the final stub section to the stub hash.
2482 Not all fields of the new stub entry are initialised. */
2483
2484 static struct elf_aarch64_stub_hash_entry *
2485 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2486 asection *link_section,
2487 struct elf_aarch64_link_hash_table *htab)
2488 {
2489 asection *stub_sec;
2490 struct elf_aarch64_stub_hash_entry *stub_entry;
2491
2492 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2493 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2494 TRUE, FALSE);
2495 if (stub_entry == NULL)
2496 {
2497 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2498 return NULL;
2499 }
2500
2501 stub_entry->stub_sec = stub_sec;
2502 stub_entry->stub_offset = 0;
2503 stub_entry->id_sec = link_section;
2504
2505 return stub_entry;
2506 }
2507
2508
2509 static bfd_boolean
2510 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2511 void *in_arg ATTRIBUTE_UNUSED)
2512 {
2513 struct elf_aarch64_stub_hash_entry *stub_entry;
2514 asection *stub_sec;
2515 bfd *stub_bfd;
2516 bfd_byte *loc;
2517 bfd_vma sym_value;
2518 bfd_vma veneered_insn_loc;
2519 bfd_vma veneer_entry_loc;
2520 bfd_signed_vma branch_offset = 0;
2521 unsigned int template_size;
2522 const uint32_t *template;
2523 unsigned int i;
2524
2525 /* Massage our args to the form they really have. */
2526 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2527
2528 stub_sec = stub_entry->stub_sec;
2529
2530 /* Make a note of the offset within the stubs for this entry. */
2531 stub_entry->stub_offset = stub_sec->size;
2532 loc = stub_sec->contents + stub_entry->stub_offset;
2533
2534 stub_bfd = stub_sec->owner;
2535
2536 /* This is the address of the stub destination. */
2537 sym_value = (stub_entry->target_value
2538 + stub_entry->target_section->output_offset
2539 + stub_entry->target_section->output_section->vma);
2540
2541 if (stub_entry->stub_type == aarch64_stub_long_branch)
2542 {
2543 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2544 + stub_sec->output_offset);
2545
2546 /* See if we can relax the stub. */
2547 if (aarch64_valid_for_adrp_p (sym_value, place))
2548 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2549 }
2550
2551 switch (stub_entry->stub_type)
2552 {
2553 case aarch64_stub_adrp_branch:
2554 template = aarch64_adrp_branch_stub;
2555 template_size = sizeof (aarch64_adrp_branch_stub);
2556 break;
2557 case aarch64_stub_long_branch:
2558 template = aarch64_long_branch_stub;
2559 template_size = sizeof (aarch64_long_branch_stub);
2560 break;
2561 case aarch64_stub_erratum_835769_veneer:
2562 template = aarch64_erratum_835769_stub;
2563 template_size = sizeof (aarch64_erratum_835769_stub);
2564 break;
2565 case aarch64_stub_erratum_843419_veneer:
2566 template = aarch64_erratum_843419_stub;
2567 template_size = sizeof (aarch64_erratum_843419_stub);
2568 break;
2569 default:
2570 abort ();
2571 }
2572
2573 for (i = 0; i < (template_size / sizeof template[0]); i++)
2574 {
2575 bfd_putl32 (template[i], loc);
2576 loc += 4;
2577 }
2578
2579 template_size = (template_size + 7) & ~7;
2580 stub_sec->size += template_size;
2581
2582 switch (stub_entry->stub_type)
2583 {
2584 case aarch64_stub_adrp_branch:
2585 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2586 stub_entry->stub_offset, sym_value))
2587 /* The stub would not have been relaxed if the offset was out
2588 of range. */
2589 BFD_FAIL ();
2590
2591 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2592 stub_entry->stub_offset + 4, sym_value))
2593 BFD_FAIL ();
2594 break;
2595
2596 case aarch64_stub_long_branch:
2597 /* We want the value relative to the address 12 bytes back from the
2598 value itself. */
2599 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2600 stub_entry->stub_offset + 16, sym_value + 12))
2601 BFD_FAIL ();
2602 break;
2603
2604 case aarch64_stub_erratum_835769_veneer:
2605 veneered_insn_loc = stub_entry->target_section->output_section->vma
2606 + stub_entry->target_section->output_offset
2607 + stub_entry->target_value;
2608 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2609 + stub_entry->stub_sec->output_offset
2610 + stub_entry->stub_offset;
2611 branch_offset = veneered_insn_loc - veneer_entry_loc;
2612 branch_offset >>= 2;
2613 branch_offset &= 0x3ffffff;
2614 bfd_putl32 (stub_entry->veneered_insn,
2615 stub_sec->contents + stub_entry->stub_offset);
2616 bfd_putl32 (template[1] | branch_offset,
2617 stub_sec->contents + stub_entry->stub_offset + 4);
2618 break;
2619
2620 case aarch64_stub_erratum_843419_veneer:
2621 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2622 stub_entry->stub_offset + 4, sym_value + 4))
2623 BFD_FAIL ();
2624 break;
2625
2626 default:
2627 abort ();
2628 }
2629
2630 return TRUE;
2631 }
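/* A worked example of the long-branch case handled above (illustrative
   only): the PRELNN relocation is applied at offset 16 with value
   X + 12, so the literal stored there is (X + 12) - (stub + 16)
   = X - (stub + 4).  At run time "adr ip1, #0" at offset 4 produces
   stub + 4, and the following add therefore reconstructs the absolute
   destination X in ip0 before the final "br ip0".  */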
2632
2633 /* As above, but don't actually build the stub. Just bump offset so
2634 we know stub section sizes. */
2635
2636 static bfd_boolean
2637 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2638 void *in_arg ATTRIBUTE_UNUSED)
2639 {
2640 struct elf_aarch64_stub_hash_entry *stub_entry;
2641 int size;
2642
2643 /* Massage our args to the form they really have. */
2644 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2645
2646 switch (stub_entry->stub_type)
2647 {
2648 case aarch64_stub_adrp_branch:
2649 size = sizeof (aarch64_adrp_branch_stub);
2650 break;
2651 case aarch64_stub_long_branch:
2652 size = sizeof (aarch64_long_branch_stub);
2653 break;
2654 case aarch64_stub_erratum_835769_veneer:
2655 size = sizeof (aarch64_erratum_835769_stub);
2656 break;
2657 case aarch64_stub_erratum_843419_veneer:
2658 size = sizeof (aarch64_erratum_843419_stub);
2659 break;
2660 default:
2661 abort ();
2662 }
2663
2664 size = (size + 7) & ~7;
2665 stub_entry->stub_sec->size += size;
2666 return TRUE;
2667 }
2668
2669 /* External entry points for sizing and building linker stubs. */
2670
2671 /* Set up various things so that we can make a list of input sections
2672 for each output section included in the link. Returns -1 on error,
2673 0 when no stubs will be needed, and 1 on success. */
2674
2675 int
2676 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2677 struct bfd_link_info *info)
2678 {
2679 bfd *input_bfd;
2680 unsigned int bfd_count;
2681 int top_id, top_index;
2682 asection *section;
2683 asection **input_list, **list;
2684 bfd_size_type amt;
2685 struct elf_aarch64_link_hash_table *htab =
2686 elf_aarch64_hash_table (info);
2687
2688 if (!is_elf_hash_table (htab))
2689 return 0;
2690
2691 /* Count the number of input BFDs and find the top input section id. */
2692 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2693 input_bfd != NULL; input_bfd = input_bfd->link.next)
2694 {
2695 bfd_count += 1;
2696 for (section = input_bfd->sections;
2697 section != NULL; section = section->next)
2698 {
2699 if (top_id < section->id)
2700 top_id = section->id;
2701 }
2702 }
2703 htab->bfd_count = bfd_count;
2704
2705 amt = sizeof (struct map_stub) * (top_id + 1);
2706 htab->stub_group = bfd_zmalloc (amt);
2707 if (htab->stub_group == NULL)
2708 return -1;
2709
2710 /* We can't use output_bfd->section_count here to find the top output
2711 section index as some sections may have been removed, and
2712 _bfd_strip_section_from_output doesn't renumber the indices. */
2713 for (section = output_bfd->sections, top_index = 0;
2714 section != NULL; section = section->next)
2715 {
2716 if (top_index < section->index)
2717 top_index = section->index;
2718 }
2719
2720 htab->top_index = top_index;
2721 amt = sizeof (asection *) * (top_index + 1);
2722 input_list = bfd_malloc (amt);
2723 htab->input_list = input_list;
2724 if (input_list == NULL)
2725 return -1;
2726
2727 /* For sections we aren't interested in, mark their entries with a
2728 value we can check later. */
2729 list = input_list + top_index;
2730 do
2731 *list = bfd_abs_section_ptr;
2732 while (list-- != input_list);
2733
2734 for (section = output_bfd->sections;
2735 section != NULL; section = section->next)
2736 {
2737 if ((section->flags & SEC_CODE) != 0)
2738 input_list[section->index] = NULL;
2739 }
2740
2741 return 1;
2742 }
2743
2744 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2745 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2746
2747 /* The linker repeatedly calls this function for each input section,
2748 in the order that input sections are linked into output sections.
2749 Build lists of input sections to determine groupings between which
2750 we may insert linker stubs. */
2751
2752 void
2753 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2754 {
2755 struct elf_aarch64_link_hash_table *htab =
2756 elf_aarch64_hash_table (info);
2757
2758 if (isec->output_section->index <= htab->top_index)
2759 {
2760 asection **list = htab->input_list + isec->output_section->index;
2761
2762 if (*list != bfd_abs_section_ptr)
2763 {
2764 /* Steal the link_sec pointer for our list. */
2765 /* This happens to make the list in reverse order,
2766 which is what we want. */
2767 PREV_SEC (isec) = *list;
2768 *list = isec;
2769 }
2770 }
2771 }
2772
2773 /* See whether we can group stub sections together. Grouping stub
2774 sections may result in fewer stubs. More importantly, we need to
2775 put all .init* and .fini* stubs at the beginning of the .init or
2776 .fini output sections respectively, because glibc splits the
2777 _init and _fini functions into multiple parts. Putting a stub in
2778 the middle of a function is not a good idea. */
2779
2780 static void
2781 group_sections (struct elf_aarch64_link_hash_table *htab,
2782 bfd_size_type stub_group_size,
2783 bfd_boolean stubs_always_before_branch)
2784 {
2785 asection **list = htab->input_list + htab->top_index;
2786
2787 do
2788 {
2789 asection *tail = *list;
2790
2791 if (tail == bfd_abs_section_ptr)
2792 continue;
2793
2794 while (tail != NULL)
2795 {
2796 asection *curr;
2797 asection *prev;
2798 bfd_size_type total;
2799
2800 curr = tail;
2801 total = tail->size;
2802 while ((prev = PREV_SEC (curr)) != NULL
2803 && ((total += curr->output_offset - prev->output_offset)
2804 < stub_group_size))
2805 curr = prev;
2806
2807 /* OK, the size from the start of CURR to the end is less
2808 than stub_group_size and thus can be handled by one stub
2809 section. (Or the tail section is itself larger than
2810 stub_group_size, in which case we may be toast.)
2811 We should really be keeping track of the total size of
2812 stubs added here, as stubs contribute to the final output
2813 section size. */
2814 do
2815 {
2816 prev = PREV_SEC (tail);
2817 /* Set up this stub group. */
2818 htab->stub_group[tail->id].link_sec = curr;
2819 }
2820 while (tail != curr && (tail = prev) != NULL);
2821
2822 /* But wait, there's more! Input sections up to stub_group_size
2823 bytes before the stub section can be handled by it too. */
2824 if (!stubs_always_before_branch)
2825 {
2826 total = 0;
2827 while (prev != NULL
2828 && ((total += tail->output_offset - prev->output_offset)
2829 < stub_group_size))
2830 {
2831 tail = prev;
2832 prev = PREV_SEC (tail);
2833 htab->stub_group[tail->id].link_sec = curr;
2834 }
2835 }
2836 tail = prev;
2837 }
2838 }
2839 while (list-- != htab->input_list);
2840
2841 free (htab->input_list);
2842 }
2843
2844 #undef PREV_SEC
2845
2846 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2847
2848 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2849 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2850 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2851 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2852 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2853 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2854
2855 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2856 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2857 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2858 #define AARCH64_ZR 0x1f
2859
2860 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2861 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2862
2863 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2864 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2865 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2866 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2867 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2868 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2869 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2870 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2871 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2872 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2873 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2874 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2875 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2876 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2877 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2878 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2879 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2880 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2881
2882 /* Classify an INSN if it is indeed a load/store.
2883
2884 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2885
2886 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2887 is set equal to RT.
2888
2889 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2890
2891 */
2892
2893 static bfd_boolean
2894 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2895 bfd_boolean *pair, bfd_boolean *load)
2896 {
2897 uint32_t opcode;
2898 unsigned int r;
2899 uint32_t opc = 0;
2900 uint32_t v = 0;
2901 uint32_t opc_v = 0;
2902
2903 /* Bail out quickly if INSN doesn't fall into the load-store
2904 encoding space. */
2905 if (!AARCH64_LDST (insn))
2906 return FALSE;
2907
2908 *pair = FALSE;
2909 *load = FALSE;
2910 if (AARCH64_LDST_EX (insn))
2911 {
2912 *rt = AARCH64_RT (insn);
2913 *rt2 = *rt;
2914 if (AARCH64_BIT (insn, 21) == 1)
2915 {
2916 *pair = TRUE;
2917 *rt2 = AARCH64_RT2 (insn);
2918 }
2919 *load = AARCH64_LD (insn);
2920 return TRUE;
2921 }
2922 else if (AARCH64_LDST_NAP (insn)
2923 || AARCH64_LDSTP_PI (insn)
2924 || AARCH64_LDSTP_O (insn)
2925 || AARCH64_LDSTP_PRE (insn))
2926 {
2927 *pair = TRUE;
2928 *rt = AARCH64_RT (insn);
2929 *rt2 = AARCH64_RT2 (insn);
2930 *load = AARCH64_LD (insn);
2931 return TRUE;
2932 }
2933 else if (AARCH64_LDST_PCREL (insn)
2934 || AARCH64_LDST_UI (insn)
2935 || AARCH64_LDST_PIIMM (insn)
2936 || AARCH64_LDST_U (insn)
2937 || AARCH64_LDST_PREIMM (insn)
2938 || AARCH64_LDST_RO (insn)
2939 || AARCH64_LDST_UIMM (insn))
2940 {
2941 *rt = AARCH64_RT (insn);
2942 *rt2 = *rt;
2943 if (AARCH64_LDST_PCREL (insn))
2944 *load = TRUE;
2945 opc = AARCH64_BITS (insn, 22, 2);
2946 v = AARCH64_BIT (insn, 26);
2947 opc_v = opc | (v << 2);
2948 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2949 || opc_v == 5 || opc_v == 7);
2950 return TRUE;
2951 }
2952 else if (AARCH64_LDST_SIMD_M (insn)
2953 || AARCH64_LDST_SIMD_M_PI (insn))
2954 {
2955 *rt = AARCH64_RT (insn);
2956 *load = AARCH64_BIT (insn, 22);
2957 opcode = (insn >> 12) & 0xf;
2958 switch (opcode)
2959 {
2960 case 0:
2961 case 2:
2962 *rt2 = *rt + 3;
2963 break;
2964
2965 case 4:
2966 case 6:
2967 *rt2 = *rt + 2;
2968 break;
2969
2970 case 7:
2971 *rt2 = *rt;
2972 break;
2973
2974 case 8:
2975 case 10:
2976 *rt2 = *rt + 1;
2977 break;
2978
2979 default:
2980 return FALSE;
2981 }
2982 return TRUE;
2983 }
2984 else if (AARCH64_LDST_SIMD_S (insn)
2985 || AARCH64_LDST_SIMD_S_PI (insn))
2986 {
2987 *rt = AARCH64_RT (insn);
2988 r = (insn >> 21) & 1;
2989 *load = AARCH64_BIT (insn, 22);
2990 opcode = (insn >> 13) & 0x7;
2991 switch (opcode)
2992 {
2993 case 0:
2994 case 2:
2995 case 4:
2996 *rt2 = *rt + r;
2997 break;
2998
2999 case 1:
3000 case 3:
3001 case 5:
3002 *rt2 = *rt + (r == 0 ? 2 : 3);
3003 break;
3004
3005 case 6:
3006 *rt2 = *rt + r;
3007 break;
3008
3009 case 7:
3010 *rt2 = *rt + (r == 0 ? 2 : 3);
3011 break;
3012
3013 default:
3014 return FALSE;
3015 }
3016 return TRUE;
3017 }
3018
3019 return FALSE;
3020 }
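/* A small classification sketch (hypothetical fragment, not compiled):
   "ldr x1, [x0, #8]" encodes as 0xf9400401 and falls in the
   unsigned-immediate load/store class handled above.  */
#if 0
  {
    unsigned int rt, rt2;
    bfd_boolean pair, load;

    if (aarch64_mem_op_p (0xf9400401, &rt, &rt2, &pair, &load))
      /* A single (non-pair) load with Rt == Rt2 == 1.  */
      BFD_ASSERT (load && !pair && rt == 1 && rt2 == 1);
  }
#endif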
3021
3022 /* Return TRUE if INSN is multiply-accumulate. */
3023
3024 static bfd_boolean
3025 aarch64_mlxl_p (uint32_t insn)
3026 {
3027 uint32_t op31 = AARCH64_OP31 (insn);
3028
3029 if (AARCH64_MAC (insn)
3030 && (op31 == 0 || op31 == 1 || op31 == 5)
3031 /* Exclude MUL instructions which are encoded as a multiple accumulate
3032 with RA = XZR. */
3033 && AARCH64_RA (insn) != AARCH64_ZR)
3034 return TRUE;
3035
3036 return FALSE;
3037 }
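/* For illustration (not compiled): "madd x0, x1, x2, x3" encodes as
   0x9b020c20 and is matched, while "mul x0, x1, x2" (MADD with
   Ra == XZR, 0x9b027c20) is deliberately excluded.  */
#if 0
  BFD_ASSERT (aarch64_mlxl_p (0x9b020c20));
  BFD_ASSERT (! aarch64_mlxl_p (0x9b027c20));
#endif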
3038
3039 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3040 it is possible for a 64-bit multiply-accumulate instruction to generate an
3041 incorrect result. The details are quite complex and hard to
3042 determine statically, since branches in the code may exist in some
3043 circumstances, but all cases end with a memory (load, store, or
3044 prefetch) instruction followed immediately by the multiply-accumulate
3045 operation. We employ a linker patching technique, by moving the potentially
3046 affected multiply-accumulate instruction into a patch region and replacing
3047 the original instruction with a branch to the patch. This function checks
3048 if INSN_1 is the memory operation followed by a multiply-accumulate
3049 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3050 if INSN_1 and INSN_2 are safe. */
3051
3052 static bfd_boolean
3053 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3054 {
3055 uint32_t rt;
3056 uint32_t rt2;
3057 uint32_t rn;
3058 uint32_t rm;
3059 uint32_t ra;
3060 bfd_boolean pair;
3061 bfd_boolean load;
3062
3063 if (aarch64_mlxl_p (insn_2)
3064 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3065 {
3066 /* Any SIMD memory op is independent of the subsequent MLA
3067 by definition of the erratum. */
3068 if (AARCH64_BIT (insn_1, 26))
3069 return TRUE;
3070
3071 /* If not SIMD, check for integer memory ops and MLA relationship. */
3072 rn = AARCH64_RN (insn_2);
3073 ra = AARCH64_RA (insn_2);
3074 rm = AARCH64_RM (insn_2);
3075
3076 /* If this is a load and there's a true (RAW) dependency, we are safe
3077 and this is not an erratum sequence. */
3078 if (load &&
3079 (rt == rn || rt == rm || rt == ra
3080 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3081 return FALSE;
3082
3083 /* We conservatively put out stubs for all other cases (including
3084 writebacks). */
3085 return TRUE;
3086 }
3087
3088 return FALSE;
3089 }
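/* A sketch of the check above (illustrative encodings, not compiled):
   "ldr x10, [x0, #8]" (0xf940040a) followed by "madd x0, x1, x2, x3"
   (0x9b020c20) has no register dependency between the load and the
   multiply-accumulate and is therefore flagged, whereas loading into
   x1 instead (0xf9400401) creates a RAW dependency on the multiply
   and is treated as safe.  */
#if 0
  BFD_ASSERT (aarch64_erratum_sequence (0xf940040a, 0x9b020c20));
  BFD_ASSERT (! aarch64_erratum_sequence (0xf9400401, 0x9b020c20));
#endif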
3090
3091 /* Used to order a list of mapping symbols by address. */
3092
3093 static int
3094 elf_aarch64_compare_mapping (const void *a, const void *b)
3095 {
3096 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3097 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3098
3099 if (amap->vma > bmap->vma)
3100 return 1;
3101 else if (amap->vma < bmap->vma)
3102 return -1;
3103 else if (amap->type > bmap->type)
3104 /* Ensure results do not depend on the host qsort for objects with
3105 multiple mapping symbols at the same address by sorting on type
3106 after vma. */
3107 return 1;
3108 else if (amap->type < bmap->type)
3109 return -1;
3110 else
3111 return 0;
3112 }
3113
3114
3115 static char *
3116 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3117 {
3118 char *stub_name = (char *) bfd_malloc
3119 (strlen ("__erratum_835769_veneer_") + 16);
3120 if (stub_name != NULL) sprintf (stub_name, "__erratum_835769_veneer_%u", num_fixes);
3121 return stub_name;
3122 }
3123
3124 /* Scan for Cortex-A53 erratum 835769 sequence.
3125
3126 Return TRUE on a successful scan, FALSE on abnormal termination. */
3127
3128 static bfd_boolean
3129 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3130 struct bfd_link_info *info,
3131 unsigned int *num_fixes_p)
3132 {
3133 asection *section;
3134 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3135 unsigned int num_fixes = *num_fixes_p;
3136
3137 if (htab == NULL)
3138 return TRUE;
3139
3140 for (section = input_bfd->sections;
3141 section != NULL;
3142 section = section->next)
3143 {
3144 bfd_byte *contents = NULL;
3145 struct _aarch64_elf_section_data *sec_data;
3146 unsigned int span;
3147
3148 if (elf_section_type (section) != SHT_PROGBITS
3149 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3150 || (section->flags & SEC_EXCLUDE) != 0
3151 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3152 || (section->output_section == bfd_abs_section_ptr))
3153 continue;
3154
3155 if (elf_section_data (section)->this_hdr.contents != NULL)
3156 contents = elf_section_data (section)->this_hdr.contents;
3157 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3158 return FALSE;
3159
3160 sec_data = elf_aarch64_section_data (section);
3161
3162 qsort (sec_data->map, sec_data->mapcount,
3163 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3164
3165 for (span = 0; span < sec_data->mapcount; span++)
3166 {
3167 unsigned int span_start = sec_data->map[span].vma;
3168 unsigned int span_end = ((span == sec_data->mapcount - 1)
3169 ? sec_data->map[0].vma + section->size
3170 : sec_data->map[span + 1].vma);
3171 unsigned int i;
3172 char span_type = sec_data->map[span].type;
3173
3174 if (span_type == 'd')
3175 continue;
3176
3177 for (i = span_start; i + 4 < span_end; i += 4)
3178 {
3179 uint32_t insn_1 = bfd_getl32 (contents + i);
3180 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3181
3182 if (aarch64_erratum_sequence (insn_1, insn_2))
3183 {
3184 struct elf_aarch64_stub_hash_entry *stub_entry;
3185 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3186 if (! stub_name)
3187 return FALSE;
3188
3189 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3190 section,
3191 htab);
3192 if (! stub_entry)
3193 return FALSE;
3194
3195 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3196 stub_entry->target_section = section;
3197 stub_entry->target_value = i + 4;
3198 stub_entry->veneered_insn = insn_2;
3199 stub_entry->output_name = stub_name;
3200 num_fixes++;
3201 }
3202 }
3203 }
3204 if (elf_section_data (section)->this_hdr.contents == NULL)
3205 free (contents);
3206 }
3207
3208 *num_fixes_p = num_fixes;
3209
3210 return TRUE;
3211 }
3212
3213
3214 /* Test if instruction INSN is ADRP. */
3215
3216 static bfd_boolean
3217 _bfd_aarch64_adrp_p (uint32_t insn)
3218 {
3219 return ((insn & 0x9f000000) == 0x90000000);
3220 }
3221
3222
3223 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3224
3225 static bfd_boolean
3226 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3227 uint32_t insn_3)
3228 {
3229 uint32_t rt;
3230 uint32_t rt2;
3231 bfd_boolean pair;
3232 bfd_boolean load;
3233
3234 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3235 && (!pair
3236 || (pair && !load))
3237 && AARCH64_LDST_UIMM (insn_3)
3238 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3239 }
3240
3241
3242 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3243
3244 Return TRUE if section CONTENTS at offset I contains one of the
3245 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3246 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3247 instruction in the sequence.
3248 */
3249
3250 static bfd_boolean
3251 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3252 bfd_vma i, bfd_vma span_end,
3253 bfd_vma *p_veneer_i)
3254 {
3255 uint32_t insn_1 = bfd_getl32 (contents + i);
3256
3257 if (!_bfd_aarch64_adrp_p (insn_1))
3258 return FALSE;
3259
3260 if (span_end < i + 12)
3261 return FALSE;
3262
3263 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3264 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3265
3266 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3267 return FALSE;
3268
3269 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3270 {
3271 *p_veneer_i = i + 8;
3272 return TRUE;
3273 }
3274
3275 if (span_end < i + 16)
3276 return FALSE;
3277
3278 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3279
3280 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3281 {
3282 *p_veneer_i = i + 12;
3283 return TRUE;
3284 }
3285
3286 return FALSE;
3287 }
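/* In rough assembly terms the shape matched above is (illustrative
   only):

     adrp    xA, sym            <- in the last 8 bytes of a 4KB page
     ldr/str ...                <- not a load pair
     <optional unrelated insn>
     ldr/str xB, [xA, #imm]     <- unsigned immediate form, base xA

   and P_VENEER_I reports the offset of that final load/store.  */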
3288
3289
3290 /* Resize all stub sections. */
3291
3292 static void
3293 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3294 {
3295 asection *section;
3296
3297 /* OK, we've added some stubs. Find out the new size of the
3298 stub sections. */
3299 for (section = htab->stub_bfd->sections;
3300 section != NULL; section = section->next)
3301 {
3302 /* Ignore non-stub sections. */
3303 if (!strstr (section->name, STUB_SUFFIX))
3304 continue;
3305 section->size = 0;
3306 }
3307
3308 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3309
3310 for (section = htab->stub_bfd->sections;
3311 section != NULL; section = section->next)
3312 {
3313 if (!strstr (section->name, STUB_SUFFIX))
3314 continue;
3315
3316 if (section->size)
3317 section->size += 4;
3318
3319 /* Ensure all stub sections have a size which is a multiple of
3320 4096. This is important in order to ensure that the insertion
3321 of stub sections does not in itself move existing code around
3322 in such a way that new errata sequences are created. */
3323 if (htab->fix_erratum_843419)
3324 if (section->size)
3325 section->size = BFD_ALIGN (section->size, 0x1000);
3326 }
3327 }
3328
3329
3330 /* Construct an erratum 843419 workaround stub name.
3331 */
3332
3333 static char *
3334 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3335 bfd_vma offset)
3336 {
3337 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3338 char *stub_name = bfd_malloc (len);
3339
3340 if (stub_name != NULL)
3341 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3342 input_section->owner->id,
3343 input_section->id,
3344 offset);
3345 return stub_name;
3346 }
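/* For illustration (hypothetical ids): a fix at offset 0x15c in the
   section with id 0x2a of the bfd with id 2 is named
   "e843419@0002_0000002a_15c", which keeps each workaround veneer
   uniquely identifiable in the stub hash table.  */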
3347
3348 /* Build a stub_entry structure describing an 843419 fixup.
3349
3350 The stub_entry constructed is populated with the bit pattern INSN
3351 of the instruction located at OFFSET within input SECTION.
3352
3353 Returns TRUE on success. */
3354
3355 static bfd_boolean
3356 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3357 bfd_vma adrp_offset,
3358 bfd_vma ldst_offset,
3359 asection *section,
3360 struct bfd_link_info *info)
3361 {
3362 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3363 char *stub_name;
3364 struct elf_aarch64_stub_hash_entry *stub_entry;
3365
3366 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3367 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3368 FALSE, FALSE);
3369 if (stub_entry)
3370 {
3371 free (stub_name);
3372 return TRUE;
3373 }
3374
3375 /* We always place an 843419 workaround veneer in the stub section
3376 attached to the input section in which an erratum sequence has
3377 been found. This ensures that later in the link process (in
3378 elfNN_aarch64_write_section) when we copy the veneered
3379 instruction from the input section into the stub section the
3380 copied instruction will have had any relocations applied to it.
3381 If we placed workaround veneers in any other stub section then we
3382 could not assume that all relocations have been processed on the
3383 corresponding input section at the point we output the stub
3384 section.
3385 */
3386
3387 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3388 if (stub_entry == NULL)
3389 {
3390 free (stub_name);
3391 return FALSE;
3392 }
3393
3394 stub_entry->adrp_offset = adrp_offset;
3395 stub_entry->target_value = ldst_offset;
3396 stub_entry->target_section = section;
3397 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3398 stub_entry->veneered_insn = insn;
3399 stub_entry->output_name = stub_name;
3400
3401 return TRUE;
3402 }
3403
3404
3405 /* Scan an input section looking for the signature of erratum 843419.
3406
3407 Scans input SECTION in INPUT_BFD looking for erratum 843419
3408 signatures; for each signature found a stub_entry is created
3409 describing the location of the erratum for subsequent fixup.
3410
3411 Return TRUE on successful scan, FALSE on failure to scan.
3412 */
3413
3414 static bfd_boolean
3415 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3416 struct bfd_link_info *info)
3417 {
3418 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3419
3420 if (htab == NULL)
3421 return TRUE;
3422
3423 if (elf_section_type (section) != SHT_PROGBITS
3424 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3425 || (section->flags & SEC_EXCLUDE) != 0
3426 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3427 || (section->output_section == bfd_abs_section_ptr))
3428 return TRUE;
3429
3430 do
3431 {
3432 bfd_byte *contents = NULL;
3433 struct _aarch64_elf_section_data *sec_data;
3434 unsigned int span;
3435
3436 if (elf_section_data (section)->this_hdr.contents != NULL)
3437 contents = elf_section_data (section)->this_hdr.contents;
3438 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3439 return FALSE;
3440
3441 sec_data = elf_aarch64_section_data (section);
3442
3443 qsort (sec_data->map, sec_data->mapcount,
3444 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3445
3446 for (span = 0; span < sec_data->mapcount; span++)
3447 {
3448 unsigned int span_start = sec_data->map[span].vma;
3449 unsigned int span_end = ((span == sec_data->mapcount - 1)
3450 ? sec_data->map[0].vma + section->size
3451 : sec_data->map[span + 1].vma);
3452 unsigned int i;
3453 char span_type = sec_data->map[span].type;
3454
3455 if (span_type == 'd')
3456 continue;
3457
3458 for (i = span_start; i + 8 < span_end; i += 4)
3459 {
3460 bfd_vma vma = (section->output_section->vma
3461 + section->output_offset
3462 + i);
3463 bfd_vma veneer_i;
3464
3465 if (_bfd_aarch64_erratum_843419_p
3466 (contents, vma, i, span_end, &veneer_i))
3467 {
3468 uint32_t insn = bfd_getl32 (contents + veneer_i);
3469
3470 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3471 section, info))
3472 return FALSE;
3473 }
3474 }
3475 }
3476
3477 if (elf_section_data (section)->this_hdr.contents == NULL)
3478 free (contents);
3479 }
3480 while (0);
3481
3482 return TRUE;
3483 }
3484
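/* Reading aid for the scan loop above: the mapping-symbol entries are
   sorted by vma, each entry opens a span that runs up to the next
   entry (or to the end of the section for the last one), data spans
   (type 'd') are skipped outright, and code spans are walked four
   bytes at a time.  _bfd_aarch64_erratum_843419_p is handed span_end
   as well, so that its look-ahead over the candidate sequence can be
   bounded by the current span.  */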
3485
3486 /* Determine and set the size of the stub section for a final link.
3487
3488 The basic idea here is to examine all the relocations looking for
3489 PC-relative calls to a target that is unreachable with a "bl"
3490 instruction. */
3491
3492 bfd_boolean
3493 elfNN_aarch64_size_stubs (bfd *output_bfd,
3494 bfd *stub_bfd,
3495 struct bfd_link_info *info,
3496 bfd_signed_vma group_size,
3497 asection * (*add_stub_section) (const char *,
3498 asection *),
3499 void (*layout_sections_again) (void))
3500 {
3501 bfd_size_type stub_group_size;
3502 bfd_boolean stubs_always_before_branch;
3503 bfd_boolean stub_changed = FALSE;
3504 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3505 unsigned int num_erratum_835769_fixes = 0;
3506
3507 /* Propagate mach to stub bfd, because it may not have been
3508 finalized when we created stub_bfd. */
3509 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3510 bfd_get_mach (output_bfd));
3511
3512 /* Stash our params away. */
3513 htab->stub_bfd = stub_bfd;
3514 htab->add_stub_section = add_stub_section;
3515 htab->layout_sections_again = layout_sections_again;
3516 stubs_always_before_branch = group_size < 0;
3517 if (group_size < 0)
3518 stub_group_size = -group_size;
3519 else
3520 stub_group_size = group_size;
3521
3522 if (stub_group_size == 1)
3523 {
3524 /* Default values. */
3525 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3526 stub_group_size = 127 * 1024 * 1024;
3527 }
3528
3529 group_sections (htab, stub_group_size, stubs_always_before_branch);
3530
3531 (*htab->layout_sections_again) ();
3532
3533 if (htab->fix_erratum_835769)
3534 {
3535 bfd *input_bfd;
3536
3537 for (input_bfd = info->input_bfds;
3538 input_bfd != NULL; input_bfd = input_bfd->link.next)
3539 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3540 &num_erratum_835769_fixes))
3541 return FALSE;
3542
3543 _bfd_aarch64_resize_stubs (htab);
3544 (*htab->layout_sections_again) ();
3545 }
3546
3547 if (htab->fix_erratum_843419)
3548 {
3549 bfd *input_bfd;
3550
3551 for (input_bfd = info->input_bfds;
3552 input_bfd != NULL;
3553 input_bfd = input_bfd->link.next)
3554 {
3555 asection *section;
3556
3557 for (section = input_bfd->sections;
3558 section != NULL;
3559 section = section->next)
3560 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3561 return FALSE;
3562 }
3563
3564 _bfd_aarch64_resize_stubs (htab);
3565 (*htab->layout_sections_again) ();
3566 }
3567
3568 while (1)
3569 {
3570 bfd *input_bfd;
3571
3572 for (input_bfd = info->input_bfds;
3573 input_bfd != NULL; input_bfd = input_bfd->link.next)
3574 {
3575 Elf_Internal_Shdr *symtab_hdr;
3576 asection *section;
3577 Elf_Internal_Sym *local_syms = NULL;
3578
3579 /* We'll need the symbol table in a second. */
3580 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3581 if (symtab_hdr->sh_info == 0)
3582 continue;
3583
3584 /* Walk over each section attached to the input bfd. */
3585 for (section = input_bfd->sections;
3586 section != NULL; section = section->next)
3587 {
3588 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3589
3590 /* If there aren't any relocs, then there's nothing more
3591 to do. */
3592 if ((section->flags & SEC_RELOC) == 0
3593 || section->reloc_count == 0
3594 || (section->flags & SEC_CODE) == 0)
3595 continue;
3596
3597 /* If this section is a link-once section that will be
3598 discarded, then don't create any stubs. */
3599 if (section->output_section == NULL
3600 || section->output_section->owner != output_bfd)
3601 continue;
3602
3603 /* Get the relocs. */
3604 internal_relocs
3605 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3606 NULL, info->keep_memory);
3607 if (internal_relocs == NULL)
3608 goto error_ret_free_local;
3609
3610 /* Now examine each relocation. */
3611 irela = internal_relocs;
3612 irelaend = irela + section->reloc_count;
3613 for (; irela < irelaend; irela++)
3614 {
3615 unsigned int r_type, r_indx;
3616 enum elf_aarch64_stub_type stub_type;
3617 struct elf_aarch64_stub_hash_entry *stub_entry;
3618 asection *sym_sec;
3619 bfd_vma sym_value;
3620 bfd_vma destination;
3621 struct elf_aarch64_link_hash_entry *hash;
3622 const char *sym_name;
3623 char *stub_name;
3624 const asection *id_sec;
3625 unsigned char st_type;
3626 bfd_size_type len;
3627
3628 r_type = ELFNN_R_TYPE (irela->r_info);
3629 r_indx = ELFNN_R_SYM (irela->r_info);
3630
3631 if (r_type >= (unsigned int) R_AARCH64_end)
3632 {
3633 bfd_set_error (bfd_error_bad_value);
3634 error_ret_free_internal:
3635 if (elf_section_data (section)->relocs == NULL)
3636 free (internal_relocs);
3637 goto error_ret_free_local;
3638 }
3639
3640 /* Only look for stubs on unconditional branch and
3641 branch and link instructions. */
3642 if (r_type != (unsigned int) AARCH64_R (CALL26)
3643 && r_type != (unsigned int) AARCH64_R (JUMP26))
3644 continue;
3645
3646 /* Now determine the call target, its name, value,
3647 section. */
3648 sym_sec = NULL;
3649 sym_value = 0;
3650 destination = 0;
3651 hash = NULL;
3652 sym_name = NULL;
3653 if (r_indx < symtab_hdr->sh_info)
3654 {
3655 /* It's a local symbol. */
3656 Elf_Internal_Sym *sym;
3657 Elf_Internal_Shdr *hdr;
3658
3659 if (local_syms == NULL)
3660 {
3661 local_syms
3662 = (Elf_Internal_Sym *) symtab_hdr->contents;
3663 if (local_syms == NULL)
3664 local_syms
3665 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3666 symtab_hdr->sh_info, 0,
3667 NULL, NULL, NULL);
3668 if (local_syms == NULL)
3669 goto error_ret_free_internal;
3670 }
3671
3672 sym = local_syms + r_indx;
3673 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3674 sym_sec = hdr->bfd_section;
3675 if (!sym_sec)
3676 /* This is an undefined symbol. It can never
3677 be resolved. */
3678 continue;
3679
3680 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3681 sym_value = sym->st_value;
3682 destination = (sym_value + irela->r_addend
3683 + sym_sec->output_offset
3684 + sym_sec->output_section->vma);
3685 st_type = ELF_ST_TYPE (sym->st_info);
3686 sym_name
3687 = bfd_elf_string_from_elf_section (input_bfd,
3688 symtab_hdr->sh_link,
3689 sym->st_name);
3690 }
3691 else
3692 {
3693 int e_indx;
3694
3695 e_indx = r_indx - symtab_hdr->sh_info;
3696 hash = ((struct elf_aarch64_link_hash_entry *)
3697 elf_sym_hashes (input_bfd)[e_indx]);
3698
3699 while (hash->root.root.type == bfd_link_hash_indirect
3700 || hash->root.root.type == bfd_link_hash_warning)
3701 hash = ((struct elf_aarch64_link_hash_entry *)
3702 hash->root.root.u.i.link);
3703
3704 if (hash->root.root.type == bfd_link_hash_defined
3705 || hash->root.root.type == bfd_link_hash_defweak)
3706 {
3707 struct elf_aarch64_link_hash_table *globals =
3708 elf_aarch64_hash_table (info);
3709 sym_sec = hash->root.root.u.def.section;
3710 sym_value = hash->root.root.u.def.value;
3711 /* For a destination in a shared library,
3712 use the PLT stub as target address to
3713 decide whether a branch stub is
3714 needed. */
3715 if (globals->root.splt != NULL && hash != NULL
3716 && hash->root.plt.offset != (bfd_vma) - 1)
3717 {
3718 sym_sec = globals->root.splt;
3719 sym_value = hash->root.plt.offset;
3720 if (sym_sec->output_section != NULL)
3721 destination = (sym_value
3722 + sym_sec->output_offset
3723 +
3724 sym_sec->output_section->vma);
3725 }
3726 else if (sym_sec->output_section != NULL)
3727 destination = (sym_value + irela->r_addend
3728 + sym_sec->output_offset
3729 + sym_sec->output_section->vma);
3730 }
3731 else if (hash->root.root.type == bfd_link_hash_undefined
3732 || (hash->root.root.type
3733 == bfd_link_hash_undefweak))
3734 {
3735 /* For a shared library, use the PLT stub as the
3736 target address to decide whether a long
3737 branch stub is needed.
3738 For absolute code such undefined symbols cannot be handled, so skip them. */
3739 struct elf_aarch64_link_hash_table *globals =
3740 elf_aarch64_hash_table (info);
3741
3742 if (globals->root.splt != NULL && hash != NULL
3743 && hash->root.plt.offset != (bfd_vma) - 1)
3744 {
3745 sym_sec = globals->root.splt;
3746 sym_value = hash->root.plt.offset;
3747 if (sym_sec->output_section != NULL)
3748 destination = (sym_value
3749 + sym_sec->output_offset
3750 +
3751 sym_sec->output_section->vma);
3752 }
3753 else
3754 continue;
3755 }
3756 else
3757 {
3758 bfd_set_error (bfd_error_bad_value);
3759 goto error_ret_free_internal;
3760 }
3761 st_type = ELF_ST_TYPE (hash->root.type);
3762 sym_name = hash->root.root.root.string;
3763 }
3764
3765 /* Determine what (if any) linker stub is needed. */
3766 stub_type = aarch64_type_of_stub
3767 (info, section, irela, st_type, hash, destination);
3768 if (stub_type == aarch64_stub_none)
3769 continue;
3770
3771 /* Support for grouping stub sections. */
3772 id_sec = htab->stub_group[section->id].link_sec;
3773
3774 /* Get the name of this stub. */
3775 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3776 irela);
3777 if (!stub_name)
3778 goto error_ret_free_internal;
3779
3780 stub_entry =
3781 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3782 stub_name, FALSE, FALSE);
3783 if (stub_entry != NULL)
3784 {
3785 /* The proper stub has already been created. */
3786 free (stub_name);
3787 continue;
3788 }
3789
3790 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3791 (stub_name, section, htab);
3792 if (stub_entry == NULL)
3793 {
3794 free (stub_name);
3795 goto error_ret_free_internal;
3796 }
3797
3798 stub_entry->target_value = sym_value;
3799 stub_entry->target_section = sym_sec;
3800 stub_entry->stub_type = stub_type;
3801 stub_entry->h = hash;
3802 stub_entry->st_type = st_type;
3803
3804 if (sym_name == NULL)
3805 sym_name = "unnamed";
3806 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3807 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3808 if (stub_entry->output_name == NULL)
3809 {
3810 free (stub_name);
3811 goto error_ret_free_internal;
3812 }
3813
3814 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3815 sym_name);
3816
3817 stub_changed = TRUE;
3818 }
3819
3820 /* We're done with the internal relocs, free them. */
3821 if (elf_section_data (section)->relocs == NULL)
3822 free (internal_relocs);
3823 }
3824 }
3825
3826 if (!stub_changed)
3827 break;
3828
3829 _bfd_aarch64_resize_stubs (htab);
3830
3831 /* Ask the linker to do its stuff. */
3832 (*htab->layout_sections_again) ();
3833 stub_changed = FALSE;
3834 }
3835
3836 return TRUE;
3837
3838 error_ret_free_local:
3839 return FALSE;
3840 }
3841
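/* A note on the sizing loop above (a reading aid only, behaviour is
   unchanged): adding a stub section and re-running
   layout_sections_again can move other sections and push further
   branches out of "bl" range, so the while (1) loop keeps re-scanning
   every input section and only exits once a complete pass finishes
   with stub_changed still FALSE, i.e. once the set of stubs has
   reached a fixed point.  */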
3842 /* Build all the stubs associated with the current output file. The
3843 stubs are kept in a hash table attached to the main linker hash
3844 table. We also set up the .plt entries for statically linked PIC
3845 functions here. This function is called via aarch64_elf_finish in the
3846 linker. */
3847
3848 bfd_boolean
3849 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3850 {
3851 asection *stub_sec;
3852 struct bfd_hash_table *table;
3853 struct elf_aarch64_link_hash_table *htab;
3854
3855 htab = elf_aarch64_hash_table (info);
3856
3857 for (stub_sec = htab->stub_bfd->sections;
3858 stub_sec != NULL; stub_sec = stub_sec->next)
3859 {
3860 bfd_size_type size;
3861
3862 /* Ignore non-stub sections. */
3863 if (!strstr (stub_sec->name, STUB_SUFFIX))
3864 continue;
3865
3866 /* Allocate memory to hold the linker stubs. */
3867 size = stub_sec->size;
3868 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3869 if (stub_sec->contents == NULL && size != 0)
3870 return FALSE;
3871 stub_sec->size = 0;
3872
3873 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3874 stub_sec->size += 4;
3875 }
3876
3877 /* Build the stubs as directed by the stub hash table. */
3878 table = &htab->stub_hash_table;
3879 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3880
3881 return TRUE;
3882 }
3883
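/* The word emitted at the start of each stub section above,
   0x14000000 | (size >> 2), is an unconditional A64 "B" whose word
   offset equals the section size, i.e. a branch to just past the end
   of the section; presumably this is so that execution falling
   through from the code the stub section is attached to skips over
   the veneers that aarch64_build_one_stub then appends behind it.  */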
3884
3885 /* Add an entry to the code/data map for section SEC. */
3886
3887 static void
3888 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3889 {
3890 struct _aarch64_elf_section_data *sec_data =
3891 elf_aarch64_section_data (sec);
3892 unsigned int newidx;
3893
3894 if (sec_data->map == NULL)
3895 {
3896 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3897 sec_data->mapcount = 0;
3898 sec_data->mapsize = 1;
3899 }
3900
3901 newidx = sec_data->mapcount++;
3902
3903 if (sec_data->mapcount > sec_data->mapsize)
3904 {
3905 sec_data->mapsize *= 2;
3906 sec_data->map = bfd_realloc_or_free
3907 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3908 }
3909
3910 if (sec_data->map)
3911 {
3912 sec_data->map[newidx].vma = vma;
3913 sec_data->map[newidx].type = type;
3914 }
3915 }
3916
3917
3918 /* Initialise maps of insn/data for input BFDs. */
3919 void
3920 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3921 {
3922 Elf_Internal_Sym *isymbuf;
3923 Elf_Internal_Shdr *hdr;
3924 unsigned int i, localsyms;
3925
3926 /* Make sure that we are dealing with an AArch64 elf binary. */
3927 if (!is_aarch64_elf (abfd))
3928 return;
3929
3930 if ((abfd->flags & DYNAMIC) != 0)
3931 return;
3932
3933 hdr = &elf_symtab_hdr (abfd);
3934 localsyms = hdr->sh_info;
3935
3936 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3937 should contain the number of local symbols, which should come before any
3938 global symbols. Mapping symbols are always local. */
3939 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3940
3941 /* No internal symbols read? Skip this BFD. */
3942 if (isymbuf == NULL)
3943 return;
3944
3945 for (i = 0; i < localsyms; i++)
3946 {
3947 Elf_Internal_Sym *isym = &isymbuf[i];
3948 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3949 const char *name;
3950
3951 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3952 {
3953 name = bfd_elf_string_from_elf_section (abfd,
3954 hdr->sh_link,
3955 isym->st_name);
3956
3957 if (bfd_is_aarch64_special_symbol_name
3958 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3959 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3960 }
3961 }
3962 }
3963
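/* For reference, where the map entries come from: AArch64 assemblers
   emit local mapping symbols named "$x" at the start of a run of A64
   instructions and "$d" at the start of a run of data such as a
   literal pool.  The loop above keeps exactly those symbols (via
   BFD_AARCH64_SPECIAL_SYM_TYPE_MAP) and records name[1], 'x' or 'd',
   together with the symbol value, which is what the erratum scanners
   later use to tell code from data.  */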
3964 /* Set option values needed during linking. */
3965 void
3966 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3967 struct bfd_link_info *link_info,
3968 int no_enum_warn,
3969 int no_wchar_warn, int pic_veneer,
3970 int fix_erratum_835769,
3971 int fix_erratum_843419)
3972 {
3973 struct elf_aarch64_link_hash_table *globals;
3974
3975 globals = elf_aarch64_hash_table (link_info);
3976 globals->pic_veneer = pic_veneer;
3977 globals->fix_erratum_835769 = fix_erratum_835769;
3978 globals->fix_erratum_843419 = fix_erratum_843419;
3979 globals->fix_erratum_843419_adr = TRUE;
3980
3981 BFD_ASSERT (is_aarch64_elf (output_bfd));
3982 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3983 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3984 }
3985
3986 static bfd_vma
3987 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3988 struct elf_aarch64_link_hash_table
3989 *globals, struct bfd_link_info *info,
3990 bfd_vma value, bfd *output_bfd,
3991 bfd_boolean *unresolved_reloc_p)
3992 {
3993 bfd_vma off = (bfd_vma) - 1;
3994 asection *basegot = globals->root.sgot;
3995 bfd_boolean dyn = globals->root.dynamic_sections_created;
3996
3997 if (h != NULL)
3998 {
3999 BFD_ASSERT (basegot != NULL);
4000 off = h->got.offset;
4001 BFD_ASSERT (off != (bfd_vma) - 1);
4002 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4003 || (info->shared
4004 && SYMBOL_REFERENCES_LOCAL (info, h))
4005 || (ELF_ST_VISIBILITY (h->other)
4006 && h->root.type == bfd_link_hash_undefweak))
4007 {
4008 /* This is actually a static link, or it is a -Bsymbolic link
4009 and the symbol is defined locally. We must initialize this
4010 entry in the global offset table. Since the offset must
4011 always be a multiple of 8 (4 in the case of ILP32), we use
4012 the least significant bit to record whether we have
4013 initialized it already.
4014 When doing a dynamic link, we create a .rel(a).got relocation
4015 entry to initialize the value. This is done in the
4016 finish_dynamic_symbol routine. */
4017 if ((off & 1) != 0)
4018 off &= ~1;
4019 else
4020 {
4021 bfd_put_NN (output_bfd, value, basegot->contents + off);
4022 h->got.offset |= 1;
4023 }
4024 }
4025 else
4026 *unresolved_reloc_p = FALSE;
4027
4028 off = off + basegot->output_section->vma + basegot->output_offset;
4029 }
4030
4031 return off;
4032 }
4033
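/* Note on the helper above: the value returned is the VMA of the GOT
   slot itself (the recorded offset plus the .got output address), not
   the symbol's own address; the GOT-form relocation cases below
   (e.g. BFD_RELOC_AARCH64_ADR_GOT_PAGE) expect exactly that.  */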
4034 /* Change R_TYPE to a more efficient access model where possible,
4035 return the new reloc type. */
4036
4037 static bfd_reloc_code_real_type
4038 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4039 struct elf_link_hash_entry *h)
4040 {
4041 bfd_boolean is_local = h == NULL;
4042
4043 switch (r_type)
4044 {
4045 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4046 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4047 return (is_local
4048 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4049 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4050
4051 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4052 return (is_local
4053 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4054 : r_type);
4055
4056 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4057 return (is_local
4058 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4059 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4060
4061 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4062 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4063 return (is_local
4064 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4065 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4066
4067 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4068 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4069
4070 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4071 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4072
4073 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4074 return r_type;
4075
4076 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4077 return (is_local
4078 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4079 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4080
4081 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4082 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4083 /* Instructions with these relocations will become NOPs. */
4084 return BFD_RELOC_AARCH64_NONE;
4085
4086 default:
4087 break;
4088 }
4089
4090 return r_type;
4091 }
4092
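/* To make the transitions above concrete: for a symbol that can only
   be relaxed as far as IE, the general-dynamic pair

       BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
       BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC

   is rewritten to

       BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
       BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC

   while the same pair for a locally resolved symbol (h == NULL) goes
   straight to the LE forms TLSLE_MOVW_TPREL_G1 / _G0_NC.  The
   matching instruction rewriting is performed later, in
   elfNN_aarch64_tls_relax.  */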
4093 static unsigned int
4094 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4095 {
4096 switch (r_type)
4097 {
4098 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4099 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4100 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4101 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4102 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4103 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4104 return GOT_NORMAL;
4105
4106 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4107 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4108 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4109 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4110 return GOT_TLS_GD;
4111
4112 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4113 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4114 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4115 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4116 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4117 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4118 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4119 return GOT_TLSDESC_GD;
4120
4121 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4122 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4123 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4124 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4125 return GOT_TLS_IE;
4126
4127 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4128 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4129 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4130 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4131 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4132 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4133 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4134 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4135 return GOT_UNKNOWN;
4136
4137 default:
4138 break;
4139 }
4140 return GOT_UNKNOWN;
4141 }
4142
4143 static bfd_boolean
4144 aarch64_can_relax_tls (bfd *input_bfd,
4145 struct bfd_link_info *info,
4146 bfd_reloc_code_real_type r_type,
4147 struct elf_link_hash_entry *h,
4148 unsigned long r_symndx)
4149 {
4150 unsigned int symbol_got_type;
4151 unsigned int reloc_got_type;
4152
4153 if (! IS_AARCH64_TLS_RELOC (r_type))
4154 return FALSE;
4155
4156 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4157 reloc_got_type = aarch64_reloc_got_type (r_type);
4158
4159 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4160 return TRUE;
4161
4162 if (info->shared)
4163 return FALSE;
4164
4165 if (h && h->root.type == bfd_link_hash_undefweak)
4166 return FALSE;
4167
4168 return TRUE;
4169 }
4170
4171 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4172 enumerator. */
4173
4174 static bfd_reloc_code_real_type
4175 aarch64_tls_transition (bfd *input_bfd,
4176 struct bfd_link_info *info,
4177 unsigned int r_type,
4178 struct elf_link_hash_entry *h,
4179 unsigned long r_symndx)
4180 {
4181 bfd_reloc_code_real_type bfd_r_type
4182 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4183
4184 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4185 return bfd_r_type;
4186
4187 return aarch64_tls_transition_without_check (bfd_r_type, h);
4188 }
4189
4190 /* Return the base VMA address which should be subtracted from real addresses
4191 when resolving R_AARCH64_TLS_DTPREL relocations. */
4192
4193 static bfd_vma
4194 dtpoff_base (struct bfd_link_info *info)
4195 {
4196 /* If tls_sec is NULL, we should have signalled an error already. */
4197 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4198 return elf_hash_table (info)->tls_sec->vma;
4199 }
4200
4201 /* Return the base VMA address which should be subtracted from real addresses
4202 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4203
4204 static bfd_vma
4205 tpoff_base (struct bfd_link_info *info)
4206 {
4207 struct elf_link_hash_table *htab = elf_hash_table (info);
4208
4209 /* If tls_sec is NULL, we should have signalled an error already. */
4210 BFD_ASSERT (htab->tls_sec != NULL);
4211
4212 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4213 htab->tls_sec->alignment_power);
4214 return htab->tls_sec->vma - base;
4215 }
4216
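/* Worked example for tpoff_base, assuming the usual AArch64 TCB_SIZE
   of 16 bytes and a TLS segment aligned to at most 16: the function
   returns tls_sec->vma - 16, so subtracting it from the address of a
   thread-local symbol at offset S within the segment yields S + 16,
   the TP-relative offset under the variant I layout in which the
   16-byte TCB sits directly at the thread pointer and the TLS block
   follows it.  */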
4217 static bfd_vma *
4218 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4219 unsigned long r_symndx)
4220 {
4221 /* Return a pointer to the recorded GOT offset for the symbol
4222 referred to by H, or for the local symbol R_SYMNDX. */
4223 if (h != NULL)
4224 return &h->got.offset;
4225 else
4226 {
4227 /* local symbol */
4228 struct elf_aarch64_local_symbol *l;
4229
4230 l = elf_aarch64_locals (input_bfd);
4231 return &l[r_symndx].got_offset;
4232 }
4233 }
4234
4235 static void
4236 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4237 unsigned long r_symndx)
4238 {
4239 bfd_vma *p;
4240 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4241 *p |= 1;
4242 }
4243
4244 static int
4245 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4246 unsigned long r_symndx)
4247 {
4248 bfd_vma value;
4249 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4250 return value & 1;
4251 }
4252
4253 static bfd_vma
4254 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4255 unsigned long r_symndx)
4256 {
4257 bfd_vma value;
4258 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4259 value &= ~1;
4260 return value;
4261 }
4262
4263 static bfd_vma *
4264 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4265 unsigned long r_symndx)
4266 {
4267 /* Return a pointer to the recorded TLSDESC GOT jump-table offset for
4268 the symbol referred to by H, or for the local symbol R_SYMNDX. */
4269 if (h != NULL)
4270 {
4271 struct elf_aarch64_link_hash_entry *eh;
4272 eh = (struct elf_aarch64_link_hash_entry *) h;
4273 return &eh->tlsdesc_got_jump_table_offset;
4274 }
4275 else
4276 {
4277 /* local symbol */
4278 struct elf_aarch64_local_symbol *l;
4279
4280 l = elf_aarch64_locals (input_bfd);
4281 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4282 }
4283 }
4284
4285 static void
4286 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4287 unsigned long r_symndx)
4288 {
4289 bfd_vma *p;
4290 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4291 *p |= 1;
4292 }
4293
4294 static int
4295 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4296 struct elf_link_hash_entry *h,
4297 unsigned long r_symndx)
4298 {
4299 bfd_vma value;
4300 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4301 return value & 1;
4302 }
4303
4304 static bfd_vma
4305 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4306 unsigned long r_symndx)
4307 {
4308 bfd_vma value;
4309 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4310 value &= ~1;
4311 return value;
4312 }
4313
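/* The accessors above all rely on one idiom: GOT offsets are always a
   multiple of the (even) GOT entry size, so bit 0 of the stored value
   is free and is used as an "already initialised / reloc emitted"
   marker.  The *_ref helpers return the raw slot, *_mark sets the
   bit, *_mark_p tests it, and the plain accessors strip it before the
   offset takes part in any address arithmetic.  */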
4314 /* Data for make_branch_to_erratum_835769_stub(). */
4315
4316 struct erratum_835769_branch_to_stub_data
4317 {
4318 struct bfd_link_info *info;
4319 asection *output_section;
4320 bfd_byte *contents;
4321 };
4322
4323 /* Helper to insert branches to erratum 835769 stubs in the right
4324 places for a particular section. */
4325
4326 static bfd_boolean
4327 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4328 void *in_arg)
4329 {
4330 struct elf_aarch64_stub_hash_entry *stub_entry;
4331 struct erratum_835769_branch_to_stub_data *data;
4332 bfd_byte *contents;
4333 unsigned long branch_insn = 0;
4334 bfd_vma veneered_insn_loc, veneer_entry_loc;
4335 bfd_signed_vma branch_offset;
4336 unsigned int target;
4337 bfd *abfd;
4338
4339 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4340 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4341
4342 if (stub_entry->target_section != data->output_section
4343 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4344 return TRUE;
4345
4346 contents = data->contents;
4347 veneered_insn_loc = stub_entry->target_section->output_section->vma
4348 + stub_entry->target_section->output_offset
4349 + stub_entry->target_value;
4350 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4351 + stub_entry->stub_sec->output_offset
4352 + stub_entry->stub_offset;
4353 branch_offset = veneer_entry_loc - veneered_insn_loc;
4354
4355 abfd = stub_entry->target_section->owner;
4356 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4357 (*_bfd_error_handler)
4358 (_("%B: error: Erratum 835769 stub out "
4359 "of range (input file too large)"), abfd);
4360
4361 target = stub_entry->target_value;
4362 branch_insn = 0x14000000;
4363 branch_offset >>= 2;
4364 branch_offset &= 0x3ffffff;
4365 branch_insn |= branch_offset;
4366 bfd_putl32 (branch_insn, &contents[target]);
4367
4368 return TRUE;
4369 }
4370
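/* For reference, the patch written above is a plain A64 "B": opcode
   0x14000000 with the signed word offset (branch_offset >> 2) in the
   low 26 bits, exactly the +/-128MB reach that aarch64_valid_branch_p
   has just verified.  It overwrites the veneered instruction at
   target_value so that control is redirected to the stub laid down by
   aarch64_build_one_stub.  */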
4371
4372 static bfd_boolean
4373 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4374 void *in_arg)
4375 {
4376 struct elf_aarch64_stub_hash_entry *stub_entry
4377 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4378 struct erratum_835769_branch_to_stub_data *data
4379 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4380 struct bfd_link_info *info;
4381 struct elf_aarch64_link_hash_table *htab;
4382 bfd_byte *contents;
4383 asection *section;
4384 bfd *abfd;
4385 bfd_vma place;
4386 uint32_t insn;
4387
4388 info = data->info;
4389 contents = data->contents;
4390 section = data->output_section;
4391
4392 htab = elf_aarch64_hash_table (info);
4393
4394 if (stub_entry->target_section != section
4395 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4396 return TRUE;
4397
4398 insn = bfd_getl32 (contents + stub_entry->target_value);
4399 bfd_putl32 (insn,
4400 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4401
4402 place = (section->output_section->vma + section->output_offset
4403 + stub_entry->adrp_offset);
4404 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4405
4406 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4407 abort ();
4408
4409 bfd_signed_vma imm =
4410 (_bfd_aarch64_sign_extend
4411 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4412 - (place & 0xfff));
4413
4414 if (htab->fix_erratum_843419_adr
4415 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4416 {
4417 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4418 | AARCH64_RT (insn));
4419 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4420 }
4421 else
4422 {
4423 bfd_vma veneered_insn_loc;
4424 bfd_vma veneer_entry_loc;
4425 bfd_signed_vma branch_offset;
4426 uint32_t branch_insn;
4427
4428 veneered_insn_loc = stub_entry->target_section->output_section->vma
4429 + stub_entry->target_section->output_offset
4430 + stub_entry->target_value;
4431 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4432 + stub_entry->stub_sec->output_offset
4433 + stub_entry->stub_offset;
4434 branch_offset = veneer_entry_loc - veneered_insn_loc;
4435
4436 abfd = stub_entry->target_section->owner;
4437 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4438 (*_bfd_error_handler)
4439 (_("%B: error: Erratum 843419 stub out "
4440 "of range (input file too large)"), abfd);
4441
4442 branch_insn = 0x14000000;
4443 branch_offset >>= 2;
4444 branch_offset &= 0x3ffffff;
4445 branch_insn |= branch_offset;
4446 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4447 }
4448 return TRUE;
4449 }
4450
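/* Summary of the two 843419 strategies chosen above: the veneered
   load/store is first copied into its stub; then, if
   fix_erratum_843419_adr is set and the page offset still fits the
   re-encoded ADR immediate, the ADRP at adrp_offset is rewritten in
   place as an ADR (keeping its destination register) and no branch is
   needed; otherwise the instruction at target_value is replaced with
   a branch out to the stub holding the copied instruction.  */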
4451
4452 static bfd_boolean
4453 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4454 struct bfd_link_info *link_info,
4455 asection *sec,
4456 bfd_byte *contents)
4457
4458 {
4459 struct elf_aarch64_link_hash_table *globals =
4460 elf_aarch64_hash_table (link_info);
4461
4462 if (globals == NULL)
4463 return FALSE;
4464
4465 /* Fix code to point to erratum 835769 stubs. */
4466 if (globals->fix_erratum_835769)
4467 {
4468 struct erratum_835769_branch_to_stub_data data;
4469
4470 data.info = link_info;
4471 data.output_section = sec;
4472 data.contents = contents;
4473 bfd_hash_traverse (&globals->stub_hash_table,
4474 make_branch_to_erratum_835769_stub, &data);
4475 }
4476
4477 if (globals->fix_erratum_843419)
4478 {
4479 struct erratum_835769_branch_to_stub_data data;
4480
4481 data.info = link_info;
4482 data.output_section = sec;
4483 data.contents = contents;
4484 bfd_hash_traverse (&globals->stub_hash_table,
4485 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4486 }
4487
4488 return FALSE;
4489 }
4490
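/* Note that returning FALSE above is intentional as far as this hook
   is concerned: it tells the generic ELF output code that the section
   has not been written here and must still go through the normal
   path, which is what causes the in-place erratum patches applied to
   CONTENTS to land in the output file.  */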
4491 /* Perform a relocation as part of a final link. */
4492 static bfd_reloc_status_type
4493 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4494 bfd *input_bfd,
4495 bfd *output_bfd,
4496 asection *input_section,
4497 bfd_byte *contents,
4498 Elf_Internal_Rela *rel,
4499 bfd_vma value,
4500 struct bfd_link_info *info,
4501 asection *sym_sec,
4502 struct elf_link_hash_entry *h,
4503 bfd_boolean *unresolved_reloc_p,
4504 bfd_boolean save_addend,
4505 bfd_vma *saved_addend,
4506 Elf_Internal_Sym *sym)
4507 {
4508 Elf_Internal_Shdr *symtab_hdr;
4509 unsigned int r_type = howto->type;
4510 bfd_reloc_code_real_type bfd_r_type
4511 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4512 bfd_reloc_code_real_type new_bfd_r_type;
4513 unsigned long r_symndx;
4514 bfd_byte *hit_data = contents + rel->r_offset;
4515 bfd_vma place, off;
4516 bfd_signed_vma signed_addend;
4517 struct elf_aarch64_link_hash_table *globals;
4518 bfd_boolean weak_undef_p;
4519 asection *base_got;
4520
4521 globals = elf_aarch64_hash_table (info);
4522
4523 symtab_hdr = &elf_symtab_hdr (input_bfd);
4524
4525 BFD_ASSERT (is_aarch64_elf (input_bfd));
4526
4527 r_symndx = ELFNN_R_SYM (rel->r_info);
4528
4529 /* It is possible to have linker relaxations on some TLS access
4530 models. Update our information here. */
4531 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4532 if (new_bfd_r_type != bfd_r_type)
4533 {
4534 bfd_r_type = new_bfd_r_type;
4535 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4536 BFD_ASSERT (howto != NULL);
4537 r_type = howto->type;
4538 }
4539
4540 place = input_section->output_section->vma
4541 + input_section->output_offset + rel->r_offset;
4542
4543 /* Get addend, accumulating the addend for consecutive relocs
4544 which refer to the same offset. */
4545 signed_addend = saved_addend ? *saved_addend : 0;
4546 signed_addend += rel->r_addend;
4547
4548 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4549 : bfd_is_und_section (sym_sec));
4550
4551 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4552 it here if it is defined in a non-shared object. */
4553 if (h != NULL
4554 && h->type == STT_GNU_IFUNC
4555 && h->def_regular)
4556 {
4557 asection *plt;
4558 const char *name;
4559 bfd_vma addend = 0;
4560
4561 if ((input_section->flags & SEC_ALLOC) == 0
4562 || h->plt.offset == (bfd_vma) -1)
4563 abort ();
4564
4565 /* STT_GNU_IFUNC symbol must go through PLT. */
4566 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4567 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4568
4569 switch (bfd_r_type)
4570 {
4571 default:
4572 if (h->root.root.string)
4573 name = h->root.root.string;
4574 else
4575 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4576 NULL);
4577 (*_bfd_error_handler)
4578 (_("%B: relocation %s against STT_GNU_IFUNC "
4579 "symbol `%s' isn't handled by %s"), input_bfd,
4580 howto->name, name, __FUNCTION__);
4581 bfd_set_error (bfd_error_bad_value);
4582 return FALSE;
4583
4584 case BFD_RELOC_AARCH64_NN:
4585 if (rel->r_addend != 0)
4586 {
4587 if (h->root.root.string)
4588 name = h->root.root.string;
4589 else
4590 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4591 sym, NULL);
4592 (*_bfd_error_handler)
4593 (_("%B: relocation %s against STT_GNU_IFUNC "
4594 "symbol `%s' has non-zero addend: %d"),
4595 input_bfd, howto->name, name, rel->r_addend);
4596 bfd_set_error (bfd_error_bad_value);
4597 return FALSE;
4598 }
4599
4600 /* Generate dynamic relocation only when there is a
4601 non-GOT reference in a shared object. */
4602 if (info->shared && h->non_got_ref)
4603 {
4604 Elf_Internal_Rela outrel;
4605 asection *sreloc;
4606
4607 /* Need a dynamic relocation to get the real function
4608 address. */
4609 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4610 info,
4611 input_section,
4612 rel->r_offset);
4613 if (outrel.r_offset == (bfd_vma) -1
4614 || outrel.r_offset == (bfd_vma) -2)
4615 abort ();
4616
4617 outrel.r_offset += (input_section->output_section->vma
4618 + input_section->output_offset);
4619
4620 if (h->dynindx == -1
4621 || h->forced_local
4622 || info->executable)
4623 {
4624 /* This symbol is resolved locally. */
4625 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4626 outrel.r_addend = (h->root.u.def.value
4627 + h->root.u.def.section->output_section->vma
4628 + h->root.u.def.section->output_offset);
4629 }
4630 else
4631 {
4632 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4633 outrel.r_addend = 0;
4634 }
4635
4636 sreloc = globals->root.irelifunc;
4637 elf_append_rela (output_bfd, sreloc, &outrel);
4638
4639 /* If this reloc is against an external symbol, we
4640 do not want to fiddle with the addend. Otherwise,
4641 we need to include the symbol value so that it
4642 becomes an addend for the dynamic reloc. For an
4643 internal symbol, we have already updated the addend. */
4644 return bfd_reloc_ok;
4645 }
4646 /* FALLTHROUGH */
4647 case BFD_RELOC_AARCH64_CALL26:
4648 case BFD_RELOC_AARCH64_JUMP26:
4649 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4650 signed_addend,
4651 weak_undef_p);
4652 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4653 howto, value);
4654 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4655 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4656 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4657 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4658 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4659 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4660 base_got = globals->root.sgot;
4661 off = h->got.offset;
4662
4663 if (base_got == NULL)
4664 abort ();
4665
4666 if (off == (bfd_vma) -1)
4667 {
4668 bfd_vma plt_index;
4669
4670 /* We can't use h->got.offset here to save state, or
4671 even just remember the offset, as finish_dynamic_symbol
4672 would use that as offset into .got. */
4673
4674 if (globals->root.splt != NULL)
4675 {
4676 plt_index = ((h->plt.offset - globals->plt_header_size) /
4677 globals->plt_entry_size);
4678 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4679 base_got = globals->root.sgotplt;
4680 }
4681 else
4682 {
4683 plt_index = h->plt.offset / globals->plt_entry_size;
4684 off = plt_index * GOT_ENTRY_SIZE;
4685 base_got = globals->root.igotplt;
4686 }
4687
4688 if (h->dynindx == -1
4689 || h->forced_local
4690 || info->symbolic)
4691 {
4692 /* This references the local definition. We must
4693 initialize this entry in the global offset table.
4694 Since the offset must always be a multiple of 8,
4695 we use the least significant bit to record
4696 whether we have initialized it already.
4697
4698 When doing a dynamic link, we create a .rela.got
4699 relocation entry to initialize the value. This
4700 is done in the finish_dynamic_symbol routine. */
4701 if ((off & 1) != 0)
4702 off &= ~1;
4703 else
4704 {
4705 bfd_put_NN (output_bfd, value,
4706 base_got->contents + off);
4707 /* Note that this is harmless as -1 | 1 still is -1. */
4708 h->got.offset |= 1;
4709 }
4710 }
4711 value = (base_got->output_section->vma
4712 + base_got->output_offset + off);
4713 }
4714 else
4715 value = aarch64_calculate_got_entry_vma (h, globals, info,
4716 value, output_bfd,
4717 unresolved_reloc_p);
4718 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4719 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4720 addend = (globals->root.sgot->output_section->vma
4721 + globals->root.sgot->output_offset);
4722 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4723 addend, weak_undef_p);
4724 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4725 case BFD_RELOC_AARCH64_ADD_LO12:
4726 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4727 break;
4728 }
4729 }
4730
4731 switch (bfd_r_type)
4732 {
4733 case BFD_RELOC_AARCH64_NONE:
4734 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4735 *unresolved_reloc_p = FALSE;
4736 return bfd_reloc_ok;
4737
4738 case BFD_RELOC_AARCH64_NN:
4739
4740 /* When generating a shared object or relocatable executable, these
4741 relocations are copied into the output file to be resolved at
4742 run time. */
4743 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4744 && (input_section->flags & SEC_ALLOC)
4745 && (h == NULL
4746 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4747 || h->root.type != bfd_link_hash_undefweak))
4748 {
4749 Elf_Internal_Rela outrel;
4750 bfd_byte *loc;
4751 bfd_boolean skip, relocate;
4752 asection *sreloc;
4753
4754 *unresolved_reloc_p = FALSE;
4755
4756 skip = FALSE;
4757 relocate = FALSE;
4758
4759 outrel.r_addend = signed_addend;
4760 outrel.r_offset =
4761 _bfd_elf_section_offset (output_bfd, info, input_section,
4762 rel->r_offset);
4763 if (outrel.r_offset == (bfd_vma) - 1)
4764 skip = TRUE;
4765 else if (outrel.r_offset == (bfd_vma) - 2)
4766 {
4767 skip = TRUE;
4768 relocate = TRUE;
4769 }
4770
4771 outrel.r_offset += (input_section->output_section->vma
4772 + input_section->output_offset);
4773
4774 if (skip)
4775 memset (&outrel, 0, sizeof outrel);
4776 else if (h != NULL
4777 && h->dynindx != -1
4778 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4779 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4780 else
4781 {
4782 int symbol;
4783
4784 /* On SVR4-ish systems, the dynamic loader cannot
4785 relocate the text and data segments independently,
4786 so the symbol does not matter. */
4787 symbol = 0;
4788 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4789 outrel.r_addend += value;
4790 }
4791
4792 sreloc = elf_section_data (input_section)->sreloc;
4793 if (sreloc == NULL || sreloc->contents == NULL)
4794 return bfd_reloc_notsupported;
4795
4796 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4797 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4798
4799 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4800 {
4801 /* Sanity check that we have previously allocated
4802 sufficient space in the relocation section for the
4803 number of relocations we actually want to emit. */
4804 abort ();
4805 }
4806
4807 /* If this reloc is against an external symbol, we do not want to
4808 fiddle with the addend. Otherwise, we need to include the symbol
4809 value so that it becomes an addend for the dynamic reloc. */
4810 if (!relocate)
4811 return bfd_reloc_ok;
4812
4813 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4814 contents, rel->r_offset, value,
4815 signed_addend);
4816 }
4817 else
4818 value += signed_addend;
4819 break;
4820
4821 case BFD_RELOC_AARCH64_CALL26:
4822 case BFD_RELOC_AARCH64_JUMP26:
4823 {
4824 asection *splt = globals->root.splt;
4825 bfd_boolean via_plt_p =
4826 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4827
4828 /* A call to an undefined weak symbol is converted to a jump to
4829 the next instruction unless a PLT entry will be created.
4830 The jump to the next instruction is optimized as a NOP.
4831 Do the same for local undefined symbols. */
4832 if (weak_undef_p && ! via_plt_p)
4833 {
4834 bfd_putl32 (INSN_NOP, hit_data);
4835 return bfd_reloc_ok;
4836 }
4837
4838 /* If the call goes through a PLT entry, make sure to
4839 check distance to the right destination address. */
4840 if (via_plt_p)
4841 {
4842 value = (splt->output_section->vma
4843 + splt->output_offset + h->plt.offset);
4844 *unresolved_reloc_p = FALSE;
4845 }
4846
4847 /* If the target symbol is global and marked as a function then the
4848 relocation applies to a function call or a tail call. In this
4849 situation we can veneer out-of-range branches. The veneers
4850 use IP0 and IP1, hence they cannot be used for arbitrary out-of-range
4851 branches that occur within the body of a function. */
4852 if (h && h->type == STT_FUNC)
4853 {
4854 /* Check if a stub has to be inserted because the destination
4855 is too far away. */
4856 if (! aarch64_valid_branch_p (value, place))
4857 {
4858 /* The target is out of reach, so redirect the branch to
4859 the local stub for this function. */
4860 struct elf_aarch64_stub_hash_entry *stub_entry;
4861 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4862 sym_sec, h,
4863 rel, globals);
4864 if (stub_entry != NULL)
4865 value = (stub_entry->stub_offset
4866 + stub_entry->stub_sec->output_offset
4867 + stub_entry->stub_sec->output_section->vma);
4868 }
4869 }
4870 }
4871 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4872 signed_addend, weak_undef_p);
4873 break;
4874
4875 case BFD_RELOC_AARCH64_16_PCREL:
4876 case BFD_RELOC_AARCH64_32_PCREL:
4877 case BFD_RELOC_AARCH64_64_PCREL:
4878 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4879 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4880 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4881 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4882 if (info->shared
4883 && (input_section->flags & SEC_ALLOC) != 0
4884 && (input_section->flags & SEC_READONLY) != 0
4885 && h != NULL
4886 && !h->def_regular)
4887 {
4888 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4889
4890 (*_bfd_error_handler)
4891 (_("%B: relocation %s against external symbol `%s' can not be used"
4892 " when making a shared object; recompile with -fPIC"),
4893 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4894 h->root.root.string);
4895 bfd_set_error (bfd_error_bad_value);
4896 return FALSE;
4897 }
4898
4899 case BFD_RELOC_AARCH64_16:
4900 #if ARCH_SIZE == 64
4901 case BFD_RELOC_AARCH64_32:
4902 #endif
4903 case BFD_RELOC_AARCH64_ADD_LO12:
4904 case BFD_RELOC_AARCH64_BRANCH19:
4905 case BFD_RELOC_AARCH64_LDST128_LO12:
4906 case BFD_RELOC_AARCH64_LDST16_LO12:
4907 case BFD_RELOC_AARCH64_LDST32_LO12:
4908 case BFD_RELOC_AARCH64_LDST64_LO12:
4909 case BFD_RELOC_AARCH64_LDST8_LO12:
4910 case BFD_RELOC_AARCH64_MOVW_G0:
4911 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4912 case BFD_RELOC_AARCH64_MOVW_G0_S:
4913 case BFD_RELOC_AARCH64_MOVW_G1:
4914 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4915 case BFD_RELOC_AARCH64_MOVW_G1_S:
4916 case BFD_RELOC_AARCH64_MOVW_G2:
4917 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4918 case BFD_RELOC_AARCH64_MOVW_G2_S:
4919 case BFD_RELOC_AARCH64_MOVW_G3:
4920 case BFD_RELOC_AARCH64_TSTBR14:
4921 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4922 signed_addend, weak_undef_p);
4923 break;
4924
4925 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4926 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4927 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4928 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4929 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4930 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4931 if (globals->root.sgot == NULL)
4932 BFD_ASSERT (h != NULL);
4933
4934 if (h != NULL)
4935 {
4936 bfd_vma addend = 0;
4937 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4938 output_bfd,
4939 unresolved_reloc_p);
4940 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4941 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4942 addend = (globals->root.sgot->output_section->vma
4943 + globals->root.sgot->output_offset);
4944 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4945 addend, weak_undef_p);
4946 }
4947 else
4948 {
4949 bfd_vma addend = 0;
4950 struct elf_aarch64_local_symbol *locals
4951 = elf_aarch64_locals (input_bfd);
4952
4953 if (locals == NULL)
4954 {
4955 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4956 (*_bfd_error_handler)
4957 (_("%B: Local symbol descriptor table be NULL when applying "
4958 "relocation %s against local symbol"),
4959 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
4960 abort ();
4961 }
4962
4963 off = symbol_got_offset (input_bfd, h, r_symndx);
4964 base_got = globals->root.sgot;
4965 bfd_vma got_entry_addr = (base_got->output_section->vma
4966 + base_got->output_offset + off);
4967
4968 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4969 {
4970 bfd_put_64 (output_bfd, value, base_got->contents + off);
4971
4972 if (info->shared)
4973 {
4974 asection *s;
4975 Elf_Internal_Rela outrel;
4976
4977 /* For a local symbol, we have done the absolute relocation at the
4978 static linking stage. For a shared library, however, we need to
4979 update the content of the GOT entry according to the shared
4980 object's load base address, so we generate an
4981 R_AARCH64_RELATIVE reloc for the dynamic linker. */
4982 s = globals->root.srelgot;
4983 if (s == NULL)
4984 abort ();
4985
4986 outrel.r_offset = got_entry_addr;
4987 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
4988 outrel.r_addend = value;
4989 elf_append_rela (output_bfd, s, &outrel);
4990 }
4991
4992 symbol_got_offset_mark (input_bfd, h, r_symndx);
4993 }
4994
4995 /* Update the relocation value to the GOT entry address, as we have
4996 transformed the direct data access into an indirect access through the GOT. */
4997 value = got_entry_addr;
4998
4999 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5000 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5001 addend = base_got->output_section->vma + base_got->output_offset;
5002
5003 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5004 addend, weak_undef_p);
5005 }
5006
5007 break;
5008
5009 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5010 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5011 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5012 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5013 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5014 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5015 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5016 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5017 if (globals->root.sgot == NULL)
5018 return bfd_reloc_notsupported;
5019
5020 value = (symbol_got_offset (input_bfd, h, r_symndx)
5021 + globals->root.sgot->output_section->vma
5022 + globals->root.sgot->output_offset);
5023
5024 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5025 0, weak_undef_p);
5026 *unresolved_reloc_p = FALSE;
5027 break;
5028
5029 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5030 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5031 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5032 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5035 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5036 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5037 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5038 signed_addend - tpoff_base (info),
5039 weak_undef_p);
5040 *unresolved_reloc_p = FALSE;
5041 break;
5042
5043 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5044 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5045 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5046 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5047 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5048 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5049 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5050 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5051 if (globals->root.sgot == NULL)
5052 return bfd_reloc_notsupported;
5053 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5054 + globals->root.sgotplt->output_section->vma
5055 + globals->root.sgotplt->output_offset
5056 + globals->sgotplt_jump_table_size);
5057
5058 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5059 0, weak_undef_p);
5060 *unresolved_reloc_p = FALSE;
5061 break;
5062
5063 default:
5064 return bfd_reloc_notsupported;
5065 }
5066
5067 if (saved_addend)
5068 *saved_addend = value;
5069
5070 /* Only apply the final relocation in a sequence. */
5071 if (save_addend)
5072 return bfd_reloc_continue;
5073
5074 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5075 howto, value);
5076 }
5077
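/* A note on the save_addend / saved_addend protocol above: when
   several relocations share one r_offset, the partially resolved
   value is carried between calls in *saved_addend and every call made
   with save_addend set returns bfd_reloc_continue without patching;
   only the final relocation of the sequence (save_addend clear)
   actually writes the instruction via _bfd_aarch64_elf_put_addend.  */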
5078 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5079 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5080 link.
5081
5082 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5083 is to then call final_link_relocate. Return other values in the
5084 case of error. */
5085
5086 static bfd_reloc_status_type
5087 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5088 bfd *input_bfd, bfd_byte *contents,
5089 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5090 {
5091 bfd_boolean is_local = h == NULL;
5092 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5093 unsigned long insn;
5094
5095 BFD_ASSERT (globals && input_bfd && contents && rel);
5096
5097 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5098 {
5099 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5100 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5101 if (is_local)
5102 {
5103 /* GD->LE relaxation:
5104 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5105 or
5106 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5107 */
5108 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5109 return bfd_reloc_continue;
5110 }
5111 else
5112 {
5113 /* GD->IE relaxation:
5114 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5115 or
5116 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5117 */
5118 return bfd_reloc_continue;
5119 }
5120
5121 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5122 BFD_ASSERT (0);
5123 break;
5124
5125 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5126 if (is_local)
5127 {
5128 /* Tiny TLSDESC->LE relaxation:
5129 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5130 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5131 .tlsdesccall var
5132 blr x1 => nop
5133 */
5134 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5135 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5136
5137 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5138 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5139 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5140
5141 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5142 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5143 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5144 return bfd_reloc_continue;
5145 }
5146 else
5147 {
5148 /* Tiny TLSDESC->IE relaxation:
5149 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5150 adr x0, :tlsdesc:var => nop
5151 .tlsdesccall var
5152 blr x1 => nop
5153 */
5154 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5155 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5156
5157 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5158 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5159
5160 bfd_putl32 (0x58000000, contents + rel->r_offset);
5161 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5162 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5163 return bfd_reloc_continue;
5164 }
5165
5166 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5167 if (is_local)
5168 {
5169 /* Tiny GD->LE relaxation:
5170 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5171 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5172 nop => add x0, x0, #:tprel_lo12_nc:x
5173 */
5174
5175 /* First kill the tls_get_addr reloc on the bl instruction. */
5176 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5177
5178 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5179 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5180 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5181
5182 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5183 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5184 rel[1].r_offset = rel->r_offset + 8;
5185
5186 /* Move the current relocation to the second instruction in
5187 the sequence. */
5188 rel->r_offset += 4;
5189 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5190 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5191 return bfd_reloc_continue;
5192 }
5193 else
5194 {
5195 /* Tiny GD->IE relaxation:
5196 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5197 bl __tls_get_addr => mrs x1, tpidr_el0
5198 nop => add x0, x0, x1
5199 */
5200
5201 /* First kill the tls_get_addr reloc on the bl instruction. */
5202 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5203 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5204
5205 bfd_putl32 (0x58000000, contents + rel->r_offset);
5206 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5207 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5208 return bfd_reloc_continue;
5209 }
5210
5211 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5212 return bfd_reloc_continue;
5213
5214 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5215 if (is_local)
5216 {
5217 /* GD->LE relaxation:
5218 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5219 */
5220 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5221 return bfd_reloc_continue;
5222 }
5223 else
5224 {
5225 /* GD->IE relaxation:
5226 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5227 */
5228 insn = bfd_getl32 (contents + rel->r_offset);
5229 insn &= 0xffffffe0;
5230 bfd_putl32 (insn, contents + rel->r_offset);
5231 return bfd_reloc_continue;
5232 }
5233
5234 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5235 if (is_local)
5236 {
5237 /* GD->LE relaxation
5238 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5239 bl __tls_get_addr => mrs x1, tpidr_el0
5240 nop => add x0, x1, x0
5241 */
5242
5243 /* First kill the tls_get_addr reloc on the bl instruction. */
5244 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5245 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5246
5247 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5248 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5249 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5250 return bfd_reloc_continue;
5251 }
5252 else
5253 {
5254 /* GD->IE relaxation
5255 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5256 BL __tls_get_addr => mrs x1, tpidr_el0
5257 R_AARCH64_CALL26
5258 NOP => add x0, x1, x0
5259 */
5260
5261 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5262
5263 /* Remove the relocation on the BL instruction. */
5264 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5265
5266 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5267
5268 /* We choose to fixup the BL and NOP instructions using the
5269 offset from the second relocation to allow flexibility in
5270 scheduling instructions between the ADD and BL. */
5271 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5272 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5273 return bfd_reloc_continue;
5274 }
5275
5276 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5277 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5278 /* GD->IE/LE relaxation:
5279 add x0, x0, #:tlsdesc_lo12:var => nop
5280 blr xd => nop
5281 */
5282 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5283 return bfd_reloc_ok;
5284
5285 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5286 /* IE->LE relaxation:
5287 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5288 */
5289 if (is_local)
5290 {
5291 insn = bfd_getl32 (contents + rel->r_offset);
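/* 0xd2a00000 is movz xd, #0x0, lsl #16; keep the destination register of
   the original ADRP and let the :tprel_g1: relocation supply the immediate. */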
5292 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5293 }
5294 return bfd_reloc_continue;
5295
5296 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5297 /* IE->LE relaxation:
5298 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5299 */
5300 if (is_local)
5301 {
5302 insn = bfd_getl32 (contents + rel->r_offset);
5303 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5304 }
5305 return bfd_reloc_continue;
5306
5307 default:
5308 return bfd_reloc_continue;
5309 }
5310
5311 return bfd_reloc_ok;
5312 }
5313
5314 /* Relocate an AArch64 ELF section. */
5315
5316 static bfd_boolean
5317 elfNN_aarch64_relocate_section (bfd *output_bfd,
5318 struct bfd_link_info *info,
5319 bfd *input_bfd,
5320 asection *input_section,
5321 bfd_byte *contents,
5322 Elf_Internal_Rela *relocs,
5323 Elf_Internal_Sym *local_syms,
5324 asection **local_sections)
5325 {
5326 Elf_Internal_Shdr *symtab_hdr;
5327 struct elf_link_hash_entry **sym_hashes;
5328 Elf_Internal_Rela *rel;
5329 Elf_Internal_Rela *relend;
5330 const char *name;
5331 struct elf_aarch64_link_hash_table *globals;
5332 bfd_boolean save_addend = FALSE;
5333 bfd_vma addend = 0;
5334
5335 globals = elf_aarch64_hash_table (info);
5336
5337 symtab_hdr = &elf_symtab_hdr (input_bfd);
5338 sym_hashes = elf_sym_hashes (input_bfd);
5339
5340 rel = relocs;
5341 relend = relocs + input_section->reloc_count;
5342 for (; rel < relend; rel++)
5343 {
5344 unsigned int r_type;
5345 bfd_reloc_code_real_type bfd_r_type;
5346 bfd_reloc_code_real_type relaxed_bfd_r_type;
5347 reloc_howto_type *howto;
5348 unsigned long r_symndx;
5349 Elf_Internal_Sym *sym;
5350 asection *sec;
5351 struct elf_link_hash_entry *h;
5352 bfd_vma relocation;
5353 bfd_reloc_status_type r;
5354 arelent bfd_reloc;
5355 char sym_type;
5356 bfd_boolean unresolved_reloc = FALSE;
5357 char *error_message = NULL;
5358
5359 r_symndx = ELFNN_R_SYM (rel->r_info);
5360 r_type = ELFNN_R_TYPE (rel->r_info);
5361
5362 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5363 howto = bfd_reloc.howto;
5364
5365 if (howto == NULL)
5366 {
5367 (*_bfd_error_handler)
5368 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5369 input_bfd, input_section, r_type);
5370 return FALSE;
5371 }
5372 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5373
5374 h = NULL;
5375 sym = NULL;
5376 sec = NULL;
5377
5378 if (r_symndx < symtab_hdr->sh_info)
5379 {
5380 sym = local_syms + r_symndx;
5381 sym_type = ELFNN_ST_TYPE (sym->st_info);
5382 sec = local_sections[r_symndx];
5383
5384 /* An object file might have a reference to a local
5385 undefined symbol. This is a daft object file, but we
5386 should at least do something about it. */
5387 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5388 && bfd_is_und_section (sec)
5389 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5390 {
5391 if (!info->callbacks->undefined_symbol
5392 (info, bfd_elf_string_from_elf_section
5393 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5394 input_bfd, input_section, rel->r_offset, TRUE))
5395 return FALSE;
5396 }
5397
5398 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5399
5400 /* Relocate against local STT_GNU_IFUNC symbol. */
5401 if (!info->relocatable
5402 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5403 {
5404 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5405 rel, FALSE);
5406 if (h == NULL)
5407 abort ();
5408
5409 /* Set STT_GNU_IFUNC symbol value. */
5410 h->root.u.def.value = sym->st_value;
5411 h->root.u.def.section = sec;
5412 }
5413 }
5414 else
5415 {
5416 bfd_boolean warned, ignored;
5417
5418 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5419 r_symndx, symtab_hdr, sym_hashes,
5420 h, sec, relocation,
5421 unresolved_reloc, warned, ignored);
5422
5423 sym_type = h->type;
5424 }
5425
5426 if (sec != NULL && discarded_section (sec))
5427 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5428 rel, 1, relend, howto, 0, contents);
5429
5430 if (info->relocatable)
5431 continue;
5432
5433 if (h != NULL)
5434 name = h->root.root.string;
5435 else
5436 {
5437 name = (bfd_elf_string_from_elf_section
5438 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5439 if (name == NULL || *name == '\0')
5440 name = bfd_section_name (input_bfd, sec);
5441 }
5442
5443 if (r_symndx != 0
5444 && r_type != R_AARCH64_NONE
5445 && r_type != R_AARCH64_NULL
5446 && (h == NULL
5447 || h->root.type == bfd_link_hash_defined
5448 || h->root.type == bfd_link_hash_defweak)
5449 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5450 {
5451 (*_bfd_error_handler)
5452 ((sym_type == STT_TLS
5453 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5454 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5455 input_bfd,
5456 input_section, (long) rel->r_offset, howto->name, name);
5457 }
5458
5459 /* We relax only if we can see that there can be a valid transition
5460 from a reloc type to another.
5461 We call elfNN_aarch64_final_link_relocate unless we're completely
5462 done, i.e., the relaxation produced the final output we want. */
5463
5464 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5465 h, r_symndx);
5466 if (relaxed_bfd_r_type != bfd_r_type)
5467 {
5468 bfd_r_type = relaxed_bfd_r_type;
5469 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5470 BFD_ASSERT (howto != NULL);
5471 r_type = howto->type;
5472 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5473 unresolved_reloc = 0;
5474 }
5475 else
5476 r = bfd_reloc_continue;
5477
5478 /* There may be multiple consecutive relocations for the
5479 same offset. In that case we are supposed to treat the
5480 output of each relocation as the addend for the next. */
5481 if (rel + 1 < relend
5482 && rel->r_offset == rel[1].r_offset
5483 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5484 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5485 save_addend = TRUE;
5486 else
5487 save_addend = FALSE;
5488
5489 if (r == bfd_reloc_continue)
5490 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5491 input_section, contents, rel,
5492 relocation, info, sec,
5493 h, &unresolved_reloc,
5494 save_addend, &addend, sym);
5495
5496 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5497 {
5498 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5499 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5500 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5501 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5502 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5503 {
5504 bfd_boolean need_relocs = FALSE;
5505 bfd_byte *loc;
5506 int indx;
5507 bfd_vma off;
5508
5509 off = symbol_got_offset (input_bfd, h, r_symndx);
5510 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5511
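/* A dynamic relocation is needed when producing a shared object or when
   the symbol is dynamic; the exception is an undefined weak symbol with
   non-default visibility, whose GOT entry can be resolved statically. */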
5512 need_relocs =
5513 (info->shared || indx != 0) &&
5514 (h == NULL
5515 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5516 || h->root.type != bfd_link_hash_undefweak);
5517
5518 BFD_ASSERT (globals->root.srelgot != NULL);
5519
5520 if (need_relocs)
5521 {
5522 Elf_Internal_Rela rela;
5523 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5524 rela.r_addend = 0;
5525 rela.r_offset = globals->root.sgot->output_section->vma +
5526 globals->root.sgot->output_offset + off;
5527
5528
5529 loc = globals->root.srelgot->contents;
5530 loc += globals->root.srelgot->reloc_count++
5531 * RELOC_SIZE (globals);
5532 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5533
5534 if (elfNN_aarch64_bfd_reloc_from_type (r_type)
5535 == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
5536 {
5537 /* For local dynamic, don't generate DTPREL in any case.
5538 Initialize the DTPREL slot to zero, so that we get the module
5539 base address when the runtime TLS resolver is invoked. */
5540 bfd_put_NN (output_bfd, 0,
5541 globals->root.sgot->contents + off
5542 + GOT_ENTRY_SIZE);
5543 }
5544 else if (indx == 0)
5545 {
5546 bfd_put_NN (output_bfd,
5547 relocation - dtpoff_base (info),
5548 globals->root.sgot->contents + off
5549 + GOT_ENTRY_SIZE);
5550 }
5551 else
5552 {
5553 /* This TLS symbol is global. We emit a
5554 relocation to fix up the TLS offset at load
5555 time. */
5556 rela.r_info =
5557 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5558 rela.r_addend = 0;
5559 rela.r_offset =
5560 (globals->root.sgot->output_section->vma
5561 + globals->root.sgot->output_offset + off
5562 + GOT_ENTRY_SIZE);
5563
5564 loc = globals->root.srelgot->contents;
5565 loc += globals->root.srelgot->reloc_count++
5566 * RELOC_SIZE (globals);
5567 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5568 bfd_put_NN (output_bfd, (bfd_vma) 0,
5569 globals->root.sgot->contents + off
5570 + GOT_ENTRY_SIZE);
5571 }
5572 }
5573 else
5574 {
5575 bfd_put_NN (output_bfd, (bfd_vma) 1,
5576 globals->root.sgot->contents + off);
5577 bfd_put_NN (output_bfd,
5578 relocation - dtpoff_base (info),
5579 globals->root.sgot->contents + off
5580 + GOT_ENTRY_SIZE);
5581 }
5582
5583 symbol_got_offset_mark (input_bfd, h, r_symndx);
5584 }
5585 break;
5586
5587 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5588 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5589 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5590 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5591 {
5592 bfd_boolean need_relocs = FALSE;
5593 bfd_byte *loc;
5594 int indx;
5595 bfd_vma off;
5596
5597 off = symbol_got_offset (input_bfd, h, r_symndx);
5598
5599 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5600
5601 need_relocs =
5602 (info->shared || indx != 0) &&
5603 (h == NULL
5604 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5605 || h->root.type != bfd_link_hash_undefweak);
5606
5607 BFD_ASSERT (globals->root.srelgot != NULL);
5608
5609 if (need_relocs)
5610 {
5611 Elf_Internal_Rela rela;
5612
5613 if (indx == 0)
5614 rela.r_addend = relocation - dtpoff_base (info);
5615 else
5616 rela.r_addend = 0;
5617
5618 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5619 rela.r_offset = globals->root.sgot->output_section->vma +
5620 globals->root.sgot->output_offset + off;
5621
5622 loc = globals->root.srelgot->contents;
5623 loc += globals->root.srelgot->reloc_count++
5624 * RELOC_SIZE (globals);
5625
5626 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5627
5628 bfd_put_NN (output_bfd, rela.r_addend,
5629 globals->root.sgot->contents + off);
5630 }
5631 else
5632 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5633 globals->root.sgot->contents + off);
5634
5635 symbol_got_offset_mark (input_bfd, h, r_symndx);
5636 }
5637 break;
5638
5639 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5640 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5641 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5642 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5643 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5644 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5645 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5646 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5647 break;
5648
5649 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5650 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5651 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5652 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5653 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5654 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5655 {
5656 bfd_boolean need_relocs = FALSE;
5657 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5658 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5659
5660 need_relocs = (h == NULL
5661 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5662 || h->root.type != bfd_link_hash_undefweak);
5663
5664 BFD_ASSERT (globals->root.srelgot != NULL);
5665 BFD_ASSERT (globals->root.sgot != NULL);
5666
5667 if (need_relocs)
5668 {
5669 bfd_byte *loc;
5670 Elf_Internal_Rela rela;
5671 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5672
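/* TLSDESC GOT entries live in .got.plt, after the reserved jump table
   slots; hence the sgotplt_jump_table_size offset below. */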
5673 rela.r_addend = 0;
5674 rela.r_offset = (globals->root.sgotplt->output_section->vma
5675 + globals->root.sgotplt->output_offset
5676 + off + globals->sgotplt_jump_table_size);
5677
5678 if (indx == 0)
5679 rela.r_addend = relocation - dtpoff_base (info);
5680
5681 /* Allocate the next available slot in the PLT reloc
5682 section to hold our R_AARCH64_TLSDESC; the next
5683 available slot is determined from reloc_count,
5684 which we step. But note, reloc_count was
5685 artificially moved down while allocating slots for
5686 real PLT relocs such that all of the PLT relocs
5687 will fit above the initial reloc_count and the
5688 extra stuff will fit below. */
5689 loc = globals->root.srelplt->contents;
5690 loc += globals->root.srelplt->reloc_count++
5691 * RELOC_SIZE (globals);
5692
5693 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5694
5695 bfd_put_NN (output_bfd, (bfd_vma) 0,
5696 globals->root.sgotplt->contents + off +
5697 globals->sgotplt_jump_table_size);
5698 bfd_put_NN (output_bfd, (bfd_vma) 0,
5699 globals->root.sgotplt->contents + off +
5700 globals->sgotplt_jump_table_size +
5701 GOT_ENTRY_SIZE);
5702 }
5703
5704 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5705 }
5706 break;
5707 default:
5708 break;
5709 }
5710
5711 if (!save_addend)
5712 addend = 0;
5713
5714
5715 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5716 because such sections are not SEC_ALLOC and thus ld.so will
5717 not process them. */
5718 if (unresolved_reloc
5719 && !((input_section->flags & SEC_DEBUGGING) != 0
5720 && h->def_dynamic)
5721 && _bfd_elf_section_offset (output_bfd, info, input_section,
5722 +rel->r_offset) != (bfd_vma) - 1)
5723 {
5724 (*_bfd_error_handler)
5725 (_
5726 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5727 input_bfd, input_section, (long) rel->r_offset, howto->name,
5728 h->root.root.string);
5729 return FALSE;
5730 }
5731
5732 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5733 {
5734 switch (r)
5735 {
5736 case bfd_reloc_overflow:
5737 if (!(*info->callbacks->reloc_overflow)
5738 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5739 input_bfd, input_section, rel->r_offset))
5740 return FALSE;
5741 break;
5742
5743 case bfd_reloc_undefined:
5744 if (!((*info->callbacks->undefined_symbol)
5745 (info, name, input_bfd, input_section,
5746 rel->r_offset, TRUE)))
5747 return FALSE;
5748 break;
5749
5750 case bfd_reloc_outofrange:
5751 error_message = _("out of range");
5752 goto common_error;
5753
5754 case bfd_reloc_notsupported:
5755 error_message = _("unsupported relocation");
5756 goto common_error;
5757
5758 case bfd_reloc_dangerous:
5759 /* error_message should already be set. */
5760 goto common_error;
5761
5762 default:
5763 error_message = _("unknown error");
5764 /* Fall through. */
5765
5766 common_error:
5767 BFD_ASSERT (error_message != NULL);
5768 if (!((*info->callbacks->reloc_dangerous)
5769 (info, error_message, input_bfd, input_section,
5770 rel->r_offset)))
5771 return FALSE;
5772 break;
5773 }
5774 }
5775 }
5776
5777 return TRUE;
5778 }
5779
5780 /* Set the right machine number. */
5781
5782 static bfd_boolean
5783 elfNN_aarch64_object_p (bfd *abfd)
5784 {
5785 #if ARCH_SIZE == 32
5786 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5787 #else
5788 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5789 #endif
5790 return TRUE;
5791 }
5792
5793 /* Function to keep AArch64 specific flags in the ELF header. */
5794
5795 static bfd_boolean
5796 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5797 {
5798 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5799 {
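/* The flags have already been initialised with a different value;
   nothing is done here. */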
5800 }
5801 else
5802 {
5803 elf_elfheader (abfd)->e_flags = flags;
5804 elf_flags_init (abfd) = TRUE;
5805 }
5806
5807 return TRUE;
5808 }
5809
5810 /* Merge backend specific data from an object file to the output
5811 object file when linking. */
5812
5813 static bfd_boolean
5814 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5815 {
5816 flagword out_flags;
5817 flagword in_flags;
5818 bfd_boolean flags_compatible = TRUE;
5819 asection *sec;
5820
5821 /* Check if we have the same endianness. */
5822 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5823 return FALSE;
5824
5825 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5826 return TRUE;
5827
5828 /* The input BFD must have had its flags initialised. */
5829 /* The following seems bogus to me -- the flags are initialised in
5830 the assembler, but I don't think an elf_flags_init field is
5831 written into the object. */
5832 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5833
5834 in_flags = elf_elfheader (ibfd)->e_flags;
5835 out_flags = elf_elfheader (obfd)->e_flags;
5836
5837 if (!elf_flags_init (obfd))
5838 {
5839 /* If the input is the default architecture and had the default
5840 flags then do not bother setting the flags for the output
5841 architecture; instead allow future merges to do this. If no
5842 future merges ever set these flags then they will retain their
5843 uninitialised values which, surprise surprise, correspond
5844 to the default values. */
5845 if (bfd_get_arch_info (ibfd)->the_default
5846 && elf_elfheader (ibfd)->e_flags == 0)
5847 return TRUE;
5848
5849 elf_flags_init (obfd) = TRUE;
5850 elf_elfheader (obfd)->e_flags = in_flags;
5851
5852 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5853 && bfd_get_arch_info (obfd)->the_default)
5854 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5855 bfd_get_mach (ibfd));
5856
5857 return TRUE;
5858 }
5859
5860 /* Identical flags must be compatible. */
5861 if (in_flags == out_flags)
5862 return TRUE;
5863
5864 /* Check to see if the input BFD actually contains any sections. If
5865 not, its flags may not have been initialised either, but it
5866 cannot actually cause any incompatibility. Do not short-circuit
5867 dynamic objects; their section list may be emptied by
5868 elf_link_add_object_symbols.
5869 
5870 Also check to see if there are no code sections in the input.
5871 In this case there is no need to check for code-specific flags.
5872 XXX - do we need to worry about floating-point format compatibility
5873 in data sections? */
5874 if (!(ibfd->flags & DYNAMIC))
5875 {
5876 bfd_boolean null_input_bfd = TRUE;
5877 bfd_boolean only_data_sections = TRUE;
5878
5879 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5880 {
5881 if ((bfd_get_section_flags (ibfd, sec)
5882 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5883 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5884 only_data_sections = FALSE;
5885
5886 null_input_bfd = FALSE;
5887 break;
5888 }
5889
5890 if (null_input_bfd || only_data_sections)
5891 return TRUE;
5892 }
5893
5894 return flags_compatible;
5895 }
5896
5897 /* Display the flags field. */
5898
5899 static bfd_boolean
5900 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5901 {
5902 FILE *file = (FILE *) ptr;
5903 unsigned long flags;
5904
5905 BFD_ASSERT (abfd != NULL && ptr != NULL);
5906
5907 /* Print normal ELF private data. */
5908 _bfd_elf_print_private_bfd_data (abfd, ptr);
5909
5910 flags = elf_elfheader (abfd)->e_flags;
5911 /* Ignore init flag - it may not be set, despite the flags field
5912 containing valid data. */
5913
5914 /* xgettext:c-format */
5915 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5916
5917 if (flags)
5918 fprintf (file, _("<Unrecognised flag bits set>"));
5919
5920 fputc ('\n', file);
5921
5922 return TRUE;
5923 }
5924
5925 /* Update the got entry reference counts for the section being removed. */
5926
5927 static bfd_boolean
5928 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5929 struct bfd_link_info *info,
5930 asection *sec,
5931 const Elf_Internal_Rela * relocs)
5932 {
5933 struct elf_aarch64_link_hash_table *htab;
5934 Elf_Internal_Shdr *symtab_hdr;
5935 struct elf_link_hash_entry **sym_hashes;
5936 struct elf_aarch64_local_symbol *locals;
5937 const Elf_Internal_Rela *rel, *relend;
5938
5939 if (info->relocatable)
5940 return TRUE;
5941
5942 htab = elf_aarch64_hash_table (info);
5943
5944 if (htab == NULL)
5945 return FALSE;
5946
5947 elf_section_data (sec)->local_dynrel = NULL;
5948
5949 symtab_hdr = &elf_symtab_hdr (abfd);
5950 sym_hashes = elf_sym_hashes (abfd);
5951
5952 locals = elf_aarch64_locals (abfd);
5953
5954 relend = relocs + sec->reloc_count;
5955 for (rel = relocs; rel < relend; rel++)
5956 {
5957 unsigned long r_symndx;
5958 unsigned int r_type;
5959 struct elf_link_hash_entry *h = NULL;
5960
5961 r_symndx = ELFNN_R_SYM (rel->r_info);
5962
5963 if (r_symndx >= symtab_hdr->sh_info)
5964 {
5965
5966 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5967 while (h->root.type == bfd_link_hash_indirect
5968 || h->root.type == bfd_link_hash_warning)
5969 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5970 }
5971 else
5972 {
5973 Elf_Internal_Sym *isym;
5974
5975 /* A local symbol. */
5976 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5977 abfd, r_symndx);
5978
5979 /* Check relocation against local STT_GNU_IFUNC symbol. */
5980 if (isym != NULL
5981 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5982 {
5983 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5984 if (h == NULL)
5985 abort ();
5986 }
5987 }
5988
5989 if (h)
5990 {
5991 struct elf_aarch64_link_hash_entry *eh;
5992 struct elf_dyn_relocs **pp;
5993 struct elf_dyn_relocs *p;
5994
5995 eh = (struct elf_aarch64_link_hash_entry *) h;
5996
5997 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5998 if (p->sec == sec)
5999 {
6000 /* Everything must go for SEC. */
6001 *pp = p->next;
6002 break;
6003 }
6004 }
6005
6006 r_type = ELFNN_R_TYPE (rel->r_info);
6007 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6008 {
6009 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6010 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6011 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6012 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6013 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6014 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6015 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6016 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6017 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6018 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6019 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6020 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6021 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6022 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6023 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6024 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6025 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6026 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6027 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6028 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6029 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6030 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6031 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6032 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6035 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6036 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6037 if (h != NULL)
6038 {
6039 if (h->got.refcount > 0)
6040 h->got.refcount -= 1;
6041
6042 if (h->type == STT_GNU_IFUNC)
6043 {
6044 if (h->plt.refcount > 0)
6045 h->plt.refcount -= 1;
6046 }
6047 }
6048 else if (locals != NULL)
6049 {
6050 if (locals[r_symndx].got_refcount > 0)
6051 locals[r_symndx].got_refcount -= 1;
6052 }
6053 break;
6054
6055 case BFD_RELOC_AARCH64_CALL26:
6056 case BFD_RELOC_AARCH64_JUMP26:
6057 /* If this is a local symbol then we resolve it
6058 directly without creating a PLT entry. */
6059 if (h == NULL)
6060 continue;
6061
6062 if (h->plt.refcount > 0)
6063 h->plt.refcount -= 1;
6064 break;
6065
6066 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6067 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6068 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6069 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6070 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6071 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6072 case BFD_RELOC_AARCH64_MOVW_G3:
6073 case BFD_RELOC_AARCH64_NN:
6074 if (h != NULL && info->executable)
6075 {
6076 if (h->plt.refcount > 0)
6077 h->plt.refcount -= 1;
6078 }
6079 break;
6080
6081 default:
6082 break;
6083 }
6084 }
6085
6086 return TRUE;
6087 }
6088
6089 /* Adjust a symbol defined by a dynamic object and referenced by a
6090 regular object. The current definition is in some section of the
6091 dynamic object, but we're not including those sections. We have to
6092 change the definition to something the rest of the link can
6093 understand. */
6094
6095 static bfd_boolean
6096 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6097 struct elf_link_hash_entry *h)
6098 {
6099 struct elf_aarch64_link_hash_table *htab;
6100 asection *s;
6101
6102 /* If this is a function, put it in the procedure linkage table. We
6103 will fill in the contents of the procedure linkage table later,
6104 when we know the address of the .got section. */
6105 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6106 {
6107 if (h->plt.refcount <= 0
6108 || (h->type != STT_GNU_IFUNC
6109 && (SYMBOL_CALLS_LOCAL (info, h)
6110 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6111 && h->root.type == bfd_link_hash_undefweak))))
6112 {
6113 /* This case can occur if we saw a CALL26 reloc in
6114 an input file, but the symbol wasn't referred to
6115 by a dynamic object or all references were
6116 garbage collected. In which case we can end up
6117 resolving the calls directly, without a PLT entry. */
6118 h->plt.offset = (bfd_vma) - 1;
6119 h->needs_plt = 0;
6120 }
6121
6122 return TRUE;
6123 }
6124 else
6125 /* Otherwise, reset to -1. */
6126 h->plt.offset = (bfd_vma) - 1;
6127
6128
6129 /* If this is a weak symbol, and there is a real definition, the
6130 processor independent code will have arranged for us to see the
6131 real definition first, and we can just use the same value. */
6132 if (h->u.weakdef != NULL)
6133 {
6134 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6135 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6136 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6137 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6138 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6139 h->non_got_ref = h->u.weakdef->non_got_ref;
6140 return TRUE;
6141 }
6142
6143 /* If we are creating a shared library, we must presume that the
6144 only references to the symbol are via the global offset table.
6145 For such cases we need not do anything here; the relocations will
6146 be handled correctly by relocate_section. */
6147 if (info->shared)
6148 return TRUE;
6149
6150 /* If there are no references to this symbol that do not use the
6151 GOT, we don't need to generate a copy reloc. */
6152 if (!h->non_got_ref)
6153 return TRUE;
6154
6155 /* If -z nocopyreloc was given, we won't generate them either. */
6156 if (info->nocopyreloc)
6157 {
6158 h->non_got_ref = 0;
6159 return TRUE;
6160 }
6161
6162 /* We must allocate the symbol in our .dynbss section, which will
6163 become part of the .bss section of the executable. There will be
6164 an entry for this symbol in the .dynsym section. The dynamic
6165 object will contain position independent code, so all references
6166 from the dynamic object to this symbol will go through the global
6167 offset table. The dynamic linker will use the .dynsym entry to
6168 determine the address it must put in the global offset table, so
6169 both the dynamic object and the regular object will refer to the
6170 same memory location for the variable. */
6171
6172 htab = elf_aarch64_hash_table (info);
6173
6174 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6175 to copy the initial value out of the dynamic object and into the
6176 runtime process image. */
6177 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6178 {
6179 htab->srelbss->size += RELOC_SIZE (htab);
6180 h->needs_copy = 1;
6181 }
6182
6183 s = htab->sdynbss;
6184
6185 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6186
6187 }
6188
6189 static bfd_boolean
6190 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6191 {
6192 struct elf_aarch64_local_symbol *locals;
6193 locals = elf_aarch64_locals (abfd);
6194 if (locals == NULL)
6195 {
6196 locals = (struct elf_aarch64_local_symbol *)
6197 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6198 if (locals == NULL)
6199 return FALSE;
6200 elf_aarch64_locals (abfd) = locals;
6201 }
6202 return TRUE;
6203 }
6204
6205 /* Create the .got section to hold the global offset table. */
6206
6207 static bfd_boolean
6208 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6209 {
6210 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6211 flagword flags;
6212 asection *s;
6213 struct elf_link_hash_entry *h;
6214 struct elf_link_hash_table *htab = elf_hash_table (info);
6215
6216 /* This function may be called more than once. */
6217 s = bfd_get_linker_section (abfd, ".got");
6218 if (s != NULL)
6219 return TRUE;
6220
6221 flags = bed->dynamic_sec_flags;
6222
6223 s = bfd_make_section_anyway_with_flags (abfd,
6224 (bed->rela_plts_and_copies_p
6225 ? ".rela.got" : ".rel.got"),
6226 (bed->dynamic_sec_flags
6227 | SEC_READONLY));
6228 if (s == NULL
6229 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6230 return FALSE;
6231 htab->srelgot = s;
6232
6233 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6234 if (s == NULL
6235 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6236 return FALSE;
6237 htab->sgot = s;
6238 htab->sgot->size += GOT_ENTRY_SIZE;
6239
6240 if (bed->want_got_sym)
6241 {
6242 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6243 (or .got.plt) section. We don't do this in the linker script
6244 because we don't want to define the symbol if we are not creating
6245 a global offset table. */
6246 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6247 "_GLOBAL_OFFSET_TABLE_");
6248 elf_hash_table (info)->hgot = h;
6249 if (h == NULL)
6250 return FALSE;
6251 }
6252
6253 if (bed->want_got_plt)
6254 {
6255 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6256 if (s == NULL
6257 || !bfd_set_section_alignment (abfd, s,
6258 bed->s->log_file_align))
6259 return FALSE;
6260 htab->sgotplt = s;
6261 }
6262
6263 /* The first bit of the global offset table is the header. */
6264 s->size += bed->got_header_size;
6265
6266 return TRUE;
6267 }
6268
6269 /* Look through the relocs for a section during the first phase. */
6270
6271 static bfd_boolean
6272 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6273 asection *sec, const Elf_Internal_Rela *relocs)
6274 {
6275 Elf_Internal_Shdr *symtab_hdr;
6276 struct elf_link_hash_entry **sym_hashes;
6277 const Elf_Internal_Rela *rel;
6278 const Elf_Internal_Rela *rel_end;
6279 asection *sreloc;
6280
6281 struct elf_aarch64_link_hash_table *htab;
6282
6283 if (info->relocatable)
6284 return TRUE;
6285
6286 BFD_ASSERT (is_aarch64_elf (abfd));
6287
6288 htab = elf_aarch64_hash_table (info);
6289 sreloc = NULL;
6290
6291 symtab_hdr = &elf_symtab_hdr (abfd);
6292 sym_hashes = elf_sym_hashes (abfd);
6293
6294 rel_end = relocs + sec->reloc_count;
6295 for (rel = relocs; rel < rel_end; rel++)
6296 {
6297 struct elf_link_hash_entry *h;
6298 unsigned long r_symndx;
6299 unsigned int r_type;
6300 bfd_reloc_code_real_type bfd_r_type;
6301 Elf_Internal_Sym *isym;
6302
6303 r_symndx = ELFNN_R_SYM (rel->r_info);
6304 r_type = ELFNN_R_TYPE (rel->r_info);
6305
6306 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6307 {
6308 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6309 r_symndx);
6310 return FALSE;
6311 }
6312
6313 if (r_symndx < symtab_hdr->sh_info)
6314 {
6315 /* A local symbol. */
6316 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6317 abfd, r_symndx);
6318 if (isym == NULL)
6319 return FALSE;
6320
6321 /* Check relocation against local STT_GNU_IFUNC symbol. */
6322 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6323 {
6324 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6325 TRUE);
6326 if (h == NULL)
6327 return FALSE;
6328
6329 /* Fake a STT_GNU_IFUNC symbol. */
6330 h->type = STT_GNU_IFUNC;
6331 h->def_regular = 1;
6332 h->ref_regular = 1;
6333 h->forced_local = 1;
6334 h->root.type = bfd_link_hash_defined;
6335 }
6336 else
6337 h = NULL;
6338 }
6339 else
6340 {
6341 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6342 while (h->root.type == bfd_link_hash_indirect
6343 || h->root.type == bfd_link_hash_warning)
6344 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6345
6346 /* PR15323, ref flags aren't set for references in the same
6347 object. */
6348 h->root.non_ir_ref = 1;
6349 }
6350
6351 /* Could be done earlier, if h were already available. */
6352 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6353
6354 if (h != NULL)
6355 {
6356 /* Create the ifunc sections for static executables. If we
6357 never see an indirect function symbol nor are we building
6358 a static executable, those sections will be empty and
6359 won't appear in output. */
6360 switch (bfd_r_type)
6361 {
6362 default:
6363 break;
6364
6365 case BFD_RELOC_AARCH64_ADD_LO12:
6366 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6367 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6368 case BFD_RELOC_AARCH64_CALL26:
6369 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6370 case BFD_RELOC_AARCH64_JUMP26:
6371 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6372 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6373 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6374 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6375 case BFD_RELOC_AARCH64_NN:
6376 if (htab->root.dynobj == NULL)
6377 htab->root.dynobj = abfd;
6378 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6379 return FALSE;
6380 break;
6381 }
6382
6383 /* It is referenced by a non-shared object. */
6384 h->ref_regular = 1;
6385 h->root.non_ir_ref = 1;
6386 }
6387
6388 switch (bfd_r_type)
6389 {
6390 case BFD_RELOC_AARCH64_NN:
6391
6392 /* We don't need to handle relocs into sections not going into
6393 the "real" output. */
6394 if ((sec->flags & SEC_ALLOC) == 0)
6395 break;
6396
6397 if (h != NULL)
6398 {
6399 if (!info->shared)
6400 h->non_got_ref = 1;
6401
6402 h->plt.refcount += 1;
6403 h->pointer_equality_needed = 1;
6404 }
6405
6406 /* No need to do anything if we're not creating a shared
6407 object. */
6408 if (! info->shared)
6409 break;
6410
6411 {
6412 struct elf_dyn_relocs *p;
6413 struct elf_dyn_relocs **head;
6414
6415 /* We must copy these reloc types into the output file.
6416 Create a reloc section in dynobj and make room for
6417 this reloc. */
6418 if (sreloc == NULL)
6419 {
6420 if (htab->root.dynobj == NULL)
6421 htab->root.dynobj = abfd;
6422
6423 sreloc = _bfd_elf_make_dynamic_reloc_section
6424 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6425
6426 if (sreloc == NULL)
6427 return FALSE;
6428 }
6429
6430 /* If this is a global symbol, we count the number of
6431 relocations we need for this symbol. */
6432 if (h != NULL)
6433 {
6434 struct elf_aarch64_link_hash_entry *eh;
6435 eh = (struct elf_aarch64_link_hash_entry *) h;
6436 head = &eh->dyn_relocs;
6437 }
6438 else
6439 {
6440 /* Track dynamic relocs needed for local syms too.
6441 We really need local syms available to do this
6442 easily. Oh well. */
6443
6444 asection *s;
6445 void **vpp;
6446
6447 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6448 abfd, r_symndx);
6449 if (isym == NULL)
6450 return FALSE;
6451
6452 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6453 if (s == NULL)
6454 s = sec;
6455
6456 /* Beware of type punned pointers vs strict aliasing
6457 rules. */
6458 vpp = &(elf_section_data (s)->local_dynrel);
6459 head = (struct elf_dyn_relocs **) vpp;
6460 }
6461
6462 p = *head;
6463 if (p == NULL || p->sec != sec)
6464 {
6465 bfd_size_type amt = sizeof *p;
6466 p = ((struct elf_dyn_relocs *)
6467 bfd_zalloc (htab->root.dynobj, amt));
6468 if (p == NULL)
6469 return FALSE;
6470 p->next = *head;
6471 *head = p;
6472 p->sec = sec;
6473 }
6474
6475 p->count += 1;
6476
6477 }
6478 break;
6479
6480 /* RR: We probably want to keep a consistency check that
6481 there are no dangling GOT_PAGE relocs. */
6482 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6483 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6484 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6485 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6486 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6487 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6488 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6489 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6490 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6491 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6492 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6493 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6494 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6495 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6496 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6497 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6498 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6499 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6500 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6501 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6502 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6503 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6504 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6505 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6506 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6507 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6508 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6509 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6510 {
6511 unsigned got_type;
6512 unsigned old_got_type;
6513
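/* Work out what kind of GOT entry (normal, traditional GD,
   TLS descriptor or IE) this access needs. */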
6514 got_type = aarch64_reloc_got_type (bfd_r_type);
6515
6516 if (h)
6517 {
6518 h->got.refcount += 1;
6519 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6520 }
6521 else
6522 {
6523 struct elf_aarch64_local_symbol *locals;
6524
6525 if (!elfNN_aarch64_allocate_local_symbols
6526 (abfd, symtab_hdr->sh_info))
6527 return FALSE;
6528
6529 locals = elf_aarch64_locals (abfd);
6530 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6531 locals[r_symndx].got_refcount += 1;
6532 old_got_type = locals[r_symndx].got_type;
6533 }
6534
6535 /* If a variable is accessed with both general dynamic TLS
6536 methods, two slots may be created. */
6537 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6538 got_type |= old_got_type;
6539
6540 /* We will already have issued an error message if there
6541 is a TLS/non-TLS mismatch, based on the symbol type.
6542 So just combine any TLS types needed. */
6543 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6544 && got_type != GOT_NORMAL)
6545 got_type |= old_got_type;
6546
6547 /* If the symbol is accessed by both IE and GD methods, we
6548 are able to relax. Turn off the GD flag, without
6549 messing up any other kind of TLS types that may be
6550 involved. */
6551 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6552 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6553
6554 if (old_got_type != got_type)
6555 {
6556 if (h != NULL)
6557 elf_aarch64_hash_entry (h)->got_type = got_type;
6558 else
6559 {
6560 struct elf_aarch64_local_symbol *locals;
6561 locals = elf_aarch64_locals (abfd);
6562 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6563 locals[r_symndx].got_type = got_type;
6564 }
6565 }
6566
6567 if (htab->root.dynobj == NULL)
6568 htab->root.dynobj = abfd;
6569 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6570 return FALSE;
6571 break;
6572 }
6573
6574 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6575 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6576 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6577 case BFD_RELOC_AARCH64_MOVW_G3:
6578 if (info->shared)
6579 {
6580 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6581 (*_bfd_error_handler)
6582 (_("%B: relocation %s against `%s' can not be used when making "
6583 "a shared object; recompile with -fPIC"),
6584 abfd, elfNN_aarch64_howto_table[howto_index].name,
6585 (h) ? h->root.root.string : "a local symbol");
6586 bfd_set_error (bfd_error_bad_value);
6587 return FALSE;
6588 }
6589
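/* Fall through. */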
6590 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6591 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6592 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6593 if (h != NULL && info->executable)
6594 {
6595 /* If this reloc is in a read-only section, we might
6596 need a copy reloc. We can't check reliably at this
6597 stage whether the section is read-only, as input
6598 sections have not yet been mapped to output sections.
6599 Tentatively set the flag for now, and correct in
6600 adjust_dynamic_symbol. */
6601 h->non_got_ref = 1;
6602 h->plt.refcount += 1;
6603 h->pointer_equality_needed = 1;
6604 }
6605 /* FIXME: RR need to handle these in shared libraries
6606 and essentially bomb out, as these are non-PIC
6607 relocations in shared libraries. */
6608 break;
6609
6610 case BFD_RELOC_AARCH64_CALL26:
6611 case BFD_RELOC_AARCH64_JUMP26:
6612 /* If this is a local symbol then we resolve it
6613 directly without creating a PLT entry. */
6614 if (h == NULL)
6615 continue;
6616
6617 h->needs_plt = 1;
6618 if (h->plt.refcount <= 0)
6619 h->plt.refcount = 1;
6620 else
6621 h->plt.refcount += 1;
6622 break;
6623
6624 default:
6625 break;
6626 }
6627 }
6628
6629 return TRUE;
6630 }
6631
6632 /* Treat mapping symbols as special target symbols. */
6633
6634 static bfd_boolean
6635 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6636 asymbol *sym)
6637 {
6638 return bfd_is_aarch64_special_symbol_name (sym->name,
6639 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6640 }
6641
6642 /* This is a copy of elf_find_function () from elf.c except that
6643 AArch64 mapping symbols are ignored when looking for function names. */
6644
6645 static bfd_boolean
6646 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6647 asymbol **symbols,
6648 asection *section,
6649 bfd_vma offset,
6650 const char **filename_ptr,
6651 const char **functionname_ptr)
6652 {
6653 const char *filename = NULL;
6654 asymbol *func = NULL;
6655 bfd_vma low_func = 0;
6656 asymbol **p;
6657
6658 for (p = symbols; *p != NULL; p++)
6659 {
6660 elf_symbol_type *q;
6661
6662 q = (elf_symbol_type *) *p;
6663
6664 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6665 {
6666 default:
6667 break;
6668 case STT_FILE:
6669 filename = bfd_asymbol_name (&q->symbol);
6670 break;
6671 case STT_FUNC:
6672 case STT_NOTYPE:
6673 /* Skip mapping symbols. */
6674 if ((q->symbol.flags & BSF_LOCAL)
6675 && (bfd_is_aarch64_special_symbol_name
6676 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6677 continue;
6678 /* Fall through. */
6679 if (bfd_get_section (&q->symbol) == section
6680 && q->symbol.value >= low_func && q->symbol.value <= offset)
6681 {
6682 func = (asymbol *) q;
6683 low_func = q->symbol.value;
6684 }
6685 break;
6686 }
6687 }
6688
6689 if (func == NULL)
6690 return FALSE;
6691
6692 if (filename_ptr)
6693 *filename_ptr = filename;
6694 if (functionname_ptr)
6695 *functionname_ptr = bfd_asymbol_name (func);
6696
6697 return TRUE;
6698 }
6699
6700
6701 /* Find the nearest line to a particular section and offset, for error
6702 reporting. This code is a duplicate of the code in elf.c, except
6703 that it uses aarch64_elf_find_function. */
6704
6705 static bfd_boolean
6706 elfNN_aarch64_find_nearest_line (bfd *abfd,
6707 asymbol **symbols,
6708 asection *section,
6709 bfd_vma offset,
6710 const char **filename_ptr,
6711 const char **functionname_ptr,
6712 unsigned int *line_ptr,
6713 unsigned int *discriminator_ptr)
6714 {
6715 bfd_boolean found = FALSE;
6716
6717 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6718 filename_ptr, functionname_ptr,
6719 line_ptr, discriminator_ptr,
6720 dwarf_debug_sections, 0,
6721 &elf_tdata (abfd)->dwarf2_find_line_info))
6722 {
6723 if (!*functionname_ptr)
6724 aarch64_elf_find_function (abfd, symbols, section, offset,
6725 *filename_ptr ? NULL : filename_ptr,
6726 functionname_ptr);
6727
6728 return TRUE;
6729 }
6730
6731 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6732 toolchain uses DWARF1. */
6733
6734 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6735 &found, filename_ptr,
6736 functionname_ptr, line_ptr,
6737 &elf_tdata (abfd)->line_info))
6738 return FALSE;
6739
6740 if (found && (*functionname_ptr || *line_ptr))
6741 return TRUE;
6742
6743 if (symbols == NULL)
6744 return FALSE;
6745
6746 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6747 filename_ptr, functionname_ptr))
6748 return FALSE;
6749
6750 *line_ptr = 0;
6751 return TRUE;
6752 }
6753
6754 static bfd_boolean
6755 elfNN_aarch64_find_inliner_info (bfd *abfd,
6756 const char **filename_ptr,
6757 const char **functionname_ptr,
6758 unsigned int *line_ptr)
6759 {
6760 bfd_boolean found;
6761 found = _bfd_dwarf2_find_inliner_info
6762 (abfd, filename_ptr,
6763 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6764 return found;
6765 }
6766
6767
6768 static void
6769 elfNN_aarch64_post_process_headers (bfd *abfd,
6770 struct bfd_link_info *link_info)
6771 {
6772 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6773
6774 i_ehdrp = elf_elfheader (abfd);
6775 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6776
6777 _bfd_elf_post_process_headers (abfd, link_info);
6778 }
6779
6780 static enum elf_reloc_type_class
6781 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6782 const asection *rel_sec ATTRIBUTE_UNUSED,
6783 const Elf_Internal_Rela *rela)
6784 {
6785 switch ((int) ELFNN_R_TYPE (rela->r_info))
6786 {
6787 case AARCH64_R (RELATIVE):
6788 return reloc_class_relative;
6789 case AARCH64_R (JUMP_SLOT):
6790 return reloc_class_plt;
6791 case AARCH64_R (COPY):
6792 return reloc_class_copy;
6793 default:
6794 return reloc_class_normal;
6795 }
6796 }
6797
6798 /* Handle an AArch64 specific section when reading an object file. This is
6799 called when bfd_section_from_shdr finds a section with an unknown
6800 type. */
6801
6802 static bfd_boolean
6803 elfNN_aarch64_section_from_shdr (bfd *abfd,
6804 Elf_Internal_Shdr *hdr,
6805 const char *name, int shindex)
6806 {
6807 /* There ought to be a place to keep ELF backend specific flags, but
6808 at the moment there isn't one. We just keep track of the
6809 sections by their name, instead. Fortunately, the ABI gives
6810 names for all the AArch64 specific sections, so we will probably get
6811 away with this. */
6812 switch (hdr->sh_type)
6813 {
6814 case SHT_AARCH64_ATTRIBUTES:
6815 break;
6816
6817 default:
6818 return FALSE;
6819 }
6820
6821 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6822 return FALSE;
6823
6824 return TRUE;
6825 }
6826
6827 /* A structure used to record a list of sections, independently
6828 of the next and prev fields in the asection structure. */
6829 typedef struct section_list
6830 {
6831 asection *sec;
6832 struct section_list *next;
6833 struct section_list *prev;
6834 }
6835 section_list;
6836
6837 /* Unfortunately we need to keep a list of sections for which
6838 an _aarch64_elf_section_data structure has been allocated. This
6839 is because it is possible for functions like elfNN_aarch64_write_section
6840 to be called on a section which has had an elf_data_structure
6841 allocated for it (and so the used_by_bfd field is valid) but
6842 for which the AArch64 extended version of this structure - the
6843 _aarch64_elf_section_data structure - has not been allocated. */
6844 static section_list *sections_with_aarch64_elf_section_data = NULL;
6845
6846 static void
6847 record_section_with_aarch64_elf_section_data (asection *sec)
6848 {
6849 struct section_list *entry;
6850
6851 entry = bfd_malloc (sizeof (*entry));
6852 if (entry == NULL)
6853 return;
6854 entry->sec = sec;
6855 entry->next = sections_with_aarch64_elf_section_data;
6856 entry->prev = NULL;
6857 if (entry->next != NULL)
6858 entry->next->prev = entry;
6859 sections_with_aarch64_elf_section_data = entry;
6860 }
6861
6862 static struct section_list *
6863 find_aarch64_elf_section_entry (asection *sec)
6864 {
6865 struct section_list *entry;
6866 static struct section_list *last_entry = NULL;
6867
6868 /* This is a short cut for the typical case where the sections are added
6869 to the sections_with_aarch64_elf_section_data list in forward order and
6870 then looked up here in backwards order. This makes a real difference
6871 to the ld-srec/sec64k.exp linker test. */
6872 entry = sections_with_aarch64_elf_section_data;
6873 if (last_entry != NULL)
6874 {
6875 if (last_entry->sec == sec)
6876 entry = last_entry;
6877 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6878 entry = last_entry->next;
6879 }
6880
6881 for (; entry; entry = entry->next)
6882 if (entry->sec == sec)
6883 break;
6884
6885 if (entry)
6886 /* Record the entry prior to this one - it is the entry we are
6887 most likely to want to locate next time. Also this way if we
6888 have been called from
6889 unrecord_section_with_aarch64_elf_section_data () we will not
6890 be caching a pointer that is about to be freed. */
6891 last_entry = entry->prev;
6892
6893 return entry;
6894 }
6895
6896 static void
6897 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6898 {
6899 struct section_list *entry;
6900
6901 entry = find_aarch64_elf_section_entry (sec);
6902
6903 if (entry)
6904 {
6905 if (entry->prev != NULL)
6906 entry->prev->next = entry->next;
6907 if (entry->next != NULL)
6908 entry->next->prev = entry->prev;
6909 if (entry == sections_with_aarch64_elf_section_data)
6910 sections_with_aarch64_elf_section_data = entry->next;
6911 free (entry);
6912 }
6913 }
6914
6915
6916 typedef struct
6917 {
6918 void *finfo;
6919 struct bfd_link_info *info;
6920 asection *sec;
6921 int sec_shndx;
6922 int (*func) (void *, const char *, Elf_Internal_Sym *,
6923 asection *, struct elf_link_hash_entry *);
6924 } output_arch_syminfo;
6925
6926 enum map_symbol_type
6927 {
6928 AARCH64_MAP_INSN,
6929 AARCH64_MAP_DATA
6930 };
6931
6932
6933 /* Output a single mapping symbol. */
6934
6935 static bfd_boolean
6936 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6937 enum map_symbol_type type, bfd_vma offset)
6938 {
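/* "$x" marks the start of a sequence of A64 instructions and "$d" the
   start of a sequence of data, per the AArch64 ELF mapping symbol
   convention. */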
6939 static const char *names[2] = { "$x", "$d" };
6940 Elf_Internal_Sym sym;
6941
6942 sym.st_value = (osi->sec->output_section->vma
6943 + osi->sec->output_offset + offset);
6944 sym.st_size = 0;
6945 sym.st_other = 0;
6946 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6947 sym.st_shndx = osi->sec_shndx;
6948 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6949 }
6950
6951
6952
6953 /* Output mapping symbols for PLT entries associated with H. */
6954
6955 static bfd_boolean
6956 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6957 {
6958 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6959 bfd_vma addr;
6960
6961 if (h->root.type == bfd_link_hash_indirect)
6962 return TRUE;
6963
6964 if (h->root.type == bfd_link_hash_warning)
6965 /* When warning symbols are created, they **replace** the "real"
6966 entry in the hash table, thus we never get to see the real
6967 symbol in a hash traversal. So look at it now. */
6968 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6969
6970 if (h->plt.offset == (bfd_vma) - 1)
6971 return TRUE;
6972
6973 addr = h->plt.offset;
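/* A single "$x" mapping symbol at the first entry after the PLT header
   (assumed here to be 32 bytes, hence the check against 32) covers all
   the PLT entries that follow. */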
6974 if (addr == 32)
6975 {
6976 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6977 return FALSE;
6978 }
6979 return TRUE;
6980 }
6981
6982
6983 /* Output a single local symbol for a generated stub. */
6984
6985 static bfd_boolean
6986 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6987 bfd_vma offset, bfd_vma size)
6988 {
6989 Elf_Internal_Sym sym;
6990
6991 sym.st_value = (osi->sec->output_section->vma
6992 + osi->sec->output_offset + offset);
6993 sym.st_size = size;
6994 sym.st_other = 0;
6995 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6996 sym.st_shndx = osi->sec_shndx;
6997 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6998 }
6999
7000 static bfd_boolean
7001 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7002 {
7003 struct elf_aarch64_stub_hash_entry *stub_entry;
7004 asection *stub_sec;
7005 bfd_vma addr;
7006 char *stub_name;
7007 output_arch_syminfo *osi;
7008
7009 /* Massage our args to the form they really have. */
7010 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7011 osi = (output_arch_syminfo *) in_arg;
7012
7013 stub_sec = stub_entry->stub_sec;
7014
7015 /* Ensure this stub is attached to the current section being
7016 processed. */
7017 if (stub_sec != osi->sec)
7018 return TRUE;
7019
7020 addr = (bfd_vma) stub_entry->stub_offset;
7021
7022 stub_name = stub_entry->output_name;
7023
7024 switch (stub_entry->stub_type)
7025 {
7026 case aarch64_stub_adrp_branch:
7027 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7028 sizeof (aarch64_adrp_branch_stub)))
7029 return FALSE;
7030 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7031 return FALSE;
7032 break;
7033 case aarch64_stub_long_branch:
7034 if (!elfNN_aarch64_output_stub_sym
7035 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7036 return FALSE;
7037 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7038 return FALSE;
7039 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7040 return FALSE;
7041 break;
7042 case aarch64_stub_erratum_835769_veneer:
7043 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7044 sizeof (aarch64_erratum_835769_stub)))
7045 return FALSE;
7046 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7047 return FALSE;
7048 break;
7049 case aarch64_stub_erratum_843419_veneer:
7050 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7051 sizeof (aarch64_erratum_843419_stub)))
7052 return FALSE;
7053 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7054 return FALSE;
7055 break;
7056
7057 default:
7058 abort ();
7059 }
7060
7061 return TRUE;
7062 }
7063
7064 /* Output mapping symbols for linker generated sections. */
7065
7066 static bfd_boolean
7067 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7068 struct bfd_link_info *info,
7069 void *finfo,
7070 int (*func) (void *, const char *,
7071 Elf_Internal_Sym *,
7072 asection *,
7073 struct elf_link_hash_entry
7074 *))
7075 {
7076 output_arch_syminfo osi;
7077 struct elf_aarch64_link_hash_table *htab;
7078
7079 htab = elf_aarch64_hash_table (info);
7080
7081 osi.finfo = finfo;
7082 osi.info = info;
7083 osi.func = func;
7084
7085 /* Long call stubs. */
7086 if (htab->stub_bfd && htab->stub_bfd->sections)
7087 {
7088 asection *stub_sec;
7089
7090 for (stub_sec = htab->stub_bfd->sections;
7091 stub_sec != NULL; stub_sec = stub_sec->next)
7092 {
7093 /* Ignore non-stub sections. */
7094 if (!strstr (stub_sec->name, STUB_SUFFIX))
7095 continue;
7096
7097 osi.sec = stub_sec;
7098
7099 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7100 (output_bfd, osi.sec->output_section);
7101
7102 /* The first instruction in a stub is always a branch. */
7103 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7104 return FALSE;
7105
7106 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7107 &osi);
7108 }
7109 }
7110
7111 /* Finally, output mapping symbols for the PLT. */
7112 if (!htab->root.splt || htab->root.splt->size == 0)
7113 return TRUE;
7114
7115 /* For now live without mapping symbols for the plt. */
7116 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7117 (output_bfd, htab->root.splt->output_section);
7118 osi.sec = htab->root.splt;
7119
7120 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7121 (void *) &osi);
7122
7123 return TRUE;
7124
7125 }
7126
7127 /* Allocate target specific section data. */
7128
7129 static bfd_boolean
7130 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7131 {
7132 if (!sec->used_by_bfd)
7133 {
7134 _aarch64_elf_section_data *sdata;
7135 bfd_size_type amt = sizeof (*sdata);
7136
7137 sdata = bfd_zalloc (abfd, amt);
7138 if (sdata == NULL)
7139 return FALSE;
7140 sec->used_by_bfd = sdata;
7141 }
7142
7143 record_section_with_aarch64_elf_section_data (sec);
7144
7145 return _bfd_elf_new_section_hook (abfd, sec);
7146 }
7147
7148
7149 static void
7150 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7151 asection *sec,
7152 void *ignore ATTRIBUTE_UNUSED)
7153 {
7154 unrecord_section_with_aarch64_elf_section_data (sec);
7155 }
7156
7157 static bfd_boolean
7158 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7159 {
7160 if (abfd->sections)
7161 bfd_map_over_sections (abfd,
7162 unrecord_section_via_map_over_sections, NULL);
7163
7164 return _bfd_elf_close_and_cleanup (abfd);
7165 }
7166
7167 static bfd_boolean
7168 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7169 {
7170 if (abfd->sections)
7171 bfd_map_over_sections (abfd,
7172 unrecord_section_via_map_over_sections, NULL);
7173
7174 return _bfd_free_cached_info (abfd);
7175 }
7176
7177 /* Create dynamic sections. This is different from the ARM backend in that
7178 the got, plt, gotplt and their relocation sections are all created in the
7179 standard part of the bfd elf backend. */
7180
7181 static bfd_boolean
7182 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7183 struct bfd_link_info *info)
7184 {
7185 struct elf_aarch64_link_hash_table *htab;
7186
7187 /* We need to create .got section. */
7188 if (!aarch64_elf_create_got_section (dynobj, info))
7189 return FALSE;
7190
7191 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7192 return FALSE;
7193
7194 htab = elf_aarch64_hash_table (info);
7195 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7196 if (!info->shared)
7197 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7198
7199 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7200 abort ();
7201
7202 return TRUE;
7203 }
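/* Editorial illustration (not part of the original source).  The .dynbss
   and .rela.bss sections fetched above support copy relocations: when a
   non-shared link references a data object defined in a shared library,
   space for the object is reserved in .dynbss and an R_AARCH64_COPY
   relocation is emitted into .rela.bss so that the dynamic linker copies
   the initial value at load time.  Roughly:

       executable .dynbss: foo  <-- R_AARCH64_COPY --  shared lib .data: foo

   See the h->needs_copy handling in elfNN_aarch64_finish_dynamic_symbol
   below.  */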
7204
7205
7206 /* Allocate space in .plt, .got and associated reloc sections for
7207 dynamic relocs. */
7208
7209 static bfd_boolean
7210 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7211 {
7212 struct bfd_link_info *info;
7213 struct elf_aarch64_link_hash_table *htab;
7214 struct elf_aarch64_link_hash_entry *eh;
7215 struct elf_dyn_relocs *p;
7216
7217 /* An example of a bfd_link_hash_indirect symbol is a versioned
7218 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7219 -> __gxx_personality_v0(bfd_link_hash_defined)
7220
7221 There is no need to process bfd_link_hash_indirect symbols here
7222 because we will also be presented with the concrete instance of
7223 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7224 called to copy all relevant data from the generic to the concrete
7225 symbol instance.
7226 */
7227 if (h->root.type == bfd_link_hash_indirect)
7228 return TRUE;
7229
7230 if (h->root.type == bfd_link_hash_warning)
7231 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7232
7233 info = (struct bfd_link_info *) inf;
7234 htab = elf_aarch64_hash_table (info);
7235
7236 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7237 here if it is defined and referenced in a non-shared object. */
7238 if (h->type == STT_GNU_IFUNC
7239 && h->def_regular)
7240 return TRUE;
7241 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7242 {
7243 /* Make sure this symbol is output as a dynamic symbol.
7244 Undefined weak syms won't yet be marked as dynamic. */
7245 if (h->dynindx == -1 && !h->forced_local)
7246 {
7247 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7248 return FALSE;
7249 }
7250
7251 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7252 {
7253 asection *s = htab->root.splt;
7254
7255 /* If this is the first .plt entry, make room for the special
7256 first entry. */
7257 if (s->size == 0)
7258 s->size += htab->plt_header_size;
7259
7260 h->plt.offset = s->size;
7261
7262 /* If this symbol is not defined in a regular file, and we are
7263 not generating a shared library, then set the symbol to this
7264 location in the .plt. This is required to make function
7265 pointers compare as equal between the normal executable and
7266 the shared library. */
7267 if (!info->shared && !h->def_regular)
7268 {
7269 h->root.u.def.section = s;
7270 h->root.u.def.value = h->plt.offset;
7271 }
7272
7273 /* Make room for this entry. For now we only create the
7274 small model PLT entries. We later need to find a way
7275 of relaxing into these from the large model PLT entries. */
7276 s->size += PLT_SMALL_ENTRY_SIZE;
7277
7278 /* We also need to make an entry in the .got.plt section, which
7279 will be placed in the .got section by the linker script. */
7280 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7281
7282 /* We also need to make an entry in the .rela.plt section. */
7283 htab->root.srelplt->size += RELOC_SIZE (htab);
7284
7285 /* We need to ensure that all GOT entries that serve the PLT
7286 are consecutive with the special GOT slots [0] [1] and
7287 [2]. Any additional relocations, such as
7288 R_AARCH64_TLSDESC, must be placed after the PLT related
7289 entries. We abuse the reloc_count such that during
7290 sizing we adjust reloc_count to indicate the number of
7291 PLT related reserved entries. In subsequent phases when
7292 filling in the contents of the reloc entries, PLT related
7293 entries are placed by computing their PLT index (0
7294 .. reloc_count), while other non-PLT relocs are placed
7295 at the slot indicated by reloc_count, and reloc_count is
7296 then updated. */
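/* Editorial sketch of the resulting .got.plt layout (not part of the
   original source; slot size is GOT_ENTRY_SIZE, assumed to be 8 bytes in
   the ELF64 case):

       GOT[0..2]                   reserved for the dynamic linker
       GOT[3 .. 3+reloc_count-1]   PLT jump slots (R_AARCH64_JUMP_SLOT)
       following slots             TLSDESC descriptor pairs (R_AARCH64_TLSDESC)
*/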
7297
7298 htab->root.srelplt->reloc_count++;
7299 }
7300 else
7301 {
7302 h->plt.offset = (bfd_vma) - 1;
7303 h->needs_plt = 0;
7304 }
7305 }
7306 else
7307 {
7308 h->plt.offset = (bfd_vma) - 1;
7309 h->needs_plt = 0;
7310 }
7311
7312 eh = (struct elf_aarch64_link_hash_entry *) h;
7313 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7314
7315 if (h->got.refcount > 0)
7316 {
7317 bfd_boolean dyn;
7318 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7319
7320 h->got.offset = (bfd_vma) - 1;
7321
7322 dyn = htab->root.dynamic_sections_created;
7323
7324 /* Make sure this symbol is output as a dynamic symbol.
7325 Undefined weak syms won't yet be marked as dynamic. */
7326 if (dyn && h->dynindx == -1 && !h->forced_local)
7327 {
7328 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7329 return FALSE;
7330 }
7331
7332 if (got_type == GOT_UNKNOWN)
7333 {
7334 }
7335 else if (got_type == GOT_NORMAL)
7336 {
7337 h->got.offset = htab->root.sgot->size;
7338 htab->root.sgot->size += GOT_ENTRY_SIZE;
7339 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7340 || h->root.type != bfd_link_hash_undefweak)
7341 && (info->shared
7342 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7343 {
7344 htab->root.srelgot->size += RELOC_SIZE (htab);
7345 }
7346 }
7347 else
7348 {
7349 int indx;
7350 if (got_type & GOT_TLSDESC_GD)
7351 {
7352 eh->tlsdesc_got_jump_table_offset =
7353 (htab->root.sgotplt->size
7354 - aarch64_compute_jump_table_size (htab));
7355 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7356 h->got.offset = (bfd_vma) - 2;
7357 }
7358
7359 if (got_type & GOT_TLS_GD)
7360 {
7361 h->got.offset = htab->root.sgot->size;
7362 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7363 }
7364
7365 if (got_type & GOT_TLS_IE)
7366 {
7367 h->got.offset = htab->root.sgot->size;
7368 htab->root.sgot->size += GOT_ENTRY_SIZE;
7369 }
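/* Editorial note (not part of the original source): GOT_TLSDESC_GD reserves
   a pair of slots in .got.plt (the TLS descriptor), GOT_TLS_GD a pair of
   slots in .got, and GOT_TLS_IE a single .got slot, matching the size
   increments applied above.  */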
7370
7371 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7372 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7373 || h->root.type != bfd_link_hash_undefweak)
7374 && (info->shared
7375 || indx != 0
7376 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7377 {
7378 if (got_type & GOT_TLSDESC_GD)
7379 {
7380 htab->root.srelplt->size += RELOC_SIZE (htab);
7381 /* Note reloc_count not incremented here! We have
7382 already adjusted reloc_count for this relocation
7383 type. */
7384
7385 /* TLSDESC PLT is now needed, but not yet determined. */
7386 htab->tlsdesc_plt = (bfd_vma) - 1;
7387 }
7388
7389 if (got_type & GOT_TLS_GD)
7390 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7391
7392 if (got_type & GOT_TLS_IE)
7393 htab->root.srelgot->size += RELOC_SIZE (htab);
7394 }
7395 }
7396 }
7397 else
7398 {
7399 h->got.offset = (bfd_vma) - 1;
7400 }
7401
7402 if (eh->dyn_relocs == NULL)
7403 return TRUE;
7404
7405 /* In the shared -Bsymbolic case, discard space allocated for
7406 dynamic pc-relative relocs against symbols which turn out to be
7407 defined in regular objects. For the normal shared case, discard
7408 space for pc-relative relocs that have become local due to symbol
7409 visibility changes. */
7410
7411 if (info->shared)
7412 {
7413 /* Relocs that use pc_count are those that appear on a call
7414 insn, or certain REL relocs that can be generated via assembly.
7415 We want calls to protected symbols to resolve directly to the
7416 function rather than going via the plt. If people want
7417 function pointer comparisons to work as expected then they
7418 should avoid writing weird assembly. */
7419 if (SYMBOL_CALLS_LOCAL (info, h))
7420 {
7421 struct elf_dyn_relocs **pp;
7422
7423 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7424 {
7425 p->count -= p->pc_count;
7426 p->pc_count = 0;
7427 if (p->count == 0)
7428 *pp = p->next;
7429 else
7430 pp = &p->next;
7431 }
7432 }
7433
7434 /* Also discard relocs on undefined weak syms with non-default
7435 visibility. */
7436 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7437 {
7438 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7439 eh->dyn_relocs = NULL;
7440
7441 /* Make sure undefined weak symbols are output as a dynamic
7442 symbol in PIEs. */
7443 else if (h->dynindx == -1
7444 && !h->forced_local
7445 && !bfd_elf_link_record_dynamic_symbol (info, h))
7446 return FALSE;
7447 }
7448
7449 }
7450 else if (ELIMINATE_COPY_RELOCS)
7451 {
7452 /* For the non-shared case, discard space for relocs against
7453 symbols which turn out to need copy relocs or are not
7454 dynamic. */
7455
7456 if (!h->non_got_ref
7457 && ((h->def_dynamic
7458 && !h->def_regular)
7459 || (htab->root.dynamic_sections_created
7460 && (h->root.type == bfd_link_hash_undefweak
7461 || h->root.type == bfd_link_hash_undefined))))
7462 {
7463 /* Make sure this symbol is output as a dynamic symbol.
7464 Undefined weak syms won't yet be marked as dynamic. */
7465 if (h->dynindx == -1
7466 && !h->forced_local
7467 && !bfd_elf_link_record_dynamic_symbol (info, h))
7468 return FALSE;
7469
7470 /* If that succeeded, we know we'll be keeping all the
7471 relocs. */
7472 if (h->dynindx != -1)
7473 goto keep;
7474 }
7475
7476 eh->dyn_relocs = NULL;
7477
7478 keep:;
7479 }
7480
7481 /* Finally, allocate space. */
7482 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7483 {
7484 asection *sreloc;
7485
7486 sreloc = elf_section_data (p->sec)->sreloc;
7487
7488 BFD_ASSERT (sreloc != NULL);
7489
7490 sreloc->size += p->count * RELOC_SIZE (htab);
7491 }
7492
7493 return TRUE;
7494 }
7495
7496 /* Allocate space in .plt, .got and associated reloc sections for
7497 ifunc dynamic relocs. */
7498
7499 static bfd_boolean
7500 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7501 void *inf)
7502 {
7503 struct bfd_link_info *info;
7504 struct elf_aarch64_link_hash_table *htab;
7505 struct elf_aarch64_link_hash_entry *eh;
7506
7507 /* An example of a bfd_link_hash_indirect symbol is a versioned
7508 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7509 -> __gxx_personality_v0(bfd_link_hash_defined)
7510
7511 There is no need to process bfd_link_hash_indirect symbols here
7512 because we will also be presented with the concrete instance of
7513 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7514 called to copy all relevant data from the generic to the concrete
7515 symbol instance.
7516 */
7517 if (h->root.type == bfd_link_hash_indirect)
7518 return TRUE;
7519
7520 if (h->root.type == bfd_link_hash_warning)
7521 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7522
7523 info = (struct bfd_link_info *) inf;
7524 htab = elf_aarch64_hash_table (info);
7525
7526 eh = (struct elf_aarch64_link_hash_entry *) h;
7527
7528 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7529 here if it is defined and referenced in a non-shared object. */
7530 if (h->type == STT_GNU_IFUNC
7531 && h->def_regular)
7532 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7533 &eh->dyn_relocs,
7534 htab->plt_entry_size,
7535 htab->plt_header_size,
7536 GOT_ENTRY_SIZE);
7537 return TRUE;
7538 }
7539
7540 /* Allocate space in .plt, .got and associated reloc sections for
7541 local dynamic relocs. */
7542
7543 static bfd_boolean
7544 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7545 {
7546 struct elf_link_hash_entry *h
7547 = (struct elf_link_hash_entry *) *slot;
7548
7549 if (h->type != STT_GNU_IFUNC
7550 || !h->def_regular
7551 || !h->ref_regular
7552 || !h->forced_local
7553 || h->root.type != bfd_link_hash_defined)
7554 abort ();
7555
7556 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7557 }
7558
7559 /* Allocate space in .plt, .got and associated reloc sections for
7560 local ifunc dynamic relocs. */
7561
7562 static bfd_boolean
7563 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7564 {
7565 struct elf_link_hash_entry *h
7566 = (struct elf_link_hash_entry *) *slot;
7567
7568 if (h->type != STT_GNU_IFUNC
7569 || !h->def_regular
7570 || !h->ref_regular
7571 || !h->forced_local
7572 || h->root.type != bfd_link_hash_defined)
7573 abort ();
7574
7575 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7576 }
7577
7578 /* Find any dynamic relocs that apply to read-only sections. */
7579
7580 static bfd_boolean
7581 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7582 {
7583 struct elf_aarch64_link_hash_entry * eh;
7584 struct elf_dyn_relocs * p;
7585
7586 eh = (struct elf_aarch64_link_hash_entry *) h;
7587 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7588 {
7589 asection *s = p->sec;
7590
7591 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7592 {
7593 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7594
7595 info->flags |= DF_TEXTREL;
7596
7597 /* Not an error, just cut short the traversal. */
7598 return FALSE;
7599 }
7600 }
7601 return TRUE;
7602 }
7603
7604 /* This is the most important function of all. Innocuously named
7605 though! */
7606 static bfd_boolean
7607 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7608 struct bfd_link_info *info)
7609 {
7610 struct elf_aarch64_link_hash_table *htab;
7611 bfd *dynobj;
7612 asection *s;
7613 bfd_boolean relocs;
7614 bfd *ibfd;
7615
7616 htab = elf_aarch64_hash_table ((info));
7617 dynobj = htab->root.dynobj;
7618
7619 BFD_ASSERT (dynobj != NULL);
7620
7621 if (htab->root.dynamic_sections_created)
7622 {
7623 if (info->executable)
7624 {
7625 s = bfd_get_linker_section (dynobj, ".interp");
7626 if (s == NULL)
7627 abort ();
7628 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7629 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7630 }
7631 }
7632
7633 /* Set up .got offsets for local syms, and space for local dynamic
7634 relocs. */
7635 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7636 {
7637 struct elf_aarch64_local_symbol *locals = NULL;
7638 Elf_Internal_Shdr *symtab_hdr;
7639 asection *srel;
7640 unsigned int i;
7641
7642 if (!is_aarch64_elf (ibfd))
7643 continue;
7644
7645 for (s = ibfd->sections; s != NULL; s = s->next)
7646 {
7647 struct elf_dyn_relocs *p;
7648
7649 for (p = (struct elf_dyn_relocs *)
7650 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7651 {
7652 if (!bfd_is_abs_section (p->sec)
7653 && bfd_is_abs_section (p->sec->output_section))
7654 {
7655 /* Input section has been discarded, either because
7656 it is a copy of a linkonce section or due to
7657 linker script /DISCARD/, so we'll be discarding
7658 the relocs too. */
7659 }
7660 else if (p->count != 0)
7661 {
7662 srel = elf_section_data (p->sec)->sreloc;
7663 srel->size += p->count * RELOC_SIZE (htab);
7664 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7665 info->flags |= DF_TEXTREL;
7666 }
7667 }
7668 }
7669
7670 locals = elf_aarch64_locals (ibfd);
7671 if (!locals)
7672 continue;
7673
7674 symtab_hdr = &elf_symtab_hdr (ibfd);
7675 srel = htab->root.srelgot;
7676 for (i = 0; i < symtab_hdr->sh_info; i++)
7677 {
7678 locals[i].got_offset = (bfd_vma) - 1;
7679 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7680 if (locals[i].got_refcount > 0)
7681 {
7682 unsigned got_type = locals[i].got_type;
7683 if (got_type & GOT_TLSDESC_GD)
7684 {
7685 locals[i].tlsdesc_got_jump_table_offset =
7686 (htab->root.sgotplt->size
7687 - aarch64_compute_jump_table_size (htab));
7688 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7689 locals[i].got_offset = (bfd_vma) - 2;
7690 }
7691
7692 if (got_type & GOT_TLS_GD)
7693 {
7694 locals[i].got_offset = htab->root.sgot->size;
7695 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7696 }
7697
7698 if (got_type & GOT_TLS_IE
7699 || got_type & GOT_NORMAL)
7700 {
7701 locals[i].got_offset = htab->root.sgot->size;
7702 htab->root.sgot->size += GOT_ENTRY_SIZE;
7703 }
7704
7705 if (got_type == GOT_UNKNOWN)
7706 {
7707 }
7708
7709 if (info->shared)
7710 {
7711 if (got_type & GOT_TLSDESC_GD)
7712 {
7713 htab->root.srelplt->size += RELOC_SIZE (htab);
7714 /* Note RELOC_COUNT not incremented here! */
7715 htab->tlsdesc_plt = (bfd_vma) - 1;
7716 }
7717
7718 if (got_type & GOT_TLS_GD)
7719 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7720
7721 if (got_type & GOT_TLS_IE
7722 || got_type & GOT_NORMAL)
7723 htab->root.srelgot->size += RELOC_SIZE (htab);
7724 }
7725 }
7726 else
7727 {
7728 locals[i].got_refcount = (bfd_vma) - 1;
7729 }
7730 }
7731 }
7732
7733
7734 /* Allocate global sym .plt and .got entries, and space for global
7735 sym dynamic relocs. */
7736 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7737 info);
7738
7739 /* Allocate global ifunc sym .plt and .got entries, and space for global
7740 ifunc sym dynamic relocs. */
7741 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7742 info);
7743
7744 /* Allocate .plt and .got entries, and space for local symbols. */
7745 htab_traverse (htab->loc_hash_table,
7746 elfNN_aarch64_allocate_local_dynrelocs,
7747 info);
7748
7749 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7750 htab_traverse (htab->loc_hash_table,
7751 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7752 info);
7753
7754 /* For every jump slot reserved in the sgotplt, reloc_count is
7755 incremented. However, when we reserve space for TLS descriptors,
7756 it's not incremented, so in order to compute the space reserved
7757 for them, it suffices to multiply the reloc count by the jump
7758 slot size. */
7759
7760 if (htab->root.srelplt)
7761 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
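/* Editorial sketch (not part of the original source), following the comment
   above: the PLT jump-slot portion of .got.plt can be computed as

       jump_table_size = srelplt->reloc_count * GOT_ENTRY_SIZE;

   so a TLSDESC slot pair recorded at (sgotplt->size - jump_table_size) is
   an offset relative to the end of the jump slots, assuming
   aarch64_compute_jump_table_size implements exactly this product.  */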
7762
7763 if (htab->tlsdesc_plt)
7764 {
7765 if (htab->root.splt->size == 0)
7766 htab->root.splt->size += PLT_ENTRY_SIZE;
7767
7768 htab->tlsdesc_plt = htab->root.splt->size;
7769 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7770
7771 /* If we're not using lazy TLS relocations, don't generate the
7772 GOT entry required. */
7773 if (!(info->flags & DF_BIND_NOW))
7774 {
7775 htab->dt_tlsdesc_got = htab->root.sgot->size;
7776 htab->root.sgot->size += GOT_ENTRY_SIZE;
7777 }
7778 }
7779
7780 /* Init mapping symbol information to use later to distinguish between
7781 code and data while scanning for errata. */
7782 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7783 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7784 {
7785 if (!is_aarch64_elf (ibfd))
7786 continue;
7787 bfd_elfNN_aarch64_init_maps (ibfd);
7788 }
7789
7790 /* We now have determined the sizes of the various dynamic sections.
7791 Allocate memory for them. */
7792 relocs = FALSE;
7793 for (s = dynobj->sections; s != NULL; s = s->next)
7794 {
7795 if ((s->flags & SEC_LINKER_CREATED) == 0)
7796 continue;
7797
7798 if (s == htab->root.splt
7799 || s == htab->root.sgot
7800 || s == htab->root.sgotplt
7801 || s == htab->root.iplt
7802 || s == htab->root.igotplt || s == htab->sdynbss)
7803 {
7804 /* Strip this section if we don't need it; see the
7805 comment below. */
7806 }
7807 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7808 {
7809 if (s->size != 0 && s != htab->root.srelplt)
7810 relocs = TRUE;
7811
7812 /* We use the reloc_count field as a counter if we need
7813 to copy relocs into the output file. */
7814 if (s != htab->root.srelplt)
7815 s->reloc_count = 0;
7816 }
7817 else
7818 {
7819 /* It's not one of our sections, so don't allocate space. */
7820 continue;
7821 }
7822
7823 if (s->size == 0)
7824 {
7825 /* If we don't need this section, strip it from the
7826 output file. This is mostly to handle .rela.bss and
7827 .rela.plt. We must create both sections in
7828 create_dynamic_sections, because they must be created
7829 before the linker maps input sections to output
7830 sections. The linker does that before
7831 adjust_dynamic_symbol is called, and it is that
7832 function which decides whether anything needs to go
7833 into these sections. */
7834
7835 s->flags |= SEC_EXCLUDE;
7836 continue;
7837 }
7838
7839 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7840 continue;
7841
7842 /* Allocate memory for the section contents. We use bfd_zalloc
7843 here in case unused entries are not reclaimed before the
7844 section's contents are written out. This should not happen,
7845 but this way if it does, we get an R_AARCH64_NONE reloc instead
7846 of garbage. */
7847 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7848 if (s->contents == NULL)
7849 return FALSE;
7850 }
7851
7852 if (htab->root.dynamic_sections_created)
7853 {
7854 /* Add some entries to the .dynamic section. We fill in the
7855 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7856 must add the entries now so that we get the correct size for
7857 the .dynamic section. The DT_DEBUG entry is filled in by the
7858 dynamic linker and used by the debugger. */
7859 #define add_dynamic_entry(TAG, VAL) \
7860 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7861
7862 if (info->executable)
7863 {
7864 if (!add_dynamic_entry (DT_DEBUG, 0))
7865 return FALSE;
7866 }
7867
7868 if (htab->root.splt->size != 0)
7869 {
7870 if (!add_dynamic_entry (DT_PLTGOT, 0)
7871 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7872 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7873 || !add_dynamic_entry (DT_JMPREL, 0))
7874 return FALSE;
7875
7876 if (htab->tlsdesc_plt
7877 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7878 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7879 return FALSE;
7880 }
7881
7882 if (relocs)
7883 {
7884 if (!add_dynamic_entry (DT_RELA, 0)
7885 || !add_dynamic_entry (DT_RELASZ, 0)
7886 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7887 return FALSE;
7888
7889 /* If any dynamic relocs apply to a read-only section,
7890 then we need a DT_TEXTREL entry. */
7891 if ((info->flags & DF_TEXTREL) == 0)
7892 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7893 info);
7894
7895 if ((info->flags & DF_TEXTREL) != 0)
7896 {
7897 if (!add_dynamic_entry (DT_TEXTREL, 0))
7898 return FALSE;
7899 }
7900 }
7901 }
7902 #undef add_dynamic_entry
7903
7904 return TRUE;
7905 }
7906
7907 static inline void
7908 elf_aarch64_update_plt_entry (bfd *output_bfd,
7909 bfd_reloc_code_real_type r_type,
7910 bfd_byte *plt_entry, bfd_vma value)
7911 {
7912 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7913
7914 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7915 }
7916
7917 static void
7918 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7919 struct elf_aarch64_link_hash_table
7920 *htab, bfd *output_bfd,
7921 struct bfd_link_info *info)
7922 {
7923 bfd_byte *plt_entry;
7924 bfd_vma plt_index;
7925 bfd_vma got_offset;
7926 bfd_vma gotplt_entry_address;
7927 bfd_vma plt_entry_address;
7928 Elf_Internal_Rela rela;
7929 bfd_byte *loc;
7930 asection *plt, *gotplt, *relplt;
7931
7932 /* When building a static executable, use .iplt, .igot.plt and
7933 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7934 if (htab->root.splt != NULL)
7935 {
7936 plt = htab->root.splt;
7937 gotplt = htab->root.sgotplt;
7938 relplt = htab->root.srelplt;
7939 }
7940 else
7941 {
7942 plt = htab->root.iplt;
7943 gotplt = htab->root.igotplt;
7944 relplt = htab->root.irelplt;
7945 }
7946
7947 /* Get the index in the procedure linkage table which
7948 corresponds to this symbol. This is the index of this symbol
7949 in all the symbols for which we are making plt entries. The
7950 first entry in the procedure linkage table is reserved.
7951
7952 Get the offset into the .got table of the entry that
7953 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7954 bytes. The first three are reserved for the dynamic linker.
7955
7956 For static executables, we don't reserve anything. */
7957
7958 if (plt == htab->root.splt)
7959 {
7960 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7961 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7962 }
7963 else
7964 {
7965 plt_index = h->plt.offset / htab->plt_entry_size;
7966 got_offset = plt_index * GOT_ENTRY_SIZE;
7967 }
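/* Worked example (editorial, not part of the original source), assuming the
   ELF64 small-model sizes plt_header_size == 32, plt_entry_size == 16 and
   GOT_ENTRY_SIZE == 8: the first real PLT entry has h->plt.offset == 32,
   giving plt_index == 0 and got_offset == 24, i.e. GOT[3], the first slot
   after the three reserved entries.  */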
7968
7969 plt_entry = plt->contents + h->plt.offset;
7970 plt_entry_address = plt->output_section->vma
7971 + plt->output_offset + h->plt.offset;
7972 gotplt_entry_address = gotplt->output_section->vma +
7973 gotplt->output_offset + got_offset;
7974
7975 /* Copy in the boiler-plate for the PLTn entry. */
7976 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7977
7978 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7979 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7980 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7981 plt_entry,
7982 PG (gotplt_entry_address) -
7983 PG (plt_entry_address));
7984
7985 /* Fill in the lo12 bits for the load from the pltgot. */
7986 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7987 plt_entry + 4,
7988 PG_OFFSET (gotplt_entry_address));
7989
7990 /* Fill in the lo12 bits for the add from the pltgot entry. */
7991 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7992 plt_entry + 8,
7993 PG_OFFSET (gotplt_entry_address));
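/* Editorial illustration (not part of the original source), assuming PG(x)
   selects the 4 KiB page base (x & ~0xfff) and PG_OFFSET(x) the low 12 bits,
   as used elsewhere in this backend.  For example, with hypothetical
   addresses

       gotplt_entry_address = 0x412018,  plt_entry_address = 0x400430

   the ADRP immediate is (0x412000 - 0x400000) >> 12 = 0x12 pages, and the
   LDR/ADD low-12 immediate is 0x18.  */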
7994
7995 /* All the GOTPLT entries are initially set to the address of PLT0. */
7996 bfd_put_NN (output_bfd,
7997 plt->output_section->vma + plt->output_offset,
7998 gotplt->contents + got_offset);
7999
8000 rela.r_offset = gotplt_entry_address;
8001
8002 if (h->dynindx == -1
8003 || ((info->executable
8004 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8005 && h->def_regular
8006 && h->type == STT_GNU_IFUNC))
8007 {
8008 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8009 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8010 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8011 rela.r_addend = (h->root.u.def.value
8012 + h->root.u.def.section->output_section->vma
8013 + h->root.u.def.section->output_offset);
8014 }
8015 else
8016 {
8017 /* Fill in the entry in the .rela.plt section. */
8018 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8019 rela.r_addend = 0;
8020 }
8021
8022 /* Compute the relocation entry to use based on the PLT index and do
8023 not adjust reloc_count. The reloc_count has already been adjusted
8024 to account for this entry. */
8025 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8026 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8027 }
8028
8029 /* Size sections even though they're not dynamic. We use it to set up
8030 _TLS_MODULE_BASE_, if needed. */
8031
8032 static bfd_boolean
8033 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8034 struct bfd_link_info *info)
8035 {
8036 asection *tls_sec;
8037
8038 if (info->relocatable)
8039 return TRUE;
8040
8041 tls_sec = elf_hash_table (info)->tls_sec;
8042
8043 if (tls_sec)
8044 {
8045 struct elf_link_hash_entry *tlsbase;
8046
8047 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8048 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8049
8050 if (tlsbase)
8051 {
8052 struct bfd_link_hash_entry *h = NULL;
8053 const struct elf_backend_data *bed =
8054 get_elf_backend_data (output_bfd);
8055
8056 if (!(_bfd_generic_link_add_one_symbol
8057 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8058 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8059 return FALSE;
8060
8061 tlsbase->type = STT_TLS;
8062 tlsbase = (struct elf_link_hash_entry *) h;
8063 tlsbase->def_regular = 1;
8064 tlsbase->other = STV_HIDDEN;
8065 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8066 }
8067 }
8068
8069 return TRUE;
8070 }
8071
8072 /* Finish up dynamic symbol handling. We set the contents of various
8073 dynamic sections here. */
8074 static bfd_boolean
8075 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8076 struct bfd_link_info *info,
8077 struct elf_link_hash_entry *h,
8078 Elf_Internal_Sym *sym)
8079 {
8080 struct elf_aarch64_link_hash_table *htab;
8081 htab = elf_aarch64_hash_table (info);
8082
8083 if (h->plt.offset != (bfd_vma) - 1)
8084 {
8085 asection *plt, *gotplt, *relplt;
8086
8087 /* This symbol has an entry in the procedure linkage table. Set
8088 it up. */
8089
8090 /* When building a static executable, use .iplt, .igot.plt and
8091 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8092 if (htab->root.splt != NULL)
8093 {
8094 plt = htab->root.splt;
8095 gotplt = htab->root.sgotplt;
8096 relplt = htab->root.srelplt;
8097 }
8098 else
8099 {
8100 plt = htab->root.iplt;
8101 gotplt = htab->root.igotplt;
8102 relplt = htab->root.irelplt;
8103 }
8104
8105 /* This symbol has an entry in the procedure linkage table. Set
8106 it up. */
8107 if ((h->dynindx == -1
8108 && !((h->forced_local || info->executable)
8109 && h->def_regular
8110 && h->type == STT_GNU_IFUNC))
8111 || plt == NULL
8112 || gotplt == NULL
8113 || relplt == NULL)
8114 abort ();
8115
8116 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8117 if (!h->def_regular)
8118 {
8119 /* Mark the symbol as undefined, rather than as defined in
8120 the .plt section. */
8121 sym->st_shndx = SHN_UNDEF;
8122 /* If the symbol is weak we need to clear the value.
8123 Otherwise, the PLT entry would provide a definition for
8124 the symbol even if the symbol wasn't defined anywhere,
8125 and so the symbol would never be NULL. Leave the value if
8126 there were any relocations where pointer equality matters
8127 (this is a clue for the dynamic linker, to make function
8128 pointer comparisons work between an application and shared
8129 library). */
8130 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8131 sym->st_value = 0;
8132 }
8133 }
8134
8135 if (h->got.offset != (bfd_vma) - 1
8136 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8137 {
8138 Elf_Internal_Rela rela;
8139 bfd_byte *loc;
8140
8141 /* This symbol has an entry in the global offset table. Set it
8142 up. */
8143 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8144 abort ();
8145
8146 rela.r_offset = (htab->root.sgot->output_section->vma
8147 + htab->root.sgot->output_offset
8148 + (h->got.offset & ~(bfd_vma) 1));
8149
8150 if (h->def_regular
8151 && h->type == STT_GNU_IFUNC)
8152 {
8153 if (info->shared)
8154 {
8155 /* Generate R_AARCH64_GLOB_DAT. */
8156 goto do_glob_dat;
8157 }
8158 else
8159 {
8160 asection *plt;
8161
8162 if (!h->pointer_equality_needed)
8163 abort ();
8164
8165 /* For a non-shared object, we can't use .got.plt, which
8166 contains the real function address if we need pointer
8167 equality. We load the GOT entry with the PLT entry. */
8168 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8169 bfd_put_NN (output_bfd, (plt->output_section->vma
8170 + plt->output_offset
8171 + h->plt.offset),
8172 htab->root.sgot->contents
8173 + (h->got.offset & ~(bfd_vma) 1));
8174 return TRUE;
8175 }
8176 }
8177 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8178 {
8179 if (!h->def_regular)
8180 return FALSE;
8181
8182 BFD_ASSERT ((h->got.offset & 1) != 0);
8183 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8184 rela.r_addend = (h->root.u.def.value
8185 + h->root.u.def.section->output_section->vma
8186 + h->root.u.def.section->output_offset);
8187 }
8188 else
8189 {
8190 do_glob_dat:
8191 BFD_ASSERT ((h->got.offset & 1) == 0);
8192 bfd_put_NN (output_bfd, (bfd_vma) 0,
8193 htab->root.sgot->contents + h->got.offset);
8194 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8195 rela.r_addend = 0;
8196 }
8197
8198 loc = htab->root.srelgot->contents;
8199 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8200 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8201 }
8202
8203 if (h->needs_copy)
8204 {
8205 Elf_Internal_Rela rela;
8206 bfd_byte *loc;
8207
8208 /* This symbol needs a copy reloc. Set it up. */
8209
8210 if (h->dynindx == -1
8211 || (h->root.type != bfd_link_hash_defined
8212 && h->root.type != bfd_link_hash_defweak)
8213 || htab->srelbss == NULL)
8214 abort ();
8215
8216 rela.r_offset = (h->root.u.def.value
8217 + h->root.u.def.section->output_section->vma
8218 + h->root.u.def.section->output_offset);
8219 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8220 rela.r_addend = 0;
8221 loc = htab->srelbss->contents;
8222 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8223 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8224 }
8225
8226 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8227 be NULL for local symbols. */
8228 if (sym != NULL
8229 && (h == elf_hash_table (info)->hdynamic
8230 || h == elf_hash_table (info)->hgot))
8231 sym->st_shndx = SHN_ABS;
8232
8233 return TRUE;
8234 }
8235
8236 /* Finish up local dynamic symbol handling. We set the contents of
8237 various dynamic sections here. */
8238
8239 static bfd_boolean
8240 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8241 {
8242 struct elf_link_hash_entry *h
8243 = (struct elf_link_hash_entry *) *slot;
8244 struct bfd_link_info *info
8245 = (struct bfd_link_info *) inf;
8246
8247 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8248 info, h, NULL);
8249 }
8250
8251 static void
8252 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8253 struct elf_aarch64_link_hash_table
8254 *htab)
8255 {
8256 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8257 small and large plts and at the minute just generates
8258 the small PLT. */
8259
8260 /* PLT0 of the small PLT looks like this in ELF64 -
8261 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8262 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8263 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8264 // symbol resolver
8265 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8266 // GOTPLT entry for this.
8267 br x17
8268 PLT0 will be slightly different in ELF32 due to different got entry
8269 size.
8270 */
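/* Editorial note (not part of the original source): PLT_GOT + 16 in the
   sequence above is GOT[2] in the ELF64 layout (8-byte entries).
   Conventionally the dynamic linker fills GOT[1] with its link-map handle
   and GOT[2] with the address of its lazy-resolution entry point, which is
   why PLT0 loads and branches through that slot.  */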
8271 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8272 bfd_vma plt_base;
8273
8274
8275 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8276 PLT_ENTRY_SIZE);
8277 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8278 PLT_ENTRY_SIZE;
8279
8280 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8281 + htab->root.sgotplt->output_offset
8282 + GOT_ENTRY_SIZE * 2);
8283
8284 plt_base = htab->root.splt->output_section->vma +
8285 htab->root.splt->output_offset;
8286
8287 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8288 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8289 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8290 htab->root.splt->contents + 4,
8291 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8292
8293 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8294 htab->root.splt->contents + 8,
8295 PG_OFFSET (plt_got_2nd_ent));
8296
8297 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8298 htab->root.splt->contents + 12,
8299 PG_OFFSET (plt_got_2nd_ent));
8300 }
8301
8302 static bfd_boolean
8303 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8304 struct bfd_link_info *info)
8305 {
8306 struct elf_aarch64_link_hash_table *htab;
8307 bfd *dynobj;
8308 asection *sdyn;
8309
8310 htab = elf_aarch64_hash_table (info);
8311 dynobj = htab->root.dynobj;
8312 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8313
8314 if (htab->root.dynamic_sections_created)
8315 {
8316 ElfNN_External_Dyn *dyncon, *dynconend;
8317
8318 if (sdyn == NULL || htab->root.sgot == NULL)
8319 abort ();
8320
8321 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8322 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8323 for (; dyncon < dynconend; dyncon++)
8324 {
8325 Elf_Internal_Dyn dyn;
8326 asection *s;
8327
8328 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8329
8330 switch (dyn.d_tag)
8331 {
8332 default:
8333 continue;
8334
8335 case DT_PLTGOT:
8336 s = htab->root.sgotplt;
8337 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8338 break;
8339
8340 case DT_JMPREL:
8341 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8342 break;
8343
8344 case DT_PLTRELSZ:
8345 s = htab->root.srelplt;
8346 dyn.d_un.d_val = s->size;
8347 break;
8348
8349 case DT_RELASZ:
8350 /* The procedure linkage table relocs (DT_JMPREL) should
8351 not be included in the overall relocs (DT_RELA).
8352 Therefore, we override the DT_RELASZ entry here to
8353 make it not include the JMPREL relocs. Since the
8354 linker script arranges for .rela.plt to follow all
8355 other relocation sections, we don't have to worry
8356 about changing the DT_RELA entry. */
8357 if (htab->root.srelplt != NULL)
8358 {
8359 s = htab->root.srelplt;
8360 dyn.d_un.d_val -= s->size;
8361 }
8362 break;
8363
8364 case DT_TLSDESC_PLT:
8365 s = htab->root.splt;
8366 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8367 + htab->tlsdesc_plt;
8368 break;
8369
8370 case DT_TLSDESC_GOT:
8371 s = htab->root.sgot;
8372 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8373 + htab->dt_tlsdesc_got;
8374 break;
8375 }
8376
8377 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8378 }
8379
8380 }
8381
8382 /* Fill in the special first entry in the procedure linkage table. */
8383 if (htab->root.splt && htab->root.splt->size > 0)
8384 {
8385 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8386
8387 elf_section_data (htab->root.splt->output_section)->
8388 this_hdr.sh_entsize = htab->plt_entry_size;
8389
8390
8391 if (htab->tlsdesc_plt)
8392 {
8393 bfd_put_NN (output_bfd, (bfd_vma) 0,
8394 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8395
8396 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8397 elfNN_aarch64_tlsdesc_small_plt_entry,
8398 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8399
8400 {
8401 bfd_vma adrp1_addr =
8402 htab->root.splt->output_section->vma
8403 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8404
8405 bfd_vma adrp2_addr = adrp1_addr + 4;
8406
8407 bfd_vma got_addr =
8408 htab->root.sgot->output_section->vma
8409 + htab->root.sgot->output_offset;
8410
8411 bfd_vma pltgot_addr =
8412 htab->root.sgotplt->output_section->vma
8413 + htab->root.sgotplt->output_offset;
8414
8415 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8416
8417 bfd_byte *plt_entry =
8418 htab->root.splt->contents + htab->tlsdesc_plt;
8419
8420 /* adrp x2, DT_TLSDESC_GOT */
8421 elf_aarch64_update_plt_entry (output_bfd,
8422 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8423 plt_entry + 4,
8424 (PG (dt_tlsdesc_got)
8425 - PG (adrp1_addr)));
8426
8427 /* adrp x3, 0 */
8428 elf_aarch64_update_plt_entry (output_bfd,
8429 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8430 plt_entry + 8,
8431 (PG (pltgot_addr)
8432 - PG (adrp2_addr)));
8433
8434 /* ldr x2, [x2, #0] */
8435 elf_aarch64_update_plt_entry (output_bfd,
8436 BFD_RELOC_AARCH64_LDSTNN_LO12,
8437 plt_entry + 12,
8438 PG_OFFSET (dt_tlsdesc_got));
8439
8440 /* add x3, x3, 0 */
8441 elf_aarch64_update_plt_entry (output_bfd,
8442 BFD_RELOC_AARCH64_ADD_LO12,
8443 plt_entry + 16,
8444 PG_OFFSET (pltgot_addr));
8445 }
8446 }
8447 }
8448
8449 if (htab->root.sgotplt)
8450 {
8451 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8452 {
8453 (*_bfd_error_handler)
8454 (_("discarded output section: `%A'"), htab->root.sgotplt);
8455 return FALSE;
8456 }
8457
8458 /* Fill in the first three entries in the global offset table. */
8459 if (htab->root.sgotplt->size > 0)
8460 {
8461 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8462
8463 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8464 bfd_put_NN (output_bfd,
8465 (bfd_vma) 0,
8466 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8467 bfd_put_NN (output_bfd,
8468 (bfd_vma) 0,
8469 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8470 }
8471
8472 if (htab->root.sgot)
8473 {
8474 if (htab->root.sgot->size > 0)
8475 {
8476 bfd_vma addr =
8477 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8478 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8479 }
8480 }
8481
8482 elf_section_data (htab->root.sgotplt->output_section)->
8483 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8484 }
8485
8486 if (htab->root.sgot && htab->root.sgot->size > 0)
8487 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8488 = GOT_ENTRY_SIZE;
8489
8490 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8491 htab_traverse (htab->loc_hash_table,
8492 elfNN_aarch64_finish_local_dynamic_symbol,
8493 info);
8494
8495 return TRUE;
8496 }
8497
8498 /* Return address for Ith PLT stub in section PLT, for relocation REL
8499 or (bfd_vma) -1 if it should not be included. */
8500
8501 static bfd_vma
8502 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8503 const arelent *rel ATTRIBUTE_UNUSED)
8504 {
8505 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8506 }
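/* Worked example (editorial, not part of the original source): assuming
   PLT_ENTRY_SIZE == 32 and PLT_SMALL_ENTRY_SIZE == 16, the synthetic symbol
   for PLT stub i == 0 is placed at plt->vma + 32, i.e. at the first stub
   immediately after the PLT header.  */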
8507
8508
8509 /* We use this so we can override certain functions
8510 (though currently we don't). */
8511
8512 const struct elf_size_info elfNN_aarch64_size_info =
8513 {
8514 sizeof (ElfNN_External_Ehdr),
8515 sizeof (ElfNN_External_Phdr),
8516 sizeof (ElfNN_External_Shdr),
8517 sizeof (ElfNN_External_Rel),
8518 sizeof (ElfNN_External_Rela),
8519 sizeof (ElfNN_External_Sym),
8520 sizeof (ElfNN_External_Dyn),
8521 sizeof (Elf_External_Note),
8522 4, /* Hash table entry size. */
8523 1, /* Internal relocs per external relocs. */
8524 ARCH_SIZE, /* Arch size. */
8525 LOG_FILE_ALIGN, /* Log_file_align. */
8526 ELFCLASSNN, EV_CURRENT,
8527 bfd_elfNN_write_out_phdrs,
8528 bfd_elfNN_write_shdrs_and_ehdr,
8529 bfd_elfNN_checksum_contents,
8530 bfd_elfNN_write_relocs,
8531 bfd_elfNN_swap_symbol_in,
8532 bfd_elfNN_swap_symbol_out,
8533 bfd_elfNN_slurp_reloc_table,
8534 bfd_elfNN_slurp_symbol_table,
8535 bfd_elfNN_swap_dyn_in,
8536 bfd_elfNN_swap_dyn_out,
8537 bfd_elfNN_swap_reloc_in,
8538 bfd_elfNN_swap_reloc_out,
8539 bfd_elfNN_swap_reloca_in,
8540 bfd_elfNN_swap_reloca_out
8541 };
8542
8543 #define ELF_ARCH bfd_arch_aarch64
8544 #define ELF_MACHINE_CODE EM_AARCH64
8545 #define ELF_MAXPAGESIZE 0x10000
8546 #define ELF_MINPAGESIZE 0x1000
8547 #define ELF_COMMONPAGESIZE 0x1000
8548
8549 #define bfd_elfNN_close_and_cleanup \
8550 elfNN_aarch64_close_and_cleanup
8551
8552 #define bfd_elfNN_bfd_free_cached_info \
8553 elfNN_aarch64_bfd_free_cached_info
8554
8555 #define bfd_elfNN_bfd_is_target_special_symbol \
8556 elfNN_aarch64_is_target_special_symbol
8557
8558 #define bfd_elfNN_bfd_link_hash_table_create \
8559 elfNN_aarch64_link_hash_table_create
8560
8561 #define bfd_elfNN_bfd_merge_private_bfd_data \
8562 elfNN_aarch64_merge_private_bfd_data
8563
8564 #define bfd_elfNN_bfd_print_private_bfd_data \
8565 elfNN_aarch64_print_private_bfd_data
8566
8567 #define bfd_elfNN_bfd_reloc_type_lookup \
8568 elfNN_aarch64_reloc_type_lookup
8569
8570 #define bfd_elfNN_bfd_reloc_name_lookup \
8571 elfNN_aarch64_reloc_name_lookup
8572
8573 #define bfd_elfNN_bfd_set_private_flags \
8574 elfNN_aarch64_set_private_flags
8575
8576 #define bfd_elfNN_find_inliner_info \
8577 elfNN_aarch64_find_inliner_info
8578
8579 #define bfd_elfNN_find_nearest_line \
8580 elfNN_aarch64_find_nearest_line
8581
8582 #define bfd_elfNN_mkobject \
8583 elfNN_aarch64_mkobject
8584
8585 #define bfd_elfNN_new_section_hook \
8586 elfNN_aarch64_new_section_hook
8587
8588 #define elf_backend_adjust_dynamic_symbol \
8589 elfNN_aarch64_adjust_dynamic_symbol
8590
8591 #define elf_backend_always_size_sections \
8592 elfNN_aarch64_always_size_sections
8593
8594 #define elf_backend_check_relocs \
8595 elfNN_aarch64_check_relocs
8596
8597 #define elf_backend_copy_indirect_symbol \
8598 elfNN_aarch64_copy_indirect_symbol
8599
8600 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
8601 to them in our hash. */
8602 #define elf_backend_create_dynamic_sections \
8603 elfNN_aarch64_create_dynamic_sections
8604
8605 #define elf_backend_init_index_section \
8606 _bfd_elf_init_2_index_sections
8607
8608 #define elf_backend_finish_dynamic_sections \
8609 elfNN_aarch64_finish_dynamic_sections
8610
8611 #define elf_backend_finish_dynamic_symbol \
8612 elfNN_aarch64_finish_dynamic_symbol
8613
8614 #define elf_backend_gc_sweep_hook \
8615 elfNN_aarch64_gc_sweep_hook
8616
8617 #define elf_backend_object_p \
8618 elfNN_aarch64_object_p
8619
8620 #define elf_backend_output_arch_local_syms \
8621 elfNN_aarch64_output_arch_local_syms
8622
8623 #define elf_backend_plt_sym_val \
8624 elfNN_aarch64_plt_sym_val
8625
8626 #define elf_backend_post_process_headers \
8627 elfNN_aarch64_post_process_headers
8628
8629 #define elf_backend_relocate_section \
8630 elfNN_aarch64_relocate_section
8631
8632 #define elf_backend_reloc_type_class \
8633 elfNN_aarch64_reloc_type_class
8634
8635 #define elf_backend_section_from_shdr \
8636 elfNN_aarch64_section_from_shdr
8637
8638 #define elf_backend_size_dynamic_sections \
8639 elfNN_aarch64_size_dynamic_sections
8640
8641 #define elf_backend_size_info \
8642 elfNN_aarch64_size_info
8643
8644 #define elf_backend_write_section \
8645 elfNN_aarch64_write_section
8646
8647 #define elf_backend_can_refcount 1
8648 #define elf_backend_can_gc_sections 1
8649 #define elf_backend_plt_readonly 1
8650 #define elf_backend_want_got_plt 1
8651 #define elf_backend_want_plt_sym 0
8652 #define elf_backend_may_use_rel_p 0
8653 #define elf_backend_may_use_rela_p 1
8654 #define elf_backend_default_use_rela_p 1
8655 #define elf_backend_rela_normal 1
8656 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8657 #define elf_backend_default_execstack 0
8658 #define elf_backend_extern_protected_data 1
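/* Editorial note (not part of the original source): extern_protected_data
   tells the generic ELF code that data symbols with STV_PROTECTED visibility
   defined in a shared object may still be referenced from outside it (for
   instance via a copy relocation in the executable), so they must not be
   treated as guaranteed-local when deciding whether dynamic relocations are
   needed.  */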
8659
8660 #undef elf_backend_obj_attrs_section
8661 #define elf_backend_obj_attrs_section ".ARM.attributes"
8662
8663 #include "elfNN-target.h"