1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23   Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
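   For illustration only (these are hypothetical names, not the
   loader's real definitions), the two double GOT slot layouts can be
   pictured as:

     struct tls_index_sketch { bfd_vma module;   bfd_vma offset;   };
     struct tlsdesc_sketch   { bfd_vma resolver; bfd_vma argument; };

   For traditional general dynamic TLS the loader fills in the module
   and offset words via the R_AARCH64_TLS_DTPMOD and
   R_AARCH64_TLS_DTPREL relocations described above; for TLS
   descriptors the single R_AARCH64_TLSDESC relocation is enough for
   the loader to fill in both words of the descriptor.
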
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when the
102 first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. The GOT offset for
113 the symbol is recorded.
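
   (In terms of the sizes defined further down in this file, each such
   symbol therefore consumes 2 * GOT_ENTRY_SIZE bytes of GOT space,
   plus 2 * RELOC_SIZE bytes of relocation space in the traditional
   case or 1 * RELOC_SIZE in the descriptor case.)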
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
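   A minimal sketch of that convention (with a hypothetical flag name,
   not the code used below):

     #define GOT_FLAG_RELOCS_EMITTED ((bfd_vma) 1)

     if ((got_offset & GOT_FLAG_RELOCS_EMITTED) == 0)
       {
         ... emit the GOT relocations for this symbol ...
         got_offset |= GOT_FLAG_RELOCS_EMITTED;
       }
     off = got_offset & ~(bfd_vma) GOT_FLAG_RELOCS_EMITTED;
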
134 elfNN_aarch64_final_link_relocate ()
135
136 Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
192 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
193
194 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
195 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
207
208 #define ELIMINATE_COPY_RELOCS 0
209
210 /* Return the size of a relocation entry. HTAB is the bfd's
211 elf_aarch64_link_hash_table. */
212 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
213
214 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
215 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
216 #define PLT_ENTRY_SIZE (32)
217 #define PLT_SMALL_ENTRY_SIZE (16)
218 #define PLT_TLSDESC_ENTRY_SIZE (32)
219
220 /* Encoding of the nop instruction. */
221 #define INSN_NOP 0xd503201f
222
223 #define aarch64_compute_jump_table_size(htab) \
224 (((htab)->root.srelplt == NULL) ? 0 \
225 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
226
227 /* The first entry in a procedure linkage table looks like this.
228 If the distance between the PLTGOT and the PLT is < 4GB, use
229 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
230 in x16 and needs to work out PLTGOT[1] by using an address of
231 [x16,#-GOT_ENTRY_SIZE]. */
232 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
233 {
234 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
235 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
236 #if ARCH_SIZE == 64
237 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
238 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
239 #else
240 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
241 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
242 #endif
243 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
244 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 0x1f, 0x20, 0x03, 0xd5, /* nop */
246 0x1f, 0x20, 0x03, 0xd5, /* nop */
247 };
248
249 /* A per-function entry in a procedure linkage table looks like this.
250 If the distance between the PLTGOT and the PLT is < 4GB, use
251 these PLT entries. */
252 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
253 {
254 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
255 #if ARCH_SIZE == 64
256 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
257 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
258 #else
259 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
260 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
261 #endif
262 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
263 };
264
265 static const bfd_byte
266 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
267 {
268 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
269 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
270 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
271 #if ARCH_SIZE == 64
272 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
274 #else
275 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
276 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
277 #endif
278 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
279 0x1f, 0x20, 0x03, 0xd5, /* nop */
280 0x1f, 0x20, 0x03, 0xd5, /* nop */
281 };
282
283 #define elf_info_to_howto elfNN_aarch64_info_to_howto
284 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
285
286 #define AARCH64_ELF_ABI_VERSION 0
287
288 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
289 #define ALL_ONES (~ (bfd_vma) 0)
290
291 /* Indexed by the bfd internal reloc enumerators.
292 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
293 in reloc.c. */
294
295 static reloc_howto_type elfNN_aarch64_howto_table[] =
296 {
297 EMPTY_HOWTO (0),
298
299 /* Basic data relocations. */
300
301 #if ARCH_SIZE == 64
302 HOWTO (R_AARCH64_NULL, /* type */
303 0, /* rightshift */
304 3, /* size (0 = byte, 1 = short, 2 = long) */
305 0, /* bitsize */
306 FALSE, /* pc_relative */
307 0, /* bitpos */
308 complain_overflow_dont, /* complain_on_overflow */
309 bfd_elf_generic_reloc, /* special_function */
310 "R_AARCH64_NULL", /* name */
311 FALSE, /* partial_inplace */
312 0, /* src_mask */
313 0, /* dst_mask */
314 FALSE), /* pcrel_offset */
315 #else
316 HOWTO (R_AARCH64_NONE, /* type */
317 0, /* rightshift */
318 3, /* size (0 = byte, 1 = short, 2 = long) */
319 0, /* bitsize */
320 FALSE, /* pc_relative */
321 0, /* bitpos */
322 complain_overflow_dont, /* complain_on_overflow */
323 bfd_elf_generic_reloc, /* special_function */
324 "R_AARCH64_NONE", /* name */
325 FALSE, /* partial_inplace */
326 0, /* src_mask */
327 0, /* dst_mask */
328 FALSE), /* pcrel_offset */
329 #endif
330
331 /* .xword: (S+A) */
332 HOWTO64 (AARCH64_R (ABS64), /* type */
333 0, /* rightshift */
334 4, /* size (4 = long long) */
335 64, /* bitsize */
336 FALSE, /* pc_relative */
337 0, /* bitpos */
338 complain_overflow_unsigned, /* complain_on_overflow */
339 bfd_elf_generic_reloc, /* special_function */
340 AARCH64_R_STR (ABS64), /* name */
341 FALSE, /* partial_inplace */
342 ALL_ONES, /* src_mask */
343 ALL_ONES, /* dst_mask */
344 FALSE), /* pcrel_offset */
345
346 /* .word: (S+A) */
347 HOWTO (AARCH64_R (ABS32), /* type */
348 0, /* rightshift */
349 2, /* size (0 = byte, 1 = short, 2 = long) */
350 32, /* bitsize */
351 FALSE, /* pc_relative */
352 0, /* bitpos */
353 complain_overflow_unsigned, /* complain_on_overflow */
354 bfd_elf_generic_reloc, /* special_function */
355 AARCH64_R_STR (ABS32), /* name */
356 FALSE, /* partial_inplace */
357 0xffffffff, /* src_mask */
358 0xffffffff, /* dst_mask */
359 FALSE), /* pcrel_offset */
360
361 /* .half: (S+A) */
362 HOWTO (AARCH64_R (ABS16), /* type */
363 0, /* rightshift */
364 1, /* size (0 = byte, 1 = short, 2 = long) */
365 16, /* bitsize */
366 FALSE, /* pc_relative */
367 0, /* bitpos */
368 complain_overflow_unsigned, /* complain_on_overflow */
369 bfd_elf_generic_reloc, /* special_function */
370 AARCH64_R_STR (ABS16), /* name */
371 FALSE, /* partial_inplace */
372 0xffff, /* src_mask */
373 0xffff, /* dst_mask */
374 FALSE), /* pcrel_offset */
375
376 /* .xword: (S+A-P) */
377 HOWTO64 (AARCH64_R (PREL64), /* type */
378 0, /* rightshift */
379 4, /* size (4 = long long) */
380 64, /* bitsize */
381 TRUE, /* pc_relative */
382 0, /* bitpos */
383 complain_overflow_signed, /* complain_on_overflow */
384 bfd_elf_generic_reloc, /* special_function */
385 AARCH64_R_STR (PREL64), /* name */
386 FALSE, /* partial_inplace */
387 ALL_ONES, /* src_mask */
388 ALL_ONES, /* dst_mask */
389 TRUE), /* pcrel_offset */
390
391 /* .word: (S+A-P) */
392 HOWTO (AARCH64_R (PREL32), /* type */
393 0, /* rightshift */
394 2, /* size (0 = byte, 1 = short, 2 = long) */
395 32, /* bitsize */
396 TRUE, /* pc_relative */
397 0, /* bitpos */
398 complain_overflow_signed, /* complain_on_overflow */
399 bfd_elf_generic_reloc, /* special_function */
400 AARCH64_R_STR (PREL32), /* name */
401 FALSE, /* partial_inplace */
402 0xffffffff, /* src_mask */
403 0xffffffff, /* dst_mask */
404 TRUE), /* pcrel_offset */
405
406 /* .half: (S+A-P) */
407 HOWTO (AARCH64_R (PREL16), /* type */
408 0, /* rightshift */
409 1, /* size (0 = byte, 1 = short, 2 = long) */
410 16, /* bitsize */
411 TRUE, /* pc_relative */
412 0, /* bitpos */
413 complain_overflow_signed, /* complain_on_overflow */
414 bfd_elf_generic_reloc, /* special_function */
415 AARCH64_R_STR (PREL16), /* name */
416 FALSE, /* partial_inplace */
417 0xffff, /* src_mask */
418 0xffff, /* dst_mask */
419 TRUE), /* pcrel_offset */
420
421 /* Group relocations to create a 16, 32, 48 or 64 bit
422 unsigned data or abs address inline. */
423
424 /* MOVZ: ((S+A) >> 0) & 0xffff */
425 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
426 0, /* rightshift */
427 2, /* size (0 = byte, 1 = short, 2 = long) */
428 16, /* bitsize */
429 FALSE, /* pc_relative */
430 0, /* bitpos */
431 complain_overflow_unsigned, /* complain_on_overflow */
432 bfd_elf_generic_reloc, /* special_function */
433 AARCH64_R_STR (MOVW_UABS_G0), /* name */
434 FALSE, /* partial_inplace */
435 0xffff, /* src_mask */
436 0xffff, /* dst_mask */
437 FALSE), /* pcrel_offset */
438
439 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
440 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 16, /* bitsize */
444 FALSE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_dont, /* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
449 FALSE, /* partial_inplace */
450 0xffff, /* src_mask */
451 0xffff, /* dst_mask */
452 FALSE), /* pcrel_offset */
453
454 /* MOVZ: ((S+A) >> 16) & 0xffff */
455 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
456 16, /* rightshift */
457 2, /* size (0 = byte, 1 = short, 2 = long) */
458 16, /* bitsize */
459 FALSE, /* pc_relative */
460 0, /* bitpos */
461 complain_overflow_unsigned, /* complain_on_overflow */
462 bfd_elf_generic_reloc, /* special_function */
463 AARCH64_R_STR (MOVW_UABS_G1), /* name */
464 FALSE, /* partial_inplace */
465 0xffff, /* src_mask */
466 0xffff, /* dst_mask */
467 FALSE), /* pcrel_offset */
468
469 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
470 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
471 16, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 16, /* bitsize */
474 FALSE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_dont, /* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
479 FALSE, /* partial_inplace */
480 0xffff, /* src_mask */
481 0xffff, /* dst_mask */
482 FALSE), /* pcrel_offset */
483
484 /* MOVZ: ((S+A) >> 32) & 0xffff */
485 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
486 32, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 16, /* bitsize */
489 FALSE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_unsigned, /* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 AARCH64_R_STR (MOVW_UABS_G2), /* name */
494 FALSE, /* partial_inplace */
495 0xffff, /* src_mask */
496 0xffff, /* dst_mask */
497 FALSE), /* pcrel_offset */
498
499 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
500 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
501 32, /* rightshift */
502 2, /* size (0 = byte, 1 = short, 2 = long) */
503 16, /* bitsize */
504 FALSE, /* pc_relative */
505 0, /* bitpos */
506 complain_overflow_dont, /* complain_on_overflow */
507 bfd_elf_generic_reloc, /* special_function */
508 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
509 FALSE, /* partial_inplace */
510 0xffff, /* src_mask */
511 0xffff, /* dst_mask */
512 FALSE), /* pcrel_offset */
513
514 /* MOVZ: ((S+A) >> 48) & 0xffff */
515 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
516 48, /* rightshift */
517 2, /* size (0 = byte, 1 = short, 2 = long) */
518 16, /* bitsize */
519 FALSE, /* pc_relative */
520 0, /* bitpos */
521 complain_overflow_unsigned, /* complain_on_overflow */
522 bfd_elf_generic_reloc, /* special_function */
523 AARCH64_R_STR (MOVW_UABS_G3), /* name */
524 FALSE, /* partial_inplace */
525 0xffff, /* src_mask */
526 0xffff, /* dst_mask */
527 FALSE), /* pcrel_offset */
528
529 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
530 signed data or abs address inline. Will change instruction
531 to MOVN or MOVZ depending on sign of calculated value. */
532
533 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
534 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_signed, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 AARCH64_R_STR (MOVW_SABS_G0), /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
549 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_signed, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 AARCH64_R_STR (MOVW_SABS_G1), /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
564 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
565 32, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_signed, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_SABS_G2), /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
579 addresses: PG(x) is (x & ~0xfff). */
580
581 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
582 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
583 2, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 19, /* bitsize */
586 TRUE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_signed, /* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 AARCH64_R_STR (LD_PREL_LO19), /* name */
591 FALSE, /* partial_inplace */
592 0x7ffff, /* src_mask */
593 0x7ffff, /* dst_mask */
594 TRUE), /* pcrel_offset */
595
596 /* ADR: (S+A-P) & 0x1fffff */
597 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 21, /* bitsize */
601 TRUE, /* pc_relative */
602 0, /* bitpos */
603 complain_overflow_signed, /* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 AARCH64_R_STR (ADR_PREL_LO21), /* name */
606 FALSE, /* partial_inplace */
607 0x1fffff, /* src_mask */
608 0x1fffff, /* dst_mask */
609 TRUE), /* pcrel_offset */
610
611 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
612 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
613 12, /* rightshift */
614 2, /* size (0 = byte, 1 = short, 2 = long) */
615 21, /* bitsize */
616 TRUE, /* pc_relative */
617 0, /* bitpos */
618 complain_overflow_signed, /* complain_on_overflow */
619 bfd_elf_generic_reloc, /* special_function */
620 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
621 FALSE, /* partial_inplace */
622 0x1fffff, /* src_mask */
623 0x1fffff, /* dst_mask */
624 TRUE), /* pcrel_offset */
625
626 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
627 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
628 12, /* rightshift */
629 2, /* size (0 = byte, 1 = short, 2 = long) */
630 21, /* bitsize */
631 TRUE, /* pc_relative */
632 0, /* bitpos */
633 complain_overflow_dont, /* complain_on_overflow */
634 bfd_elf_generic_reloc, /* special_function */
635 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
636 FALSE, /* partial_inplace */
637 0x1fffff, /* src_mask */
638 0x1fffff, /* dst_mask */
639 TRUE), /* pcrel_offset */
640
641 /* ADD: (S+A) & 0xfff [no overflow check] */
642 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
643 0, /* rightshift */
644 2, /* size (0 = byte, 1 = short, 2 = long) */
645 12, /* bitsize */
646 FALSE, /* pc_relative */
647 10, /* bitpos */
648 complain_overflow_dont, /* complain_on_overflow */
649 bfd_elf_generic_reloc, /* special_function */
650 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
651 FALSE, /* partial_inplace */
652 0x3ffc00, /* src_mask */
653 0x3ffc00, /* dst_mask */
654 FALSE), /* pcrel_offset */
655
656 /* LD/ST8: (S+A) & 0xfff */
657 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
658 0, /* rightshift */
659 2, /* size (0 = byte, 1 = short, 2 = long) */
660 12, /* bitsize */
661 FALSE, /* pc_relative */
662 0, /* bitpos */
663 complain_overflow_dont, /* complain_on_overflow */
664 bfd_elf_generic_reloc, /* special_function */
665 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
666 FALSE, /* partial_inplace */
667 0xfff, /* src_mask */
668 0xfff, /* dst_mask */
669 FALSE), /* pcrel_offset */
670
671 /* Relocations for control-flow instructions. */
672
673 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
674 HOWTO (AARCH64_R (TSTBR14), /* type */
675 2, /* rightshift */
676 2, /* size (0 = byte, 1 = short, 2 = long) */
677 14, /* bitsize */
678 TRUE, /* pc_relative */
679 0, /* bitpos */
680 complain_overflow_signed, /* complain_on_overflow */
681 bfd_elf_generic_reloc, /* special_function */
682 AARCH64_R_STR (TSTBR14), /* name */
683 FALSE, /* partial_inplace */
684 0x3fff, /* src_mask */
685 0x3fff, /* dst_mask */
686 TRUE), /* pcrel_offset */
687
688 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
689 HOWTO (AARCH64_R (CONDBR19), /* type */
690 2, /* rightshift */
691 2, /* size (0 = byte, 1 = short, 2 = long) */
692 19, /* bitsize */
693 TRUE, /* pc_relative */
694 0, /* bitpos */
695 complain_overflow_signed, /* complain_on_overflow */
696 bfd_elf_generic_reloc, /* special_function */
697 AARCH64_R_STR (CONDBR19), /* name */
698 FALSE, /* partial_inplace */
699 0x7ffff, /* src_mask */
700 0x7ffff, /* dst_mask */
701 TRUE), /* pcrel_offset */
702
703 /* B: ((S+A-P) >> 2) & 0x3ffffff */
704 HOWTO (AARCH64_R (JUMP26), /* type */
705 2, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 26, /* bitsize */
708 TRUE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_signed, /* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 AARCH64_R_STR (JUMP26), /* name */
713 FALSE, /* partial_inplace */
714 0x3ffffff, /* src_mask */
715 0x3ffffff, /* dst_mask */
716 TRUE), /* pcrel_offset */
717
718 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
719 HOWTO (AARCH64_R (CALL26), /* type */
720 2, /* rightshift */
721 2, /* size (0 = byte, 1 = short, 2 = long) */
722 26, /* bitsize */
723 TRUE, /* pc_relative */
724 0, /* bitpos */
725 complain_overflow_signed, /* complain_on_overflow */
726 bfd_elf_generic_reloc, /* special_function */
727 AARCH64_R_STR (CALL26), /* name */
728 FALSE, /* partial_inplace */
729 0x3ffffff, /* src_mask */
730 0x3ffffff, /* dst_mask */
731 TRUE), /* pcrel_offset */
732
733 /* LD/ST16: (S+A) & 0xffe */
734 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
735 1, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 12, /* bitsize */
738 FALSE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_dont, /* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
743 FALSE, /* partial_inplace */
744 0xffe, /* src_mask */
745 0xffe, /* dst_mask */
746 FALSE), /* pcrel_offset */
747
748 /* LD/ST32: (S+A) & 0xffc */
749 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
750 2, /* rightshift */
751 2, /* size (0 = byte, 1 = short, 2 = long) */
752 12, /* bitsize */
753 FALSE, /* pc_relative */
754 0, /* bitpos */
755 complain_overflow_dont, /* complain_on_overflow */
756 bfd_elf_generic_reloc, /* special_function */
757 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
758 FALSE, /* partial_inplace */
759 0xffc, /* src_mask */
760 0xffc, /* dst_mask */
761 FALSE), /* pcrel_offset */
762
763 /* LD/ST64: (S+A) & 0xff8 */
764 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
765 3, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 12, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_dont, /* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
773 FALSE, /* partial_inplace */
774 0xff8, /* src_mask */
775 0xff8, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 /* LD/ST128: (S+A) & 0xff0 */
779 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
780 4, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 12, /* bitsize */
783 FALSE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont, /* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
788 FALSE, /* partial_inplace */
789 0xff0, /* src_mask */
790 0xff0, /* dst_mask */
791 FALSE), /* pcrel_offset */
792
793 /* Set a load-literal immediate field to bits
794 0x1FFFFC of G(S)-P */
795 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
796 2, /* rightshift */
797 2, /* size (0 = byte,1 = short,2 = long) */
798 19, /* bitsize */
799 TRUE, /* pc_relative */
800 0, /* bitpos */
801 complain_overflow_signed, /* complain_on_overflow */
802 bfd_elf_generic_reloc, /* special_function */
803 AARCH64_R_STR (GOT_LD_PREL19), /* name */
804 FALSE, /* partial_inplace */
805 0xffffe0, /* src_mask */
806 0xffffe0, /* dst_mask */
807 TRUE), /* pcrel_offset */
808
809 /* Get to the page for the GOT entry for the symbol
810 (G(S) - P) using an ADRP instruction. */
811 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
812 12, /* rightshift */
813 2, /* size (0 = byte, 1 = short, 2 = long) */
814 21, /* bitsize */
815 TRUE, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
820 FALSE, /* partial_inplace */
821 0x1fffff, /* src_mask */
822 0x1fffff, /* dst_mask */
823 TRUE), /* pcrel_offset */
824
825 /* LD64: GOT offset G(S) & 0xff8 */
826 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
827 3, /* rightshift */
828 2, /* size (0 = byte, 1 = short, 2 = long) */
829 12, /* bitsize */
830 FALSE, /* pc_relative */
831 0, /* bitpos */
832 complain_overflow_dont, /* complain_on_overflow */
833 bfd_elf_generic_reloc, /* special_function */
834 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
835 FALSE, /* partial_inplace */
836 0xff8, /* src_mask */
837 0xff8, /* dst_mask */
838 FALSE), /* pcrel_offset */
839
840 /* LD32: GOT offset G(S) & 0xffc */
841 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
842 2, /* rightshift */
843 2, /* size (0 = byte, 1 = short, 2 = long) */
844 12, /* bitsize */
845 FALSE, /* pc_relative */
846 0, /* bitpos */
847 complain_overflow_dont, /* complain_on_overflow */
848 bfd_elf_generic_reloc, /* special_function */
849 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
850 FALSE, /* partial_inplace */
851 0xffc, /* src_mask */
852 0xffc, /* dst_mask */
853 FALSE), /* pcrel_offset */
854
855 /* LD64: GOT offset for the symbol. */
856 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
857 3, /* rightshift */
858 2, /* size (0 = byte, 1 = short, 2 = long) */
859 12, /* bitsize */
860 FALSE, /* pc_relative */
861 0, /* bitpos */
862 complain_overflow_unsigned, /* complain_on_overflow */
863 bfd_elf_generic_reloc, /* special_function */
864 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
865 FALSE, /* partial_inplace */
866 0x7ff8, /* src_mask */
867 0x7ff8, /* dst_mask */
868 FALSE), /* pcrel_offset */
869
870 /* LD32: GOT offset to the page address of GOT table.
871 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
872 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
873 2, /* rightshift */
874 2, /* size (0 = byte, 1 = short, 2 = long) */
875 12, /* bitsize */
876 FALSE, /* pc_relative */
877 0, /* bitpos */
878 complain_overflow_unsigned, /* complain_on_overflow */
879 bfd_elf_generic_reloc, /* special_function */
880 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
881 FALSE, /* partial_inplace */
882 0x5ffc, /* src_mask */
883 0x5ffc, /* dst_mask */
884 FALSE), /* pcrel_offset */
885
886 /* LD64: GOT offset to the page address of GOT table.
887 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
888 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
889 3, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_unsigned, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
897 FALSE, /* partial_inplace */
898 0x7ff8, /* src_mask */
899 0x7ff8, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 /* Get to the page for the GOT entry for the symbol
903 (G(S) - P) using an ADRP instruction. */
904 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
905 12, /* rightshift */
906 2, /* size (0 = byte, 1 = short, 2 = long) */
907 21, /* bitsize */
908 TRUE, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
913 FALSE, /* partial_inplace */
914 0x1fffff, /* src_mask */
915 0x1fffff, /* dst_mask */
916 TRUE), /* pcrel_offset */
917
918 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
919 0, /* rightshift */
920 2, /* size (0 = byte, 1 = short, 2 = long) */
921 21, /* bitsize */
922 TRUE, /* pc_relative */
923 0, /* bitpos */
924 complain_overflow_dont, /* complain_on_overflow */
925 bfd_elf_generic_reloc, /* special_function */
926 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
927 FALSE, /* partial_inplace */
928 0x1fffff, /* src_mask */
929 0x1fffff, /* dst_mask */
930 TRUE), /* pcrel_offset */
931
932 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
933 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 12, /* bitsize */
937 FALSE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont, /* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
942 FALSE, /* partial_inplace */
943 0xfff, /* src_mask */
944 0xfff, /* dst_mask */
945 FALSE), /* pcrel_offset */
946
947 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
948 16, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 16, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
956 FALSE, /* partial_inplace */
957 0xffff, /* src_mask */
958 0xffff, /* dst_mask */
959 FALSE), /* pcrel_offset */
960
961 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 16, /* bitsize */
965 FALSE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont, /* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
970 FALSE, /* partial_inplace */
971 0xffff, /* src_mask */
972 0xffff, /* dst_mask */
973 FALSE), /* pcrel_offset */
974
975 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
976 12, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 21, /* bitsize */
979 FALSE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont, /* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
984 FALSE, /* partial_inplace */
985 0x1fffff, /* src_mask */
986 0x1fffff, /* dst_mask */
987 FALSE), /* pcrel_offset */
988
989 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
990 3, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 12, /* bitsize */
993 FALSE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont, /* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
998 FALSE, /* partial_inplace */
999 0xff8, /* src_mask */
1000 0xff8, /* dst_mask */
1001 FALSE), /* pcrel_offset */
1002
1003 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1004 2, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 12, /* bitsize */
1007 FALSE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont, /* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1012 FALSE, /* partial_inplace */
1013 0xffc, /* src_mask */
1014 0xffc, /* dst_mask */
1015 FALSE), /* pcrel_offset */
1016
1017 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1018 2, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 19, /* bitsize */
1021 FALSE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont, /* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1026 FALSE, /* partial_inplace */
1027 0x1ffffc, /* src_mask */
1028 0x1ffffc, /* dst_mask */
1029 FALSE), /* pcrel_offset */
1030
1031 /* Unsigned 12 bit byte offset to module TLS base address. */
1032 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1033 0, /* rightshift */
1034 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 12, /* bitsize */
1036 FALSE, /* pc_relative */
1037 0, /* bitpos */
1038 complain_overflow_unsigned, /* complain_on_overflow */
1039 bfd_elf_generic_reloc, /* special_function */
1040 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1041 FALSE, /* partial_inplace */
1042 0xfff, /* src_mask */
1043 0xfff, /* dst_mask */
1044 FALSE), /* pcrel_offset */
1045
1046 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1047 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 12, /* bitsize */
1051 FALSE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont, /* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1056 FALSE, /* partial_inplace */
1057 0xfff, /* src_mask */
1058 0xfff, /* dst_mask */
1059 FALSE), /* pcrel_offset */
1060
1061 /* Get to the page for the GOT entry for the symbol
1062 (G(S) - P) using an ADRP instruction. */
1063 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1064 12, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 21, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_signed, /* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1072 FALSE, /* partial_inplace */
1073 0x1fffff, /* src_mask */
1074 0x1fffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 21, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_signed, /* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1086 FALSE, /* partial_inplace */
1087 0x1fffff, /* src_mask */
1088 0x1fffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1092 32, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 16, /* bitsize */
1095 FALSE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_unsigned, /* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1100 FALSE, /* partial_inplace */
1101 0xffff, /* src_mask */
1102 0xffff, /* dst_mask */
1103 FALSE), /* pcrel_offset */
1104
1105 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1106 16, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 16, /* bitsize */
1109 FALSE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont, /* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1114 FALSE, /* partial_inplace */
1115 0xffff, /* src_mask */
1116 0xffff, /* dst_mask */
1117 FALSE), /* pcrel_offset */
1118
1119 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1120 16, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 16, /* bitsize */
1123 FALSE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont, /* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1128 FALSE, /* partial_inplace */
1129 0xffff, /* src_mask */
1130 0xffff, /* dst_mask */
1131 FALSE), /* pcrel_offset */
1132
1133 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 16, /* bitsize */
1137 FALSE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1142 FALSE, /* partial_inplace */
1143 0xffff, /* src_mask */
1144 0xffff, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1146
1147 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 16, /* bitsize */
1151 FALSE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont, /* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1156 FALSE, /* partial_inplace */
1157 0xffff, /* src_mask */
1158 0xffff, /* dst_mask */
1159 FALSE), /* pcrel_offset */
1160
1161 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1162 12, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 12, /* bitsize */
1165 FALSE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_unsigned, /* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1170 FALSE, /* partial_inplace */
1171 0xfff, /* src_mask */
1172 0xfff, /* dst_mask */
1173 FALSE), /* pcrel_offset */
1174
1175 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 12, /* bitsize */
1179 FALSE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_unsigned, /* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1184 FALSE, /* partial_inplace */
1185 0xfff, /* src_mask */
1186 0xfff, /* dst_mask */
1187 FALSE), /* pcrel_offset */
1188
1189 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 12, /* bitsize */
1193 FALSE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont, /* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1198 FALSE, /* partial_inplace */
1199 0xfff, /* src_mask */
1200 0xfff, /* dst_mask */
1201 FALSE), /* pcrel_offset */
1202
1203 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1204 2, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 19, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont, /* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1212 FALSE, /* partial_inplace */
1213 0x0ffffe0, /* src_mask */
1214 0x0ffffe0, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 21, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont, /* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1226 FALSE, /* partial_inplace */
1227 0x1fffff, /* src_mask */
1228 0x1fffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 /* Get to the page for the GOT entry for the symbol
1232 (G(S) - P) using an ADRP instruction. */
1233 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1234 12, /* rightshift */
1235 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 21, /* bitsize */
1237 TRUE, /* pc_relative */
1238 0, /* bitpos */
1239 complain_overflow_dont, /* complain_on_overflow */
1240 bfd_elf_generic_reloc, /* special_function */
1241 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1242 FALSE, /* partial_inplace */
1243 0x1fffff, /* src_mask */
1244 0x1fffff, /* dst_mask */
1245 TRUE), /* pcrel_offset */
1246
1247 /* LD64: GOT offset G(S) & 0xff8. */
1248 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1249 3, /* rightshift */
1250 2, /* size (0 = byte, 1 = short, 2 = long) */
1251 12, /* bitsize */
1252 FALSE, /* pc_relative */
1253 0, /* bitpos */
1254 complain_overflow_dont, /* complain_on_overflow */
1255 bfd_elf_generic_reloc, /* special_function */
1256 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1257 FALSE, /* partial_inplace */
1258 0xff8, /* src_mask */
1259 0xff8, /* dst_mask */
1260 FALSE), /* pcrel_offset */
1261
1262 /* LD32: GOT offset G(S) & 0xffc. */
1263 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1264 2, /* rightshift */
1265 2, /* size (0 = byte, 1 = short, 2 = long) */
1266 12, /* bitsize */
1267 FALSE, /* pc_relative */
1268 0, /* bitpos */
1269 complain_overflow_dont, /* complain_on_overflow */
1270 bfd_elf_generic_reloc, /* special_function */
1271 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1272 FALSE, /* partial_inplace */
1273 0xffc, /* src_mask */
1274 0xffc, /* dst_mask */
1275 FALSE), /* pcrel_offset */
1276
1277 /* ADD: GOT offset G(S) & 0xfff. */
1278 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1279 0, /* rightshift */
1280 2, /* size (0 = byte, 1 = short, 2 = long) */
1281 12, /* bitsize */
1282 FALSE, /* pc_relative */
1283 0, /* bitpos */
1284 complain_overflow_dont, /* complain_on_overflow */
1285 bfd_elf_generic_reloc, /* special_function */
1286 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1287 FALSE, /* partial_inplace */
1288 0xfff, /* src_mask */
1289 0xfff, /* dst_mask */
1290 FALSE), /* pcrel_offset */
1291
1292 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1293 16, /* rightshift */
1294 2, /* size (0 = byte, 1 = short, 2 = long) */
1295 12, /* bitsize */
1296 FALSE, /* pc_relative */
1297 0, /* bitpos */
1298 complain_overflow_dont, /* complain_on_overflow */
1299 bfd_elf_generic_reloc, /* special_function */
1300 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1301 FALSE, /* partial_inplace */
1302 0xffff, /* src_mask */
1303 0xffff, /* dst_mask */
1304 FALSE), /* pcrel_offset */
1305
1306 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1307 0, /* rightshift */
1308 2, /* size (0 = byte, 1 = short, 2 = long) */
1309 12, /* bitsize */
1310 FALSE, /* pc_relative */
1311 0, /* bitpos */
1312 complain_overflow_dont, /* complain_on_overflow */
1313 bfd_elf_generic_reloc, /* special_function */
1314 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1315 FALSE, /* partial_inplace */
1316 0xffff, /* src_mask */
1317 0xffff, /* dst_mask */
1318 FALSE), /* pcrel_offset */
1319
1320 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1321 0, /* rightshift */
1322 2, /* size (0 = byte, 1 = short, 2 = long) */
1323 12, /* bitsize */
1324 FALSE, /* pc_relative */
1325 0, /* bitpos */
1326 complain_overflow_dont, /* complain_on_overflow */
1327 bfd_elf_generic_reloc, /* special_function */
1328 AARCH64_R_STR (TLSDESC_LDR), /* name */
1329 FALSE, /* partial_inplace */
1330 0x0, /* src_mask */
1331 0x0, /* dst_mask */
1332 FALSE), /* pcrel_offset */
1333
1334 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1335 0, /* rightshift */
1336 2, /* size (0 = byte, 1 = short, 2 = long) */
1337 12, /* bitsize */
1338 FALSE, /* pc_relative */
1339 0, /* bitpos */
1340 complain_overflow_dont, /* complain_on_overflow */
1341 bfd_elf_generic_reloc, /* special_function */
1342 AARCH64_R_STR (TLSDESC_ADD), /* name */
1343 FALSE, /* partial_inplace */
1344 0x0, /* src_mask */
1345 0x0, /* dst_mask */
1346 FALSE), /* pcrel_offset */
1347
1348 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1349 0, /* rightshift */
1350 2, /* size (0 = byte, 1 = short, 2 = long) */
1351 0, /* bitsize */
1352 FALSE, /* pc_relative */
1353 0, /* bitpos */
1354 complain_overflow_dont, /* complain_on_overflow */
1355 bfd_elf_generic_reloc, /* special_function */
1356 AARCH64_R_STR (TLSDESC_CALL), /* name */
1357 FALSE, /* partial_inplace */
1358 0x0, /* src_mask */
1359 0x0, /* dst_mask */
1360 FALSE), /* pcrel_offset */
1361
1362 HOWTO (AARCH64_R (COPY), /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 64, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_bitfield, /* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 AARCH64_R_STR (COPY), /* name */
1371 TRUE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 64, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_bitfield, /* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 AARCH64_R_STR (GLOB_DAT), /* name */
1385 TRUE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 64, /* bitsize */
1394 FALSE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_bitfield, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 AARCH64_R_STR (JUMP_SLOT), /* name */
1399 TRUE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 FALSE), /* pcrel_offset */
1403
1404 HOWTO (AARCH64_R (RELATIVE), /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 64, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield, /* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 AARCH64_R_STR (RELATIVE), /* name */
1413 TRUE, /* partial_inplace */
1414 ALL_ONES, /* src_mask */
1415 ALL_ONES, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 64, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_dont, /* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 #if ARCH_SIZE == 64
1427 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1428 #else
1429 AARCH64_R_STR (TLS_DTPMOD), /* name */
1430 #endif
1431 FALSE, /* partial_inplace */
1432 0, /* src_mask */
1433 ALL_ONES, /* dst_mask */
1434 FALSE), /* pcrel_offset */
1435
1436 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1437 0, /* rightshift */
1438 2, /* size (0 = byte, 1 = short, 2 = long) */
1439 64, /* bitsize */
1440 FALSE, /* pc_relative */
1441 0, /* bitpos */
1442 complain_overflow_dont, /* complain_on_overflow */
1443 bfd_elf_generic_reloc, /* special_function */
1444 #if ARCH_SIZE == 64
1445 AARCH64_R_STR (TLS_DTPREL64), /* name */
1446 #else
1447 AARCH64_R_STR (TLS_DTPREL), /* name */
1448 #endif
1449 FALSE, /* partial_inplace */
1450 0, /* src_mask */
1451 ALL_ONES, /* dst_mask */
1452 FALSE), /* pcrel_offset */
1453
1454 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1455 0, /* rightshift */
1456 2, /* size (0 = byte, 1 = short, 2 = long) */
1457 64, /* bitsize */
1458 FALSE, /* pc_relative */
1459 0, /* bitpos */
1460 complain_overflow_dont, /* complain_on_overflow */
1461 bfd_elf_generic_reloc, /* special_function */
1462 #if ARCH_SIZE == 64
1463 AARCH64_R_STR (TLS_TPREL64), /* name */
1464 #else
1465 AARCH64_R_STR (TLS_TPREL), /* name */
1466 #endif
1467 FALSE, /* partial_inplace */
1468 0, /* src_mask */
1469 ALL_ONES, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (AARCH64_R (TLSDESC), /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 64, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_dont, /* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 AARCH64_R_STR (TLSDESC), /* name */
1481 FALSE, /* partial_inplace */
1482 0, /* src_mask */
1483 ALL_ONES, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 HOWTO (AARCH64_R (IRELATIVE), /* type */
1487 0, /* rightshift */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1489 64, /* bitsize */
1490 FALSE, /* pc_relative */
1491 0, /* bitpos */
1492 complain_overflow_bitfield, /* complain_on_overflow */
1493 bfd_elf_generic_reloc, /* special_function */
1494 AARCH64_R_STR (IRELATIVE), /* name */
1495 FALSE, /* partial_inplace */
1496 0, /* src_mask */
1497 ALL_ONES, /* dst_mask */
1498 FALSE), /* pcrel_offset */
1499
1500 EMPTY_HOWTO (0),
1501 };
1502
1503 static reloc_howto_type elfNN_aarch64_howto_none =
1504 HOWTO (R_AARCH64_NONE, /* type */
1505 0, /* rightshift */
1506 3, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont,/* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 "R_AARCH64_NONE", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE); /* pcrel_offset */
1517
1518 /* Given HOWTO, return the bfd internal relocation enumerator. */
1519
1520 static bfd_reloc_code_real_type
1521 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1522 {
1523 const int size
1524 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1525 const ptrdiff_t offset
1526 = howto - elfNN_aarch64_howto_table;
1527
1528 if (offset > 0 && offset < size - 1)
1529 return BFD_RELOC_AARCH64_RELOC_START + offset;
1530
1531 if (howto == &elfNN_aarch64_howto_none)
1532 return BFD_RELOC_AARCH64_NONE;
1533
1534 return BFD_RELOC_AARCH64_RELOC_START;
1535 }
1536
1537 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1538
1539 static bfd_reloc_code_real_type
1540 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1541 {
1542 static bfd_boolean initialized_p = FALSE;
1543 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1544 static unsigned int offsets[R_AARCH64_end];
1545
1546 if (initialized_p == FALSE)
1547 {
1548 unsigned int i;
1549
1550 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1551 if (elfNN_aarch64_howto_table[i].type != 0)
1552 offsets[elfNN_aarch64_howto_table[i].type] = i;
1553
1554 initialized_p = TRUE;
1555 }
1556
1557 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1558 return BFD_RELOC_AARCH64_NONE;
1559
1560 /* PR 17512: file: b371e70a. */
1561 if (r_type >= R_AARCH64_end)
1562 {
1563 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1564 bfd_set_error (bfd_error_bad_value);
1565 return BFD_RELOC_AARCH64_NONE;
1566 }
1567
1568 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1569 }
1570
1571 struct elf_aarch64_reloc_map
1572 {
1573 bfd_reloc_code_real_type from;
1574 bfd_reloc_code_real_type to;
1575 };
1576
1577 /* Map bfd generic reloc to AArch64-specific reloc. */
1578 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1579 {
1580 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1581
1582 /* Basic data relocations. */
1583 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1584 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1585 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1586 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1587 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1588 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1589 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1590 };
1591
1592 /* Given the bfd internal relocation enumerator in CODE, return the
1593 corresponding howto entry. */
1594
1595 static reloc_howto_type *
1596 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1597 {
1598 unsigned int i;
1599
1600 /* Convert bfd generic reloc to AArch64-specific reloc. */
1601 if (code < BFD_RELOC_AARCH64_RELOC_START
1602 || code > BFD_RELOC_AARCH64_RELOC_END)
1603 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1604 if (elf_aarch64_reloc_map[i].from == code)
1605 {
1606 code = elf_aarch64_reloc_map[i].to;
1607 break;
1608 }
1609
1610 if (code > BFD_RELOC_AARCH64_RELOC_START
1611 && code < BFD_RELOC_AARCH64_RELOC_END)
1612 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1613 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1614
1615 if (code == BFD_RELOC_AARCH64_NONE)
1616 return &elfNN_aarch64_howto_none;
1617
1618 return NULL;
1619 }
1620
1621 static reloc_howto_type *
1622 elfNN_aarch64_howto_from_type (unsigned int r_type)
1623 {
1624 bfd_reloc_code_real_type val;
1625 reloc_howto_type *howto;
1626
1627 #if ARCH_SIZE == 32
1628 if (r_type > 256)
1629 {
1630 bfd_set_error (bfd_error_bad_value);
1631 return NULL;
1632 }
1633 #endif
1634
1635 if (r_type == R_AARCH64_NONE)
1636 return &elfNN_aarch64_howto_none;
1637
1638 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1639 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1640
1641 if (howto != NULL)
1642 return howto;
1643
1644 bfd_set_error (bfd_error_bad_value);
1645 return NULL;
1646 }
1647
1648 static void
1649 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1650 Elf_Internal_Rela *elf_reloc)
1651 {
1652 unsigned int r_type;
1653
1654 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1655 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1656 }
1657
1658 static reloc_howto_type *
1659 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1660 bfd_reloc_code_real_type code)
1661 {
1662 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1663
1664 if (howto != NULL)
1665 return howto;
1666
1667 bfd_set_error (bfd_error_bad_value);
1668 return NULL;
1669 }
1670
1671 static reloc_howto_type *
1672 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1673 const char *r_name)
1674 {
1675 unsigned int i;
1676
1677 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1678 if (elfNN_aarch64_howto_table[i].name != NULL
1679 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1680 return &elfNN_aarch64_howto_table[i];
1681
1682 return NULL;
1683 }
1684
1685 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1686 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1687 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1688 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1689
1690 /* The linker script knows the section names for placement.
1691 The entry_names are used to do simple name mangling on the stubs.
1692 Given a function name and its type, the stub can be found. The
1693 name can be changed. The only requirement is that the %s be present. */
1694 #define STUB_ENTRY_NAME "__%s_veneer"
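/* For instance, with the template above a veneer reaching the function
   "printf" is given the local symbol name "__printf_veneer".  */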
1695
1696 /* The name of the dynamic interpreter. This is put in the .interp
1697 section. */
1698 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1699
1700 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1701 (((1 << 25) - 1) << 2)
1702 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1703 (-((1 << 25) << 2))
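/* With the signed 26-bit word offset of B/BL, this gives a reach of
   [-0x8000000, +0x7fffffc] bytes, i.e. roughly +/-128MiB around the
   branch instruction.  */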
1704
1705 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1706 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1707
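/* ADRP has a signed 21-bit immediate counting 4KiB pages, so the page
   delta computed below must lie in [AARCH64_MIN_ADRP_IMM,
   AARCH64_MAX_ADRP_IMM]; this corresponds to a reach of roughly
   +/-4GiB from the place of the instruction.  */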
1708 static int
1709 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1710 {
1711 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1712 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1713 }
1714
1715 static int
1716 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1717 {
1718 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1719 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1720 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1721 }
1722
1723 static const uint32_t aarch64_adrp_branch_stub [] =
1724 {
1725 0x90000010, /* adrp ip0, X */
1726 /* R_AARCH64_ADR_HI21_PCREL(X) */
1727 0x91000210, /* add ip0, ip0, :lo12:X */
1728 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1729 0xd61f0200, /* br ip0 */
1730 };
1731
1732 static const uint32_t aarch64_long_branch_stub[] =
1733 {
1734 #if ARCH_SIZE == 64
1735 0x58000090, /* ldr ip0, 1f */
1736 #else
1737 0x18000090, /* ldr wip0, 1f */
1738 #endif
1739 0x10000011, /* adr ip1, #0 */
1740 0x8b110210, /* add ip0, ip0, ip1 */
1741 0xd61f0200, /* br ip0 */
1742 0x00000000, /* 1: .xword or .word
1743 R_AARCH64_PRELNN(X) + 12
1744 */
1745 0x00000000,
1746 };
1747
1748 static const uint32_t aarch64_erratum_835769_stub[] =
1749 {
1750 0x00000000, /* Placeholder for multiply accumulate. */
1751 0x14000000, /* b <label> */
1752 };
1753
1754 static const uint32_t aarch64_erratum_843419_stub[] =
1755 {
1756 0x00000000, /* Placeholder for LDR instruction. */
1757 0x14000000, /* b <label> */
1758 };
1759
1760 /* Section name for stubs is the associated section name plus this
1761 string. */
1762 #define STUB_SUFFIX ".stub"
1763
1764 enum elf_aarch64_stub_type
1765 {
1766 aarch64_stub_none,
1767 aarch64_stub_adrp_branch,
1768 aarch64_stub_long_branch,
1769 aarch64_stub_erratum_835769_veneer,
1770 aarch64_stub_erratum_843419_veneer,
1771 };
1772
1773 struct elf_aarch64_stub_hash_entry
1774 {
1775 /* Base hash table entry structure. */
1776 struct bfd_hash_entry root;
1777
1778 /* The stub section. */
1779 asection *stub_sec;
1780
1781 /* Offset within stub_sec of the beginning of this stub. */
1782 bfd_vma stub_offset;
1783
1784 /* Given the symbol's value and its section we can determine its final
1785 value when building the stubs (so the stub knows where to jump). */
1786 bfd_vma target_value;
1787 asection *target_section;
1788
1789 enum elf_aarch64_stub_type stub_type;
1790
1791 /* The symbol table entry, if any, that this was derived from. */
1792 struct elf_aarch64_link_hash_entry *h;
1793
1794 /* Destination symbol type */
1795 unsigned char st_type;
1796
1797 /* Where this stub is being called from, or, in the case of combined
1798 stub sections, the first input section in the group. */
1799 asection *id_sec;
1800
1801 /* The name for the local symbol at the start of this stub. The
1802 stub name in the hash table has to be unique; this does not, so
1803 it can be friendlier. */
1804 char *output_name;
1805
1806 /* The instruction which caused this stub to be generated (only valid for
1807 erratum 835769 workaround stubs at present). */
1808 uint32_t veneered_insn;
1809
1810 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1811 bfd_vma adrp_offset;
1812 };
1813
1814 /* Used to build a map of a section. This is required for mixed-endian
1815 code/data. */
1816
1817 typedef struct elf_elf_section_map
1818 {
1819 bfd_vma vma;
1820 char type;
1821 }
1822 elf_aarch64_section_map;
1823
1824
1825 typedef struct _aarch64_elf_section_data
1826 {
1827 struct bfd_elf_section_data elf;
1828 unsigned int mapcount;
1829 unsigned int mapsize;
1830 elf_aarch64_section_map *map;
1831 }
1832 _aarch64_elf_section_data;
1833
1834 #define elf_aarch64_section_data(sec) \
1835 ((_aarch64_elf_section_data *) elf_section_data (sec))
1836
1837 /* The size of the thread control block which is defined to be two pointers. */
1838 #define TCB_SIZE (ARCH_SIZE/8)*2
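/* E.g. 16 bytes when ARCH_SIZE is 64 and 8 bytes when ARCH_SIZE is 32,
   i.e. two pointers as stated above.  */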
1839
1840 struct elf_aarch64_local_symbol
1841 {
1842 unsigned int got_type;
1843 bfd_signed_vma got_refcount;
1844 bfd_vma got_offset;
1845
1846 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1847 offset is from the end of the jump table and reserved entries
1848 within the PLTGOT.
1849
1850 The magic value (bfd_vma) -1 indicates that an offset has not been
1851 allocated. */
1852 bfd_vma tlsdesc_got_jump_table_offset;
1853 };
1854
1855 struct elf_aarch64_obj_tdata
1856 {
1857 struct elf_obj_tdata root;
1858
1859 /* local symbol descriptors */
1860 struct elf_aarch64_local_symbol *locals;
1861
1862 /* Zero to warn when linking objects with incompatible enum sizes. */
1863 int no_enum_size_warning;
1864
1865 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1866 int no_wchar_size_warning;
1867 };
1868
1869 #define elf_aarch64_tdata(bfd) \
1870 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1871
1872 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1873
1874 #define is_aarch64_elf(bfd) \
1875 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1876 && elf_tdata (bfd) != NULL \
1877 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1878
1879 static bfd_boolean
1880 elfNN_aarch64_mkobject (bfd *abfd)
1881 {
1882 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1883 AARCH64_ELF_DATA);
1884 }
1885
1886 #define elf_aarch64_hash_entry(ent) \
1887 ((struct elf_aarch64_link_hash_entry *)(ent))
1888
1889 #define GOT_UNKNOWN 0
1890 #define GOT_NORMAL 1
1891 #define GOT_TLS_GD 2
1892 #define GOT_TLS_IE 4
1893 #define GOT_TLSDESC_GD 8
1894
1895 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
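/* For example, a symbol accessed through both the traditional GD form
   and the TLS descriptor form ends up with got_type
   (GOT_TLS_GD | GOT_TLSDESC_GD), for which GOT_TLS_GD_ANY_P is true.  */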
1896
1897 /* AArch64 ELF linker hash entry. */
1898 struct elf_aarch64_link_hash_entry
1899 {
1900 struct elf_link_hash_entry root;
1901
1902 /* Track dynamic relocs copied for this symbol. */
1903 struct elf_dyn_relocs *dyn_relocs;
1904
1905 /* Since PLT entries have variable size, we need to record the
1906 index into .got.plt instead of recomputing it from the PLT
1907 offset. */
1908 bfd_signed_vma plt_got_offset;
1909
1910 /* Bit mask representing the type of GOT entry(s) if any required by
1911 this symbol. */
1912 unsigned int got_type;
1913
1914 /* A pointer to the most recently used stub hash entry against this
1915 symbol. */
1916 struct elf_aarch64_stub_hash_entry *stub_cache;
1917
1918 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1919 is from the end of the jump table and reserved entries within the PLTGOT.
1920
1921 The magic value (bfd_vma) -1 indicates that an offset has not
1922 been allocated. */
1923 bfd_vma tlsdesc_got_jump_table_offset;
1924 };
1925
1926 static unsigned int
1927 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1928 bfd *abfd,
1929 unsigned long r_symndx)
1930 {
1931 if (h)
1932 return elf_aarch64_hash_entry (h)->got_type;
1933
1934 if (! elf_aarch64_locals (abfd))
1935 return GOT_UNKNOWN;
1936
1937 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1938 }
1939
1940 /* Get the AArch64 elf linker hash table from a link_info structure. */
1941 #define elf_aarch64_hash_table(info) \
1942 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1943
1944 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1945 ((struct elf_aarch64_stub_hash_entry *) \
1946 bfd_hash_lookup ((table), (string), (create), (copy)))
1947
1948 /* AArch64 ELF linker hash table. */
1949 struct elf_aarch64_link_hash_table
1950 {
1951 /* The main hash table. */
1952 struct elf_link_hash_table root;
1953
1954 /* Nonzero to force PIC branch veneers. */
1955 int pic_veneer;
1956
1957 /* Fix erratum 835769. */
1958 int fix_erratum_835769;
1959
1960 /* Fix erratum 843419. */
1961 int fix_erratum_843419;
1962
1963 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1964 int fix_erratum_843419_adr;
1965
1966 /* The number of bytes in the initial entry in the PLT. */
1967 bfd_size_type plt_header_size;
1968
1969 /* The number of bytes in the subsequent PLT entries. */
1970 bfd_size_type plt_entry_size;
1971
1972 /* Short-cuts to get to dynamic linker sections. */
1973 asection *sdynbss;
1974 asection *srelbss;
1975
1976 /* Small local sym cache. */
1977 struct sym_cache sym_cache;
1978
1979 /* For convenience in allocate_dynrelocs. */
1980 bfd *obfd;
1981
1982 /* The amount of space used by the reserved portion of the sgotplt
1983 section, plus whatever space is used by the jump slots. */
1984 bfd_vma sgotplt_jump_table_size;
1985
1986 /* The stub hash table. */
1987 struct bfd_hash_table stub_hash_table;
1988
1989 /* Linker stub bfd. */
1990 bfd *stub_bfd;
1991
1992 /* Linker call-backs. */
1993 asection *(*add_stub_section) (const char *, asection *);
1994 void (*layout_sections_again) (void);
1995
1996 /* Array to keep track of which stub sections have been created, and
1997 information on stub grouping. */
1998 struct map_stub
1999 {
2000 /* This is the section to which stubs in the group will be
2001 attached. */
2002 asection *link_sec;
2003 /* The stub section. */
2004 asection *stub_sec;
2005 } *stub_group;
2006
2007 /* Assorted information used by elfNN_aarch64_size_stubs. */
2008 unsigned int bfd_count;
2009 int top_index;
2010 asection **input_list;
2011
2012 /* The offset into splt of the PLT entry for the TLS descriptor
2013 resolver. Special values are 0, if not necessary (or not found
2014 to be necessary yet), and -1 if needed but not determined
2015 yet. */
2016 bfd_vma tlsdesc_plt;
2017
2018 /* The GOT offset for the lazy trampoline. Communicated to the
2019 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2020 indicates an offset is not allocated. */
2021 bfd_vma dt_tlsdesc_got;
2022
2023 /* Used by local STT_GNU_IFUNC symbols. */
2024 htab_t loc_hash_table;
2025 void * loc_hash_memory;
2026 };
2027
2028 /* Create an entry in an AArch64 ELF linker hash table. */
2029
2030 static struct bfd_hash_entry *
2031 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2032 struct bfd_hash_table *table,
2033 const char *string)
2034 {
2035 struct elf_aarch64_link_hash_entry *ret =
2036 (struct elf_aarch64_link_hash_entry *) entry;
2037
2038 /* Allocate the structure if it has not already been allocated by a
2039 subclass. */
2040 if (ret == NULL)
2041 ret = bfd_hash_allocate (table,
2042 sizeof (struct elf_aarch64_link_hash_entry));
2043 if (ret == NULL)
2044 return (struct bfd_hash_entry *) ret;
2045
2046 /* Call the allocation method of the superclass. */
2047 ret = ((struct elf_aarch64_link_hash_entry *)
2048 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2049 table, string));
2050 if (ret != NULL)
2051 {
2052 ret->dyn_relocs = NULL;
2053 ret->got_type = GOT_UNKNOWN;
2054 ret->plt_got_offset = (bfd_vma) - 1;
2055 ret->stub_cache = NULL;
2056 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2057 }
2058
2059 return (struct bfd_hash_entry *) ret;
2060 }
2061
2062 /* Initialize an entry in the stub hash table. */
2063
2064 static struct bfd_hash_entry *
2065 stub_hash_newfunc (struct bfd_hash_entry *entry,
2066 struct bfd_hash_table *table, const char *string)
2067 {
2068 /* Allocate the structure if it has not already been allocated by a
2069 subclass. */
2070 if (entry == NULL)
2071 {
2072 entry = bfd_hash_allocate (table,
2073 sizeof (struct
2074 elf_aarch64_stub_hash_entry));
2075 if (entry == NULL)
2076 return entry;
2077 }
2078
2079 /* Call the allocation method of the superclass. */
2080 entry = bfd_hash_newfunc (entry, table, string);
2081 if (entry != NULL)
2082 {
2083 struct elf_aarch64_stub_hash_entry *eh;
2084
2085 /* Initialize the local fields. */
2086 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2087 eh->adrp_offset = 0;
2088 eh->stub_sec = NULL;
2089 eh->stub_offset = 0;
2090 eh->target_value = 0;
2091 eh->target_section = NULL;
2092 eh->stub_type = aarch64_stub_none;
2093 eh->h = NULL;
2094 eh->id_sec = NULL;
2095 }
2096
2097 return entry;
2098 }
2099
2100 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2101 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2102 as global symbols. We reuse indx and dynstr_index for the local
2103 symbol hash since they aren't used by global symbols in this backend. */
2104
2105 static hashval_t
2106 elfNN_aarch64_local_htab_hash (const void *ptr)
2107 {
2108 struct elf_link_hash_entry *h
2109 = (struct elf_link_hash_entry *) ptr;
2110 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2111 }
2112
2113 /* Compare local hash entries. */
2114
2115 static int
2116 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2117 {
2118 struct elf_link_hash_entry *h1
2119 = (struct elf_link_hash_entry *) ptr1;
2120 struct elf_link_hash_entry *h2
2121 = (struct elf_link_hash_entry *) ptr2;
2122
2123 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2124 }
2125
2126 /* Find and/or create a hash entry for a local symbol. */
2127
2128 static struct elf_link_hash_entry *
2129 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2130 bfd *abfd, const Elf_Internal_Rela *rel,
2131 bfd_boolean create)
2132 {
2133 struct elf_aarch64_link_hash_entry e, *ret;
2134 asection *sec = abfd->sections;
2135 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2136 ELFNN_R_SYM (rel->r_info));
2137 void **slot;
2138
2139 e.root.indx = sec->id;
2140 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2141 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2142 create ? INSERT : NO_INSERT);
2143
2144 if (!slot)
2145 return NULL;
2146
2147 if (*slot)
2148 {
2149 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2150 return &ret->root;
2151 }
2152
2153 ret = (struct elf_aarch64_link_hash_entry *)
2154 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2155 sizeof (struct elf_aarch64_link_hash_entry));
2156 if (ret)
2157 {
2158 memset (ret, 0, sizeof (*ret));
2159 ret->root.indx = sec->id;
2160 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2161 ret->root.dynindx = -1;
2162 *slot = ret;
2163 }
2164 return &ret->root;
2165 }
2166
2167 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2168
2169 static void
2170 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2171 struct elf_link_hash_entry *dir,
2172 struct elf_link_hash_entry *ind)
2173 {
2174 struct elf_aarch64_link_hash_entry *edir, *eind;
2175
2176 edir = (struct elf_aarch64_link_hash_entry *) dir;
2177 eind = (struct elf_aarch64_link_hash_entry *) ind;
2178
2179 if (eind->dyn_relocs != NULL)
2180 {
2181 if (edir->dyn_relocs != NULL)
2182 {
2183 struct elf_dyn_relocs **pp;
2184 struct elf_dyn_relocs *p;
2185
2186 /* Add reloc counts against the indirect sym to the direct sym
2187 list. Merge any entries against the same section. */
2188 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2189 {
2190 struct elf_dyn_relocs *q;
2191
2192 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2193 if (q->sec == p->sec)
2194 {
2195 q->pc_count += p->pc_count;
2196 q->count += p->count;
2197 *pp = p->next;
2198 break;
2199 }
2200 if (q == NULL)
2201 pp = &p->next;
2202 }
2203 *pp = edir->dyn_relocs;
2204 }
2205
2206 edir->dyn_relocs = eind->dyn_relocs;
2207 eind->dyn_relocs = NULL;
2208 }
2209
2210 if (ind->root.type == bfd_link_hash_indirect)
2211 {
2212 /* Copy over PLT info. */
2213 if (dir->got.refcount <= 0)
2214 {
2215 edir->got_type = eind->got_type;
2216 eind->got_type = GOT_UNKNOWN;
2217 }
2218 }
2219
2220 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2221 }
2222
2223 /* Destroy an AArch64 elf linker hash table. */
2224
2225 static void
2226 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2227 {
2228 struct elf_aarch64_link_hash_table *ret
2229 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2230
2231 if (ret->loc_hash_table)
2232 htab_delete (ret->loc_hash_table);
2233 if (ret->loc_hash_memory)
2234 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2235
2236 bfd_hash_table_free (&ret->stub_hash_table);
2237 _bfd_elf_link_hash_table_free (obfd);
2238 }
2239
2240 /* Create an AArch64 elf linker hash table. */
2241
2242 static struct bfd_link_hash_table *
2243 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2244 {
2245 struct elf_aarch64_link_hash_table *ret;
2246 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2247
2248 ret = bfd_zmalloc (amt);
2249 if (ret == NULL)
2250 return NULL;
2251
2252 if (!_bfd_elf_link_hash_table_init
2253 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2254 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2255 {
2256 free (ret);
2257 return NULL;
2258 }
2259
2260 ret->plt_header_size = PLT_ENTRY_SIZE;
2261 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2262 ret->obfd = abfd;
2263 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2264
2265 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2266 sizeof (struct elf_aarch64_stub_hash_entry)))
2267 {
2268 _bfd_elf_link_hash_table_free (abfd);
2269 return NULL;
2270 }
2271
2272 ret->loc_hash_table = htab_try_create (1024,
2273 elfNN_aarch64_local_htab_hash,
2274 elfNN_aarch64_local_htab_eq,
2275 NULL);
2276 ret->loc_hash_memory = objalloc_create ();
2277 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2278 {
2279 elfNN_aarch64_link_hash_table_free (abfd);
2280 return NULL;
2281 }
2282 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2283
2284 return &ret->root.root;
2285 }
2286
2287 static bfd_boolean
2288 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2289 bfd_vma offset, bfd_vma value)
2290 {
2291 reloc_howto_type *howto;
2292 bfd_vma place;
2293
2294 howto = elfNN_aarch64_howto_from_type (r_type);
2295 place = (input_section->output_section->vma + input_section->output_offset
2296 + offset);
2297
2298 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2299 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2300 return _bfd_aarch64_elf_put_addend (input_bfd,
2301 input_section->contents + offset, r_type,
2302 howto, value);
2303 }
2304
2305 static enum elf_aarch64_stub_type
2306 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2307 {
2308 if (aarch64_valid_for_adrp_p (value, place))
2309 return aarch64_stub_adrp_branch;
2310 return aarch64_stub_long_branch;
2311 }
2312
2313 /* Determine the type of stub needed, if any, for a call. */
2314
2315 static enum elf_aarch64_stub_type
2316 aarch64_type_of_stub (struct bfd_link_info *info,
2317 asection *input_sec,
2318 const Elf_Internal_Rela *rel,
2319 asection *sym_sec,
2320 unsigned char st_type,
2321 struct elf_aarch64_link_hash_entry *hash,
2322 bfd_vma destination)
2323 {
2324 bfd_vma location;
2325 bfd_signed_vma branch_offset;
2326 unsigned int r_type;
2327 struct elf_aarch64_link_hash_table *globals;
2328 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2329 bfd_boolean via_plt_p;
2330
2331 if (st_type != STT_FUNC
2332 && (sym_sec != bfd_abs_section_ptr))
2333 return stub_type;
2334
2335 globals = elf_aarch64_hash_table (info);
2336 via_plt_p = (globals->root.splt != NULL && hash != NULL
2337 && hash->root.plt.offset != (bfd_vma) - 1);
2338 /* Make sure call to plt stub can fit into the branch range. */
2339 if (via_plt_p)
2340 destination = (globals->root.splt->output_section->vma
2341 + globals->root.splt->output_offset
2342 + hash->root.plt.offset);
2343
2344 /* Determine where the call point is. */
2345 location = (input_sec->output_offset
2346 + input_sec->output_section->vma + rel->r_offset);
2347
2348 branch_offset = (bfd_signed_vma) (destination - location);
2349
2350 r_type = ELFNN_R_TYPE (rel->r_info);
2351
2352 /* We don't want to redirect any old unconditional jump in this way,
2353 only one which is being used for a sibcall, where it is
2354 acceptable for the IP0 and IP1 registers to be clobbered. */
2355 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2356 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2357 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2358 {
2359 stub_type = aarch64_stub_long_branch;
2360 }
2361
2362 return stub_type;
2363 }
2364
2365 /* Build a name for an entry in the stub hash table. */
2366
2367 static char *
2368 elfNN_aarch64_stub_name (const asection *input_section,
2369 const asection *sym_sec,
2370 const struct elf_aarch64_link_hash_entry *hash,
2371 const Elf_Internal_Rela *rel)
2372 {
2373 char *stub_name;
2374 bfd_size_type len;
2375
2376 if (hash)
2377 {
2378 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2379 stub_name = bfd_malloc (len);
2380 if (stub_name != NULL)
2381 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2382 (unsigned int) input_section->id,
2383 hash->root.root.root.string,
2384 rel->r_addend);
2385 }
2386 else
2387 {
2388 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2389 stub_name = bfd_malloc (len);
2390 if (stub_name != NULL)
2391 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2392 (unsigned int) input_section->id,
2393 (unsigned int) sym_sec->id,
2394 (unsigned int) ELFNN_R_SYM (rel->r_info),
2395 rel->r_addend);
2396 }
2397
2398 return stub_name;
2399 }
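
/* For illustration, assuming an input section with id 0x2a: a stub
   reaching the global symbol "foo" with addend 0 is named
   "0000002a_foo+0", while a stub reaching local symbol index 5 in the
   section with id 0x1f is named "0000002a_1f:5+0" (see the snprintf
   formats above).  */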
2400
2401 /* Look up an entry in the stub hash. Stub entries are cached because
2402 creating the stub name takes a bit of time. */
2403
2404 static struct elf_aarch64_stub_hash_entry *
2405 elfNN_aarch64_get_stub_entry (const asection *input_section,
2406 const asection *sym_sec,
2407 struct elf_link_hash_entry *hash,
2408 const Elf_Internal_Rela *rel,
2409 struct elf_aarch64_link_hash_table *htab)
2410 {
2411 struct elf_aarch64_stub_hash_entry *stub_entry;
2412 struct elf_aarch64_link_hash_entry *h =
2413 (struct elf_aarch64_link_hash_entry *) hash;
2414 const asection *id_sec;
2415
2416 if ((input_section->flags & SEC_CODE) == 0)
2417 return NULL;
2418
2419 /* If this input section is part of a group of sections sharing one
2420 stub section, then use the id of the first section in the group.
2421 Stub names need to include a section id, as there may well be
2422 more than one stub used to reach say, printf, and we need to
2423 distinguish between them. */
2424 id_sec = htab->stub_group[input_section->id].link_sec;
2425
2426 if (h != NULL && h->stub_cache != NULL
2427 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2428 {
2429 stub_entry = h->stub_cache;
2430 }
2431 else
2432 {
2433 char *stub_name;
2434
2435 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2436 if (stub_name == NULL)
2437 return NULL;
2438
2439 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2440 stub_name, FALSE, FALSE);
2441 if (h != NULL)
2442 h->stub_cache = stub_entry;
2443
2444 free (stub_name);
2445 }
2446
2447 return stub_entry;
2448 }
2449
2450
2451 /* Create a stub section. */
2452
2453 static asection *
2454 _bfd_aarch64_create_stub_section (asection *section,
2455 struct elf_aarch64_link_hash_table *htab)
2456 {
2457 size_t namelen;
2458 bfd_size_type len;
2459 char *s_name;
2460
2461 namelen = strlen (section->name);
2462 len = namelen + sizeof (STUB_SUFFIX);
2463 s_name = bfd_alloc (htab->stub_bfd, len);
2464 if (s_name == NULL)
2465 return NULL;
2466
2467 memcpy (s_name, section->name, namelen);
2468 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2469 return (*htab->add_stub_section) (s_name, section);
2470 }
2471
2472
2473 /* Find or create a stub section for a link section.
2474
2475 Fix or create the stub section used to collect stubs attached to
2476 the specified link section. */
2477
2478 static asection *
2479 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2480 struct elf_aarch64_link_hash_table *htab)
2481 {
2482 if (htab->stub_group[link_section->id].stub_sec == NULL)
2483 htab->stub_group[link_section->id].stub_sec
2484 = _bfd_aarch64_create_stub_section (link_section, htab);
2485 return htab->stub_group[link_section->id].stub_sec;
2486 }
2487
2488
2489 /* Find or create a stub section in the stub group for an input
2490 section. */
2491
2492 static asection *
2493 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2494 struct elf_aarch64_link_hash_table *htab)
2495 {
2496 asection *link_sec = htab->stub_group[section->id].link_sec;
2497 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2498 }
2499
2500
2501 /* Add a new stub entry in the stub group associated with an input
2502 section to the stub hash. Not all fields of the new stub entry are
2503 initialised. */
2504
2505 static struct elf_aarch64_stub_hash_entry *
2506 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2507 asection *section,
2508 struct elf_aarch64_link_hash_table *htab)
2509 {
2510 asection *link_sec;
2511 asection *stub_sec;
2512 struct elf_aarch64_stub_hash_entry *stub_entry;
2513
2514 link_sec = htab->stub_group[section->id].link_sec;
2515 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2516
2517 /* Enter this entry into the linker stub hash table. */
2518 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2519 TRUE, FALSE);
2520 if (stub_entry == NULL)
2521 {
2522 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2523 section->owner, stub_name);
2524 return NULL;
2525 }
2526
2527 stub_entry->stub_sec = stub_sec;
2528 stub_entry->stub_offset = 0;
2529 stub_entry->id_sec = link_sec;
2530
2531 return stub_entry;
2532 }
2533
2534 /* Add a new stub entry in the final stub section to the stub hash.
2535 Not all fields of the new stub entry are initialised. */
2536
2537 static struct elf_aarch64_stub_hash_entry *
2538 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2539 asection *link_section,
2540 struct elf_aarch64_link_hash_table *htab)
2541 {
2542 asection *stub_sec;
2543 struct elf_aarch64_stub_hash_entry *stub_entry;
2544
2545 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2546 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2547 TRUE, FALSE);
2548 if (stub_entry == NULL)
2549 {
2550 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2551 return NULL;
2552 }
2553
2554 stub_entry->stub_sec = stub_sec;
2555 stub_entry->stub_offset = 0;
2556 stub_entry->id_sec = link_section;
2557
2558 return stub_entry;
2559 }
2560
2561
2562 static bfd_boolean
2563 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2564 void *in_arg ATTRIBUTE_UNUSED)
2565 {
2566 struct elf_aarch64_stub_hash_entry *stub_entry;
2567 asection *stub_sec;
2568 bfd *stub_bfd;
2569 bfd_byte *loc;
2570 bfd_vma sym_value;
2571 bfd_vma veneered_insn_loc;
2572 bfd_vma veneer_entry_loc;
2573 bfd_signed_vma branch_offset = 0;
2574 unsigned int template_size;
2575 const uint32_t *template;
2576 unsigned int i;
2577
2578 /* Massage our args to the form they really have. */
2579 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2580
2581 stub_sec = stub_entry->stub_sec;
2582
2583 /* Make a note of the offset within the stubs for this entry. */
2584 stub_entry->stub_offset = stub_sec->size;
2585 loc = stub_sec->contents + stub_entry->stub_offset;
2586
2587 stub_bfd = stub_sec->owner;
2588
2589 /* This is the address of the stub destination. */
2590 sym_value = (stub_entry->target_value
2591 + stub_entry->target_section->output_offset
2592 + stub_entry->target_section->output_section->vma);
2593
2594 if (stub_entry->stub_type == aarch64_stub_long_branch)
2595 {
2596 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2597 + stub_sec->output_offset);
2598
2599 /* See if we can relax the stub. */
2600 if (aarch64_valid_for_adrp_p (sym_value, place))
2601 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2602 }
2603
2604 switch (stub_entry->stub_type)
2605 {
2606 case aarch64_stub_adrp_branch:
2607 template = aarch64_adrp_branch_stub;
2608 template_size = sizeof (aarch64_adrp_branch_stub);
2609 break;
2610 case aarch64_stub_long_branch:
2611 template = aarch64_long_branch_stub;
2612 template_size = sizeof (aarch64_long_branch_stub);
2613 break;
2614 case aarch64_stub_erratum_835769_veneer:
2615 template = aarch64_erratum_835769_stub;
2616 template_size = sizeof (aarch64_erratum_835769_stub);
2617 break;
2618 case aarch64_stub_erratum_843419_veneer:
2619 template = aarch64_erratum_843419_stub;
2620 template_size = sizeof (aarch64_erratum_843419_stub);
2621 break;
2622 default:
2623 abort ();
2624 }
2625
2626 for (i = 0; i < (template_size / sizeof template[0]); i++)
2627 {
2628 bfd_putl32 (template[i], loc);
2629 loc += 4;
2630 }
2631
2632 template_size = (template_size + 7) & ~7;
2633 stub_sec->size += template_size;
2634
2635 switch (stub_entry->stub_type)
2636 {
2637 case aarch64_stub_adrp_branch:
2638 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2639 stub_entry->stub_offset, sym_value))
2640 /* The stub would not have been relaxed if the offset was out
2641 of range. */
2642 BFD_FAIL ();
2643
2644 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2645 stub_entry->stub_offset + 4, sym_value))
2646 BFD_FAIL ();
2647 break;
2648
2649 case aarch64_stub_long_branch:
2650 /* We want the value relative to the address 12 bytes back from the
2651 value itself. */
2652 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2653 stub_entry->stub_offset + 16, sym_value + 12))
2654 BFD_FAIL ();
2655 break;
2656
2657 case aarch64_stub_erratum_835769_veneer:
2658 veneered_insn_loc = stub_entry->target_section->output_section->vma
2659 + stub_entry->target_section->output_offset
2660 + stub_entry->target_value;
2661 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2662 + stub_entry->stub_sec->output_offset
2663 + stub_entry->stub_offset;
2664 branch_offset = veneered_insn_loc - veneer_entry_loc;
2665 branch_offset >>= 2;
2666 branch_offset &= 0x3ffffff;
2667 bfd_putl32 (stub_entry->veneered_insn,
2668 stub_sec->contents + stub_entry->stub_offset);
2669 bfd_putl32 (template[1] | branch_offset,
2670 stub_sec->contents + stub_entry->stub_offset + 4);
2671 break;
2672
2673 case aarch64_stub_erratum_843419_veneer:
2674 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2675 stub_entry->stub_offset + 4, sym_value + 4))
2676 BFD_FAIL ();
2677 break;
2678
2679 default:
2680 abort ();
2681 }
2682
2683 return TRUE;
2684 }
2685
2686 /* As above, but don't actually build the stub. Just bump offset so
2687 we know stub section sizes. */
2688
2689 static bfd_boolean
2690 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2691 void *in_arg ATTRIBUTE_UNUSED)
2692 {
2693 struct elf_aarch64_stub_hash_entry *stub_entry;
2694 int size;
2695
2696 /* Massage our args to the form they really have. */
2697 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2698
2699 switch (stub_entry->stub_type)
2700 {
2701 case aarch64_stub_adrp_branch:
2702 size = sizeof (aarch64_adrp_branch_stub);
2703 break;
2704 case aarch64_stub_long_branch:
2705 size = sizeof (aarch64_long_branch_stub);
2706 break;
2707 case aarch64_stub_erratum_835769_veneer:
2708 size = sizeof (aarch64_erratum_835769_stub);
2709 break;
2710 case aarch64_stub_erratum_843419_veneer:
2711 size = sizeof (aarch64_erratum_843419_stub);
2712 break;
2713 default:
2714 abort ();
2715 }
2716
2717 size = (size + 7) & ~7;
2718 stub_entry->stub_sec->size += size;
2719 return TRUE;
2720 }
2721
2722 /* External entry points for sizing and building linker stubs. */
2723
2724 /* Set up various things so that we can make a list of input sections
2725 for each output section included in the link. Returns -1 on error,
2726 0 when no stubs will be needed, and 1 on success. */
2727
2728 int
2729 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2730 struct bfd_link_info *info)
2731 {
2732 bfd *input_bfd;
2733 unsigned int bfd_count;
2734 int top_id, top_index;
2735 asection *section;
2736 asection **input_list, **list;
2737 bfd_size_type amt;
2738 struct elf_aarch64_link_hash_table *htab =
2739 elf_aarch64_hash_table (info);
2740
2741 if (!is_elf_hash_table (htab))
2742 return 0;
2743
2744 /* Count the number of input BFDs and find the top input section id. */
2745 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2746 input_bfd != NULL; input_bfd = input_bfd->link.next)
2747 {
2748 bfd_count += 1;
2749 for (section = input_bfd->sections;
2750 section != NULL; section = section->next)
2751 {
2752 if (top_id < section->id)
2753 top_id = section->id;
2754 }
2755 }
2756 htab->bfd_count = bfd_count;
2757
2758 amt = sizeof (struct map_stub) * (top_id + 1);
2759 htab->stub_group = bfd_zmalloc (amt);
2760 if (htab->stub_group == NULL)
2761 return -1;
2762
2763 /* We can't use output_bfd->section_count here to find the top output
2764 section index as some sections may have been removed, and
2765 _bfd_strip_section_from_output doesn't renumber the indices. */
2766 for (section = output_bfd->sections, top_index = 0;
2767 section != NULL; section = section->next)
2768 {
2769 if (top_index < section->index)
2770 top_index = section->index;
2771 }
2772
2773 htab->top_index = top_index;
2774 amt = sizeof (asection *) * (top_index + 1);
2775 input_list = bfd_malloc (amt);
2776 htab->input_list = input_list;
2777 if (input_list == NULL)
2778 return -1;
2779
2780 /* For sections we aren't interested in, mark their entries with a
2781 value we can check later. */
2782 list = input_list + top_index;
2783 do
2784 *list = bfd_abs_section_ptr;
2785 while (list-- != input_list);
2786
2787 for (section = output_bfd->sections;
2788 section != NULL; section = section->next)
2789 {
2790 if ((section->flags & SEC_CODE) != 0)
2791 input_list[section->index] = NULL;
2792 }
2793
2794 return 1;
2795 }
2796
2797 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2798 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2799
2800 /* The linker repeatedly calls this function for each input section,
2801 in the order that input sections are linked into output sections.
2802 Build lists of input sections to determine groupings between which
2803 we may insert linker stubs. */
2804
2805 void
2806 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2807 {
2808 struct elf_aarch64_link_hash_table *htab =
2809 elf_aarch64_hash_table (info);
2810
2811 if (isec->output_section->index <= htab->top_index)
2812 {
2813 asection **list = htab->input_list + isec->output_section->index;
2814
2815 if (*list != bfd_abs_section_ptr)
2816 {
2817 /* Steal the link_sec pointer for our list. */
2818 /* This happens to make the list in reverse order,
2819 which is what we want. */
2820 PREV_SEC (isec) = *list;
2821 *list = isec;
2822 }
2823 }
2824 }
2825
2826 /* See whether we can group stub sections together. Grouping stub
2827 sections may result in fewer stubs. More importantly, we need to
2828 put all .init* and .fini* stubs at the beginning of the .init or
2829 .fini output sections respectively, because glibc splits the
2830 _init and _fini functions into multiple parts. Putting a stub in
2831 the middle of a function is not a good idea. */
2832
2833 static void
2834 group_sections (struct elf_aarch64_link_hash_table *htab,
2835 bfd_size_type stub_group_size,
2836 bfd_boolean stubs_always_before_branch)
2837 {
2838 asection **list = htab->input_list + htab->top_index;
2839
2840 do
2841 {
2842 asection *tail = *list;
2843
2844 if (tail == bfd_abs_section_ptr)
2845 continue;
2846
2847 while (tail != NULL)
2848 {
2849 asection *curr;
2850 asection *prev;
2851 bfd_size_type total;
2852
2853 curr = tail;
2854 total = tail->size;
2855 while ((prev = PREV_SEC (curr)) != NULL
2856 && ((total += curr->output_offset - prev->output_offset)
2857 < stub_group_size))
2858 curr = prev;
2859
2860 /* OK, the size from the start of CURR to the end is less
2861 than stub_group_size and thus can be handled by one stub
2862 section. (Or the tail section is itself larger than
2863 stub_group_size, in which case we may be toast.)
2864 We should really be keeping track of the total size of
2865 stubs added here, as stubs contribute to the final output
2866 section size. */
2867 do
2868 {
2869 prev = PREV_SEC (tail);
2870 /* Set up this stub group. */
2871 htab->stub_group[tail->id].link_sec = curr;
2872 }
2873 while (tail != curr && (tail = prev) != NULL);
2874
2875 /* But wait, there's more! Input sections up to stub_group_size
2876 bytes before the stub section can be handled by it too. */
2877 if (!stubs_always_before_branch)
2878 {
2879 total = 0;
2880 while (prev != NULL
2881 && ((total += tail->output_offset - prev->output_offset)
2882 < stub_group_size))
2883 {
2884 tail = prev;
2885 prev = PREV_SEC (tail);
2886 htab->stub_group[tail->id].link_sec = curr;
2887 }
2888 }
2889 tail = prev;
2890 }
2891 }
2892 while (list-- != htab->input_list);
2893
2894 free (htab->input_list);
2895 }
2896
2897 #undef PREV_SEC
2898
2899 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2900
2901 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2902 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2903 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2904 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2905 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2906 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2907
2908 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2909 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2910 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2911 #define AARCH64_ZR 0x1f
2912
2913 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2914 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2915
2916 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2917 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2918 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2919 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2920 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2921 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2922 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2923 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2924 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2925 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2926 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2927 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2928 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2929 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2930 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2931 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2932 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2933 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2934
2935 /* Classify an INSN if it is indeed a load/store.
2936
2937 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2938
2939 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2940 is set equal to RT.
2941
2942 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2943
2944 */
2945
2946 static bfd_boolean
2947 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2948 bfd_boolean *pair, bfd_boolean *load)
2949 {
2950 uint32_t opcode;
2951 unsigned int r;
2952 uint32_t opc = 0;
2953 uint32_t v = 0;
2954 uint32_t opc_v = 0;
2955
2956 /* Bail out quickly if INSN doesn't fall into the load-store
2957 encoding space. */
2958 if (!AARCH64_LDST (insn))
2959 return FALSE;
2960
2961 *pair = FALSE;
2962 *load = FALSE;
2963 if (AARCH64_LDST_EX (insn))
2964 {
2965 *rt = AARCH64_RT (insn);
2966 *rt2 = *rt;
2967 if (AARCH64_BIT (insn, 21) == 1)
2968 {
2969 *pair = TRUE;
2970 *rt2 = AARCH64_RT2 (insn);
2971 }
2972 *load = AARCH64_LD (insn);
2973 return TRUE;
2974 }
2975 else if (AARCH64_LDST_NAP (insn)
2976 || AARCH64_LDSTP_PI (insn)
2977 || AARCH64_LDSTP_O (insn)
2978 || AARCH64_LDSTP_PRE (insn))
2979 {
2980 *pair = TRUE;
2981 *rt = AARCH64_RT (insn);
2982 *rt2 = AARCH64_RT2 (insn);
2983 *load = AARCH64_LD (insn);
2984 return TRUE;
2985 }
2986 else if (AARCH64_LDST_PCREL (insn)
2987 || AARCH64_LDST_UI (insn)
2988 || AARCH64_LDST_PIIMM (insn)
2989 || AARCH64_LDST_U (insn)
2990 || AARCH64_LDST_PREIMM (insn)
2991 || AARCH64_LDST_RO (insn)
2992 || AARCH64_LDST_UIMM (insn))
2993 {
2994 *rt = AARCH64_RT (insn);
2995 *rt2 = *rt;
2996 if (AARCH64_LDST_PCREL (insn))
2997 *load = TRUE;
2998 opc = AARCH64_BITS (insn, 22, 2);
2999 v = AARCH64_BIT (insn, 26);
3000 opc_v = opc | (v << 2);
3001 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3002 || opc_v == 5 || opc_v == 7);
3003 return TRUE;
3004 }
3005 else if (AARCH64_LDST_SIMD_M (insn)
3006 || AARCH64_LDST_SIMD_M_PI (insn))
3007 {
3008 *rt = AARCH64_RT (insn);
3009 *load = AARCH64_BIT (insn, 22);
3010 opcode = (insn >> 12) & 0xf;
3011 switch (opcode)
3012 {
3013 case 0:
3014 case 2:
3015 *rt2 = *rt + 3;
3016 break;
3017
3018 case 4:
3019 case 6:
3020 *rt2 = *rt + 2;
3021 break;
3022
3023 case 7:
3024 *rt2 = *rt;
3025 break;
3026
3027 case 8:
3028 case 10:
3029 *rt2 = *rt + 1;
3030 break;
3031
3032 default:
3033 return FALSE;
3034 }
3035 return TRUE;
3036 }
3037 else if (AARCH64_LDST_SIMD_S (insn)
3038 || AARCH64_LDST_SIMD_S_PI (insn))
3039 {
3040 *rt = AARCH64_RT (insn);
3041 r = (insn >> 21) & 1;
3042 *load = AARCH64_BIT (insn, 22);
3043 opcode = (insn >> 13) & 0x7;
3044 switch (opcode)
3045 {
3046 case 0:
3047 case 2:
3048 case 4:
3049 *rt2 = *rt + r;
3050 break;
3051
3052 case 1:
3053 case 3:
3054 case 5:
3055 *rt2 = *rt + (r == 0 ? 2 : 3);
3056 break;
3057
3058 case 6:
3059 *rt2 = *rt + r;
3060 break;
3061
3062 case 7:
3063 *rt2 = *rt + (r == 0 ? 2 : 3);
3064 break;
3065
3066 default:
3067 return FALSE;
3068 }
3069 return TRUE;
3070 }
3071
3072 return FALSE;
3073 }
3074
3075 /* Return TRUE if INSN is multiply-accumulate. */
3076
3077 static bfd_boolean
3078 aarch64_mlxl_p (uint32_t insn)
3079 {
3080 uint32_t op31 = AARCH64_OP31 (insn);
3081
3082 if (AARCH64_MAC (insn)
3083 && (op31 == 0 || op31 == 1 || op31 == 5)
3084 /* Exclude MUL instructions which are encoded as a multiple accumulate
3085 with RA = XZR. */
3086 && AARCH64_RA (insn) != AARCH64_ZR)
3087 return TRUE;
3088
3089 return FALSE;
3090 }
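
/* Illustrative example: "madd x1, x2, x3, x4" encodes as 0x9b031041;
   AARCH64_MAC is true, AARCH64_OP31 is 0 and AARCH64_RA is 4, so
   aarch64_mlxl_p returns TRUE.  The MUL alias "mul x1, x2, x3"
   (0x9b037c41) has RA == XZR and is therefore excluded.  */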
3091
3092 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3093 it is possible for a 64-bit multiply-accumulate instruction to generate an
3094 incorrect result. The details are quite complex and hard to
3095 determine statically, since branches in the code may exist in some
3096 circumstances, but all cases end with a memory (load, store, or
3097 prefetch) instruction followed immediately by the multiply-accumulate
3098 operation. We employ a linker patching technique, by moving the potentially
3099 affected multiply-accumulate instruction into a patch region and replacing
3100 the original instruction with a branch to the patch. This function checks
3101 if INSN_1 is the memory operation followed by a multiply-accumulate
3102 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3103 if INSN_1 and INSN_2 are safe. */
3104
3105 static bfd_boolean
3106 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3107 {
3108 uint32_t rt;
3109 uint32_t rt2;
3110 uint32_t rn;
3111 uint32_t rm;
3112 uint32_t ra;
3113 bfd_boolean pair;
3114 bfd_boolean load;
3115
3116 if (aarch64_mlxl_p (insn_2)
3117 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3118 {
3119 /* Any SIMD memory op is independent of the subsequent MLA
3120 by definition of the erratum. */
3121 if (AARCH64_BIT (insn_1, 26))
3122 return TRUE;
3123
3124 /* If not SIMD, check for integer memory ops and MLA relationship. */
3125 rn = AARCH64_RN (insn_2);
3126 ra = AARCH64_RA (insn_2);
3127 rm = AARCH64_RM (insn_2);
3128
3129 /* If this is a load and there's a true (RAW) dependency, we are safe
3130 and this is not an erratum sequence. */
3131 if (load &&
3132 (rt == rn || rt == rm || rt == ra
3133 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3134 return FALSE;
3135
3136 /* We conservatively put out stubs for all other cases (including
3137 writebacks). */
3138 return TRUE;
3139 }
3140
3141 return FALSE;
3142 }
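
/* As an illustrative example of the above, the sequence

     ldr  x0, [x1]          <- memory operation
     madd x2, x3, x4, x5    <- 64-bit multiply-accumulate

   is flagged because the load destination is not consumed by the
   multiply-accumulate, whereas

     ldr  x3, [x1]
     madd x2, x3, x4, x5

   is considered safe because of the RAW dependency on x3.  */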
3143
3144 /* Used to order a list of mapping symbols by address. */
3145
3146 static int
3147 elf_aarch64_compare_mapping (const void *a, const void *b)
3148 {
3149 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3150 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3151
3152 if (amap->vma > bmap->vma)
3153 return 1;
3154 else if (amap->vma < bmap->vma)
3155 return -1;
3156 else if (amap->type > bmap->type)
3157 /* Ensure results do not depend on the host qsort for objects with
3158 multiple mapping symbols at the same address by sorting on type
3159 after vma. */
3160 return 1;
3161 else if (amap->type < bmap->type)
3162 return -1;
3163 else
3164 return 0;
3165 }
3166
3167
3168 static char *
3169 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3170 {
3171 char *stub_name = (char *) bfd_malloc
3172 (strlen ("__erratum_835769_veneer_") + 16);
3173 if (stub_name != NULL)
       sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3174 return stub_name;
3175 }
3176
3177 /* Scan for Cortex-A53 erratum 835769 sequence.
3178
3179 Return TRUE on success, FALSE on abnormal termination. */
3180
3181 static bfd_boolean
3182 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3183 struct bfd_link_info *info,
3184 unsigned int *num_fixes_p)
3185 {
3186 asection *section;
3187 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3188 unsigned int num_fixes = *num_fixes_p;
3189
3190 if (htab == NULL)
3191 return TRUE;
3192
3193 for (section = input_bfd->sections;
3194 section != NULL;
3195 section = section->next)
3196 {
3197 bfd_byte *contents = NULL;
3198 struct _aarch64_elf_section_data *sec_data;
3199 unsigned int span;
3200
3201 if (elf_section_type (section) != SHT_PROGBITS
3202 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3203 || (section->flags & SEC_EXCLUDE) != 0
3204 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3205 || (section->output_section == bfd_abs_section_ptr))
3206 continue;
3207
3208 if (elf_section_data (section)->this_hdr.contents != NULL)
3209 contents = elf_section_data (section)->this_hdr.contents;
3210 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3211 return FALSE;
3212
3213 sec_data = elf_aarch64_section_data (section);
3214
3215 qsort (sec_data->map, sec_data->mapcount,
3216 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3217
3218 for (span = 0; span < sec_data->mapcount; span++)
3219 {
3220 unsigned int span_start = sec_data->map[span].vma;
3221 unsigned int span_end = ((span == sec_data->mapcount - 1)
3222 ? sec_data->map[0].vma + section->size
3223 : sec_data->map[span + 1].vma);
3224 unsigned int i;
3225 char span_type = sec_data->map[span].type;
3226
3227 if (span_type == 'd')
3228 continue;
3229
3230 for (i = span_start; i + 4 < span_end; i += 4)
3231 {
3232 uint32_t insn_1 = bfd_getl32 (contents + i);
3233 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3234
3235 if (aarch64_erratum_sequence (insn_1, insn_2))
3236 {
3237 struct elf_aarch64_stub_hash_entry *stub_entry;
3238 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3239 if (! stub_name)
3240 return FALSE;
3241
3242 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3243 section,
3244 htab);
3245 if (! stub_entry)
3246 return FALSE;
3247
3248 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3249 stub_entry->target_section = section;
3250 stub_entry->target_value = i + 4;
3251 stub_entry->veneered_insn = insn_2;
3252 stub_entry->output_name = stub_name;
3253 num_fixes++;
3254 }
3255 }
3256 }
3257 if (elf_section_data (section)->this_hdr.contents == NULL)
3258 free (contents);
3259 }
3260
3261 *num_fixes_p = num_fixes;
3262
3263 return TRUE;
3264 }
3265
3266
3267 /* Test if instruction INSN is ADRP. */
3268
3269 static bfd_boolean
3270 _bfd_aarch64_adrp_p (uint32_t insn)
3271 {
3272 return ((insn & 0x9f000000) == 0x90000000);
3273 }
3274
3275
3276 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3277
3278 static bfd_boolean
3279 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3280 uint32_t insn_3)
3281 {
3282 uint32_t rt;
3283 uint32_t rt2;
3284 bfd_boolean pair;
3285 bfd_boolean load;
3286
3287 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3288 && (!pair
3289 || (pair && !load))
3290 && AARCH64_LDST_UIMM (insn_3)
3291 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3292 }
3293
3294
3295 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3296
3297 Return TRUE if section CONTENTS at offset I contains one of the
3298 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3299 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3300 instruction in the sequence.
3301 */
3302
3303 static bfd_boolean
3304 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3305 bfd_vma i, bfd_vma span_end,
3306 bfd_vma *p_veneer_i)
3307 {
3308 uint32_t insn_1 = bfd_getl32 (contents + i);
3309
3310 if (!_bfd_aarch64_adrp_p (insn_1))
3311 return FALSE;
3312
3313 if (span_end < i + 12)
3314 return FALSE;
3315
3316 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3317 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3318
3319 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3320 return FALSE;
3321
3322 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3323 {
3324 *p_veneer_i = i + 8;
3325 return TRUE;
3326 }
3327
3328 if (span_end < i + 16)
3329 return FALSE;
3330
3331 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3332
3333 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3334 {
3335 *p_veneer_i = i + 12;
3336 return TRUE;
3337 }
3338
3339 return FALSE;
3340 }
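
/* An illustrative instance of the signature detected above, assuming
   the ADRP sits at an address whose bottom 12 bits are 0xff8 or 0xffc:

     adrp x0, sym            <- offset I
     ldr  x1, [x2, #8]       <- any mem op that is not a load pair
     ldr  x3, [x0, #16]      <- unsigned-immediate load/store based
                                on the ADRP destination register

   The final load/store is the instruction reported through P_VENEER_I;
   an arbitrary intervening instruction may also appear before it (the
   insn_4 case above).  */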
3341
3342
3343 /* Resize all stub sections. */
3344
3345 static void
3346 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3347 {
3348 asection *section;
3349
3350 /* OK, we've added some stubs. Find out the new size of the
3351 stub sections. */
3352 for (section = htab->stub_bfd->sections;
3353 section != NULL; section = section->next)
3354 {
3355 /* Ignore non-stub sections. */
3356 if (!strstr (section->name, STUB_SUFFIX))
3357 continue;
3358 section->size = 0;
3359 }
3360
3361 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3362
3363 for (section = htab->stub_bfd->sections;
3364 section != NULL; section = section->next)
3365 {
3366 if (!strstr (section->name, STUB_SUFFIX))
3367 continue;
3368
3369 if (section->size)
3370 section->size += 4;
3371
3372 /* Ensure all stub sections have a size which is a multiple of
3373 4096. This is important in order to ensure that the insertion
3374 of stub sections does not in itself move existing code around
3375 in such a way that new errata sequences are created. */
3376 if (htab->fix_erratum_843419)
3377 if (section->size)
3378 section->size = BFD_ALIGN (section->size, 0x1000);
3379 }
3380 }
3381
3382
3383 /* Construct an erratum 843419 workaround stub name. */
3385
3386 static char *
3387 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3388 bfd_vma offset)
3389 {
3390 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3391 char *stub_name = bfd_malloc (len);
3392
3393 if (stub_name != NULL)
3394 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3395 input_section->owner->id,
3396 input_section->id,
3397 offset);
3398 return stub_name;
3399 }
3400
3401 /* Build a stub_entry structure describing an 843419 fixup.
3402
3403 The stub_entry constructed is populated with the bit pattern INSN
3404 of the instruction located at OFFSET within input SECTION.
3405
3406 Returns TRUE on success. */
3407
3408 static bfd_boolean
3409 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3410 bfd_vma adrp_offset,
3411 bfd_vma ldst_offset,
3412 asection *section,
3413 struct bfd_link_info *info)
3414 {
3415 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3416 char *stub_name;
3417 struct elf_aarch64_stub_hash_entry *stub_entry;
3418
3419 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3420 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3421 FALSE, FALSE);
3422 if (stub_entry)
3423 {
3424 free (stub_name);
3425 return TRUE;
3426 }
3427
3428 /* We always place an 843419 workaround veneer in the stub section
3429 attached to the input section in which an erratum sequence has
3430 been found. This ensures that later in the link process (in
3431 elfNN_aarch64_write_section) when we copy the veneered
3432 instruction from the input section into the stub section the
3433 copied instruction will have had any relocations applied to it.
3434 If we placed workaround veneers in any other stub section then we
3435 could not assume that all relocations have been processed on the
3436 corresponding input section at the point we output the stub
3437 section.
3438 */
3439
3440 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3441 if (stub_entry == NULL)
3442 {
3443 free (stub_name);
3444 return FALSE;
3445 }
3446
3447 stub_entry->adrp_offset = adrp_offset;
3448 stub_entry->target_value = ldst_offset;
3449 stub_entry->target_section = section;
3450 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3451 stub_entry->veneered_insn = insn;
3452 stub_entry->output_name = stub_name;
3453
3454 return TRUE;
3455 }
3456
3457
3458 /* Scan an input section looking for the signature of erratum 843419.
3459
3460 Scans input SECTION in INPUT_BFD looking for erratum 843419
3461 signatures; for each signature found, a stub_entry is created
3462 describing the location of the erratum for subsequent fixup.
3463 
3464 Return TRUE on a successful scan, FALSE on failure.  */
3466
3467 static bfd_boolean
3468 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3469 struct bfd_link_info *info)
3470 {
3471 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3472
3473 if (htab == NULL)
3474 return TRUE;
3475
3476 if (elf_section_type (section) != SHT_PROGBITS
3477 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3478 || (section->flags & SEC_EXCLUDE) != 0
3479 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3480 || (section->output_section == bfd_abs_section_ptr))
3481 return TRUE;
3482
3483 do
3484 {
3485 bfd_byte *contents = NULL;
3486 struct _aarch64_elf_section_data *sec_data;
3487 unsigned int span;
3488
3489 if (elf_section_data (section)->this_hdr.contents != NULL)
3490 contents = elf_section_data (section)->this_hdr.contents;
3491 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3492 return FALSE;
3493
3494 sec_data = elf_aarch64_section_data (section);
3495
3496 qsort (sec_data->map, sec_data->mapcount,
3497 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3498
3499 for (span = 0; span < sec_data->mapcount; span++)
3500 {
3501 unsigned int span_start = sec_data->map[span].vma;
3502 unsigned int span_end = ((span == sec_data->mapcount - 1)
3503 ? sec_data->map[0].vma + section->size
3504 : sec_data->map[span + 1].vma);
3505 unsigned int i;
3506 char span_type = sec_data->map[span].type;
3507
3508 if (span_type == 'd')
3509 continue;
3510
3511 for (i = span_start; i + 8 < span_end; i += 4)
3512 {
3513 bfd_vma vma = (section->output_section->vma
3514 + section->output_offset
3515 + i);
3516 bfd_vma veneer_i;
3517
3518 if (_bfd_aarch64_erratum_843419_p
3519 (contents, vma, i, span_end, &veneer_i))
3520 {
3521 uint32_t insn = bfd_getl32 (contents + veneer_i);
3522
3523 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3524 section, info))
3525 return FALSE;
3526 }
3527 }
3528 }
3529
3530 if (elf_section_data (section)->this_hdr.contents == NULL)
3531 free (contents);
3532 }
3533 while (0);
3534
3535 return TRUE;
3536 }
3537
3538
3539 /* Determine and set the size of the stub section for a final link.
3540
3541 The basic idea here is to examine all the relocations looking for
3542 PC-relative calls to a target that is unreachable with a "bl"
3543 instruction. */
3544
3545 bfd_boolean
3546 elfNN_aarch64_size_stubs (bfd *output_bfd,
3547 bfd *stub_bfd,
3548 struct bfd_link_info *info,
3549 bfd_signed_vma group_size,
3550 asection * (*add_stub_section) (const char *,
3551 asection *),
3552 void (*layout_sections_again) (void))
3553 {
3554 bfd_size_type stub_group_size;
3555 bfd_boolean stubs_always_before_branch;
3556 bfd_boolean stub_changed = FALSE;
3557 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3558 unsigned int num_erratum_835769_fixes = 0;
3559
3560 /* Propagate mach to stub bfd, because it may not have been
3561 finalized when we created stub_bfd. */
3562 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3563 bfd_get_mach (output_bfd));
3564
3565 /* Stash our params away. */
3566 htab->stub_bfd = stub_bfd;
3567 htab->add_stub_section = add_stub_section;
3568 htab->layout_sections_again = layout_sections_again;
3569 stubs_always_before_branch = group_size < 0;
3570 if (group_size < 0)
3571 stub_group_size = -group_size;
3572 else
3573 stub_group_size = group_size;
3574
3575 if (stub_group_size == 1)
3576 {
3577 /* Default value.  The AArch64 branch range is +/-128MB; the value
3578 used here is 1MB less. */
3579 stub_group_size = 127 * 1024 * 1024;
3580 }
3581
3582 group_sections (htab, stub_group_size, stubs_always_before_branch);
3583
3584 (*htab->layout_sections_again) ();
3585
3586 if (htab->fix_erratum_835769)
3587 {
3588 bfd *input_bfd;
3589
3590 for (input_bfd = info->input_bfds;
3591 input_bfd != NULL; input_bfd = input_bfd->link.next)
3592 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3593 &num_erratum_835769_fixes))
3594 return FALSE;
3595
3596 _bfd_aarch64_resize_stubs (htab);
3597 (*htab->layout_sections_again) ();
3598 }
3599
3600 if (htab->fix_erratum_843419)
3601 {
3602 bfd *input_bfd;
3603
3604 for (input_bfd = info->input_bfds;
3605 input_bfd != NULL;
3606 input_bfd = input_bfd->link.next)
3607 {
3608 asection *section;
3609
3610 for (section = input_bfd->sections;
3611 section != NULL;
3612 section = section->next)
3613 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3614 return FALSE;
3615 }
3616
3617 _bfd_aarch64_resize_stubs (htab);
3618 (*htab->layout_sections_again) ();
3619 }
3620
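  /* Size the branch stubs repeatedly: laying out a new stub section can
     move code and push further branches out of range, so iterate until
     a pass creates no new stubs.  */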
3621 while (1)
3622 {
3623 bfd *input_bfd;
3624
3625 for (input_bfd = info->input_bfds;
3626 input_bfd != NULL; input_bfd = input_bfd->link.next)
3627 {
3628 Elf_Internal_Shdr *symtab_hdr;
3629 asection *section;
3630 Elf_Internal_Sym *local_syms = NULL;
3631
3632 /* We'll need the symbol table in a second. */
3633 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3634 if (symtab_hdr->sh_info == 0)
3635 continue;
3636
3637 /* Walk over each section attached to the input bfd. */
3638 for (section = input_bfd->sections;
3639 section != NULL; section = section->next)
3640 {
3641 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3642
3643 /* If there aren't any relocs, then there's nothing more
3644 to do. */
3645 if ((section->flags & SEC_RELOC) == 0
3646 || section->reloc_count == 0
3647 || (section->flags & SEC_CODE) == 0)
3648 continue;
3649
3650 /* If this section is a link-once section that will be
3651 discarded, then don't create any stubs. */
3652 if (section->output_section == NULL
3653 || section->output_section->owner != output_bfd)
3654 continue;
3655
3656 /* Get the relocs. */
3657 internal_relocs
3658 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3659 NULL, info->keep_memory);
3660 if (internal_relocs == NULL)
3661 goto error_ret_free_local;
3662
3663 /* Now examine each relocation. */
3664 irela = internal_relocs;
3665 irelaend = irela + section->reloc_count;
3666 for (; irela < irelaend; irela++)
3667 {
3668 unsigned int r_type, r_indx;
3669 enum elf_aarch64_stub_type stub_type;
3670 struct elf_aarch64_stub_hash_entry *stub_entry;
3671 asection *sym_sec;
3672 bfd_vma sym_value;
3673 bfd_vma destination;
3674 struct elf_aarch64_link_hash_entry *hash;
3675 const char *sym_name;
3676 char *stub_name;
3677 const asection *id_sec;
3678 unsigned char st_type;
3679 bfd_size_type len;
3680
3681 r_type = ELFNN_R_TYPE (irela->r_info);
3682 r_indx = ELFNN_R_SYM (irela->r_info);
3683
3684 if (r_type >= (unsigned int) R_AARCH64_end)
3685 {
3686 bfd_set_error (bfd_error_bad_value);
3687 error_ret_free_internal:
3688 if (elf_section_data (section)->relocs == NULL)
3689 free (internal_relocs);
3690 goto error_ret_free_local;
3691 }
3692
3693 /* Only look for stubs on unconditional branch and
3694 branch and link instructions. */
3695 if (r_type != (unsigned int) AARCH64_R (CALL26)
3696 && r_type != (unsigned int) AARCH64_R (JUMP26))
3697 continue;
3698
3699 /* Now determine the call target, its name, value,
3700 section. */
3701 sym_sec = NULL;
3702 sym_value = 0;
3703 destination = 0;
3704 hash = NULL;
3705 sym_name = NULL;
3706 if (r_indx < symtab_hdr->sh_info)
3707 {
3708 /* It's a local symbol. */
3709 Elf_Internal_Sym *sym;
3710 Elf_Internal_Shdr *hdr;
3711
3712 if (local_syms == NULL)
3713 {
3714 local_syms
3715 = (Elf_Internal_Sym *) symtab_hdr->contents;
3716 if (local_syms == NULL)
3717 local_syms
3718 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3719 symtab_hdr->sh_info, 0,
3720 NULL, NULL, NULL);
3721 if (local_syms == NULL)
3722 goto error_ret_free_internal;
3723 }
3724
3725 sym = local_syms + r_indx;
3726 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3727 sym_sec = hdr->bfd_section;
3728 if (!sym_sec)
3729 /* This is an undefined symbol. It can never
3730 be resolved. */
3731 continue;
3732
3733 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3734 sym_value = sym->st_value;
3735 destination = (sym_value + irela->r_addend
3736 + sym_sec->output_offset
3737 + sym_sec->output_section->vma);
3738 st_type = ELF_ST_TYPE (sym->st_info);
3739 sym_name
3740 = bfd_elf_string_from_elf_section (input_bfd,
3741 symtab_hdr->sh_link,
3742 sym->st_name);
3743 }
3744 else
3745 {
3746 int e_indx;
3747
3748 e_indx = r_indx - symtab_hdr->sh_info;
3749 hash = ((struct elf_aarch64_link_hash_entry *)
3750 elf_sym_hashes (input_bfd)[e_indx]);
3751
3752 while (hash->root.root.type == bfd_link_hash_indirect
3753 || hash->root.root.type == bfd_link_hash_warning)
3754 hash = ((struct elf_aarch64_link_hash_entry *)
3755 hash->root.root.u.i.link);
3756
3757 if (hash->root.root.type == bfd_link_hash_defined
3758 || hash->root.root.type == bfd_link_hash_defweak)
3759 {
3760 struct elf_aarch64_link_hash_table *globals =
3761 elf_aarch64_hash_table (info);
3762 sym_sec = hash->root.root.u.def.section;
3763 sym_value = hash->root.root.u.def.value;
3764 /* For a destination in a shared library,
3765 use the PLT stub as target address to
3766 decide whether a branch stub is
3767 needed. */
3768 if (globals->root.splt != NULL && hash != NULL
3769 && hash->root.plt.offset != (bfd_vma) - 1)
3770 {
3771 sym_sec = globals->root.splt;
3772 sym_value = hash->root.plt.offset;
3773 if (sym_sec->output_section != NULL)
3774 destination = (sym_value
3775 + sym_sec->output_offset
3776 + sym_sec->output_section->vma);
3778 }
3779 else if (sym_sec->output_section != NULL)
3780 destination = (sym_value + irela->r_addend
3781 + sym_sec->output_offset
3782 + sym_sec->output_section->vma);
3783 }
3784 else if (hash->root.root.type == bfd_link_hash_undefined
3785 || (hash->root.root.type
3786 == bfd_link_hash_undefweak))
3787 {
3788 /* For a shared library, use the PLT stub as
3789 target address to decide whether a long
3790 branch stub is needed.
3791 For absolute code, such branches cannot be handled; skip them. */
3792 struct elf_aarch64_link_hash_table *globals =
3793 elf_aarch64_hash_table (info);
3794
3795 if (globals->root.splt != NULL && hash != NULL
3796 && hash->root.plt.offset != (bfd_vma) - 1)
3797 {
3798 sym_sec = globals->root.splt;
3799 sym_value = hash->root.plt.offset;
3800 if (sym_sec->output_section != NULL)
3801 destination = (sym_value
3802 + sym_sec->output_offset
3803 + sym_sec->output_section->vma);
3805 }
3806 else
3807 continue;
3808 }
3809 else
3810 {
3811 bfd_set_error (bfd_error_bad_value);
3812 goto error_ret_free_internal;
3813 }
3814 st_type = ELF_ST_TYPE (hash->root.type);
3815 sym_name = hash->root.root.root.string;
3816 }
3817
3818 /* Determine what (if any) linker stub is needed. */
3819 stub_type = aarch64_type_of_stub
3820 (info, section, irela, sym_sec, st_type, hash, destination);
3821 if (stub_type == aarch64_stub_none)
3822 continue;
3823
3824 /* Support for grouping stub sections. */
3825 id_sec = htab->stub_group[section->id].link_sec;
3826
3827 /* Get the name of this stub. */
3828 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3829 irela);
3830 if (!stub_name)
3831 goto error_ret_free_internal;
3832
3833 stub_entry =
3834 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3835 stub_name, FALSE, FALSE);
3836 if (stub_entry != NULL)
3837 {
3838 /* The proper stub has already been created. */
3839 free (stub_name);
3840 continue;
3841 }
3842
3843 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3844 (stub_name, section, htab);
3845 if (stub_entry == NULL)
3846 {
3847 free (stub_name);
3848 goto error_ret_free_internal;
3849 }
3850
3851 stub_entry->target_value = sym_value;
3852 stub_entry->target_section = sym_sec;
3853 stub_entry->stub_type = stub_type;
3854 stub_entry->h = hash;
3855 stub_entry->st_type = st_type;
3856
3857 if (sym_name == NULL)
3858 sym_name = "unnamed";
3859 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3860 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3861 if (stub_entry->output_name == NULL)
3862 {
3863 free (stub_name);
3864 goto error_ret_free_internal;
3865 }
3866
3867 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3868 sym_name);
3869
3870 stub_changed = TRUE;
3871 }
3872
3873 /* We're done with the internal relocs, free them. */
3874 if (elf_section_data (section)->relocs == NULL)
3875 free (internal_relocs);
3876 }
3877 }
3878
3879 if (!stub_changed)
3880 break;
3881
3882 _bfd_aarch64_resize_stubs (htab);
3883
3884 /* Ask the linker to lay the sections out again. */
3885 (*htab->layout_sections_again) ();
3886 stub_changed = FALSE;
3887 }
3888
3889 return TRUE;
3890
3891 error_ret_free_local:
3892 return FALSE;
3893 }
3894
3895 /* Build all the stubs associated with the current output file. The
3896 stubs are kept in a hash table attached to the main linker hash
3897 table. We also set up the .plt entries for statically linked PIC
3898 functions here. This function is called via aarch64_elf_finish in the
3899 linker. */
3900
3901 bfd_boolean
3902 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3903 {
3904 asection *stub_sec;
3905 struct bfd_hash_table *table;
3906 struct elf_aarch64_link_hash_table *htab;
3907
3908 htab = elf_aarch64_hash_table (info);
3909
3910 for (stub_sec = htab->stub_bfd->sections;
3911 stub_sec != NULL; stub_sec = stub_sec->next)
3912 {
3913 bfd_size_type size;
3914
3915 /* Ignore non-stub sections. */
3916 if (!strstr (stub_sec->name, STUB_SUFFIX))
3917 continue;
3918
3919 /* Allocate memory to hold the linker stubs. */
3920 size = stub_sec->size;
3921 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3922 if (stub_sec->contents == NULL && size != 0)
3923 return FALSE;
3924 stub_sec->size = 0;
3925
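      /* Emit an unconditional branch (B, opcode 0x14000000) at the start
	 of the stub section; the imm26 field holds the section size in
	 instructions (size >> 2), so the branch jumps over all of the
	 stubs that follow.  */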
3926 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3927 stub_sec->size += 4;
3928 }
3929
3930 /* Build the stubs as directed by the stub hash table. */
3931 table = &htab->stub_hash_table;
3932 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3933
3934 return TRUE;
3935 }
3936
3937
3938 /* Add an entry to the code/data map for section SEC. */
3939
3940 static void
3941 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3942 {
3943 struct _aarch64_elf_section_data *sec_data =
3944 elf_aarch64_section_data (sec);
3945 unsigned int newidx;
3946
3947 if (sec_data->map == NULL)
3948 {
3949 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3950 sec_data->mapcount = 0;
3951 sec_data->mapsize = 1;
3952 }
3953
3954 newidx = sec_data->mapcount++;
3955
3956 if (sec_data->mapcount > sec_data->mapsize)
3957 {
3958 sec_data->mapsize *= 2;
3959 sec_data->map = bfd_realloc_or_free
3960 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3961 }
3962
3963 if (sec_data->map)
3964 {
3965 sec_data->map[newidx].vma = vma;
3966 sec_data->map[newidx].type = type;
3967 }
3968 }
3969
3970
3971 /* Initialise maps of insn/data for input BFDs. */
3972 void
3973 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3974 {
3975 Elf_Internal_Sym *isymbuf;
3976 Elf_Internal_Shdr *hdr;
3977 unsigned int i, localsyms;
3978
3979 /* Make sure that we are dealing with an AArch64 elf binary. */
3980 if (!is_aarch64_elf (abfd))
3981 return;
3982
3983 if ((abfd->flags & DYNAMIC) != 0)
3984 return;
3985
3986 hdr = &elf_symtab_hdr (abfd);
3987 localsyms = hdr->sh_info;
3988
3989 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3990 should contain the number of local symbols, which should come before any
3991 global symbols. Mapping symbols are always local. */
3992 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3993
3994 /* No internal symbols read? Skip this BFD. */
3995 if (isymbuf == NULL)
3996 return;
3997
3998 for (i = 0; i < localsyms; i++)
3999 {
4000 Elf_Internal_Sym *isym = &isymbuf[i];
4001 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4002 const char *name;
4003
4004 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4005 {
4006 name = bfd_elf_string_from_elf_section (abfd,
4007 hdr->sh_link,
4008 isym->st_name);
4009
4010 if (bfd_is_aarch64_special_symbol_name
4011 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4012 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4013 }
4014 }
4015 }
4016
4017 /* Set option values needed during linking. */
4018 void
4019 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4020 struct bfd_link_info *link_info,
4021 int no_enum_warn,
4022 int no_wchar_warn, int pic_veneer,
4023 int fix_erratum_835769,
4024 int fix_erratum_843419)
4025 {
4026 struct elf_aarch64_link_hash_table *globals;
4027
4028 globals = elf_aarch64_hash_table (link_info);
4029 globals->pic_veneer = pic_veneer;
4030 globals->fix_erratum_835769 = fix_erratum_835769;
4031 globals->fix_erratum_843419 = fix_erratum_843419;
4032 globals->fix_erratum_843419_adr = TRUE;
4033
4034 BFD_ASSERT (is_aarch64_elf (output_bfd));
4035 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4036 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4037 }
4038
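/* Return the run-time address of the GOT entry for symbol H.  When the
   symbol resolves locally the entry is initialised here with VALUE,
   using the low bit of h->got.offset to remember that this has been
   done; otherwise the entry is filled in by finish_dynamic_symbol and
   *UNRESOLVED_RELOC_P is cleared.  */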
4039 static bfd_vma
4040 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4041 struct elf_aarch64_link_hash_table
4042 *globals, struct bfd_link_info *info,
4043 bfd_vma value, bfd *output_bfd,
4044 bfd_boolean *unresolved_reloc_p)
4045 {
4046 bfd_vma off = (bfd_vma) - 1;
4047 asection *basegot = globals->root.sgot;
4048 bfd_boolean dyn = globals->root.dynamic_sections_created;
4049
4050 if (h != NULL)
4051 {
4052 BFD_ASSERT (basegot != NULL);
4053 off = h->got.offset;
4054 BFD_ASSERT (off != (bfd_vma) - 1);
4055 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4056 || (info->shared
4057 && SYMBOL_REFERENCES_LOCAL (info, h))
4058 || (ELF_ST_VISIBILITY (h->other)
4059 && h->root.type == bfd_link_hash_undefweak))
4060 {
4061 /* This is actually a static link, or it is a -Bsymbolic link
4062 and the symbol is defined locally. We must initialize this
4063 entry in the global offset table. Since the offset must
4064 always be a multiple of 8 (4 in the case of ILP32), we use
4065 the least significant bit to record whether we have
4066 initialized it already.
4067 When doing a dynamic link, we create a .rel(a).got relocation
4068 entry to initialize the value. This is done in the
4069 finish_dynamic_symbol routine. */
4070 if ((off & 1) != 0)
4071 off &= ~1;
4072 else
4073 {
4074 bfd_put_NN (output_bfd, value, basegot->contents + off);
4075 h->got.offset |= 1;
4076 }
4077 }
4078 else
4079 *unresolved_reloc_p = FALSE;
4080
4081 off = off + basegot->output_section->vma + basegot->output_offset;
4082 }
4083
4084 return off;
4085 }
4086
4087 /* Change R_TYPE to a more efficient access model where possible,
4088 return the new reloc type. */
4089
4090 static bfd_reloc_code_real_type
4091 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4092 struct elf_link_hash_entry *h)
4093 {
4094 bfd_boolean is_local = h == NULL;
4095
4096 switch (r_type)
4097 {
4098 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4099 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4100 return (is_local
4101 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4102 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4103
4104 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4105 return (is_local
4106 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4107 : r_type);
4108
4109 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4110 return (is_local
4111 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4112 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4113
4114 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4115 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4116 return (is_local
4117 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4118 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4119
4120 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4121 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4122
4123 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4124 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4125
4126 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4127 return r_type;
4128
4129 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4130 return (is_local
4131 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4132 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4133
4134 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4135 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4136 /* Instructions with these relocations will become NOPs. */
4137 return BFD_RELOC_AARCH64_NONE;
4138
4139 default:
4140 break;
4141 }
4142
4143 return r_type;
4144 }
4145
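/* Return the GOT entry class (GOT_NORMAL, GOT_TLS_GD, GOT_TLSDESC_GD,
   GOT_TLS_IE or GOT_UNKNOWN) implied by relocation R_TYPE.  */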
4146 static unsigned int
4147 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4148 {
4149 switch (r_type)
4150 {
4151 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4152 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4153 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4154 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4155 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4156 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4157 return GOT_NORMAL;
4158
4159 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4160 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4161 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4162 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4163 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4164 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4165 return GOT_TLS_GD;
4166
4167 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4168 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4169 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4170 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4171 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4172 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4173 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4174 return GOT_TLSDESC_GD;
4175
4176 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4177 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4178 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4179 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4180 return GOT_TLS_IE;
4181
4182 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
4183 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4184 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4185 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4186 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4187 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4188 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4189 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4190 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4191 return GOT_UNKNOWN;
4192
4193 default:
4194 break;
4195 }
4196 return GOT_UNKNOWN;
4197 }
4198
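/* Return TRUE if the TLS access described by relocation R_TYPE against
   symbol H (or local symbol R_SYMNDX in INPUT_BFD) may be relaxed to a
   cheaper access model in this link.  */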
4199 static bfd_boolean
4200 aarch64_can_relax_tls (bfd *input_bfd,
4201 struct bfd_link_info *info,
4202 bfd_reloc_code_real_type r_type,
4203 struct elf_link_hash_entry *h,
4204 unsigned long r_symndx)
4205 {
4206 unsigned int symbol_got_type;
4207 unsigned int reloc_got_type;
4208
4209 if (! IS_AARCH64_TLS_RELOC (r_type))
4210 return FALSE;
4211
4212 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4213 reloc_got_type = aarch64_reloc_got_type (r_type);
4214
4215 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4216 return TRUE;
4217
4218 if (info->shared)
4219 return FALSE;
4220
4221 if (h && h->root.type == bfd_link_hash_undefweak)
4222 return FALSE;
4223
4224 return TRUE;
4225 }
4226
4227 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4228 enumerator. */
4229
4230 static bfd_reloc_code_real_type
4231 aarch64_tls_transition (bfd *input_bfd,
4232 struct bfd_link_info *info,
4233 unsigned int r_type,
4234 struct elf_link_hash_entry *h,
4235 unsigned long r_symndx)
4236 {
4237 bfd_reloc_code_real_type bfd_r_type
4238 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4239
4240 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4241 return bfd_r_type;
4242
4243 return aarch64_tls_transition_without_check (bfd_r_type, h);
4244 }
4245
4246 /* Return the base VMA address which should be subtracted from real addresses
4247 when resolving R_AARCH64_TLS_DTPREL relocation. */
4248
4249 static bfd_vma
4250 dtpoff_base (struct bfd_link_info *info)
4251 {
4252 /* If tls_sec is NULL, we should have signalled an error already. */
4253 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4254 return elf_hash_table (info)->tls_sec->vma;
4255 }
4256
4257 /* Return the base VMA address which should be subtracted from real addresses
4258 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4259
4260 static bfd_vma
4261 tpoff_base (struct bfd_link_info *info)
4262 {
4263 struct elf_link_hash_table *htab = elf_hash_table (info);
4264
4265 /* If tls_sec is NULL, we should have signalled an error already. */
4266 BFD_ASSERT (htab->tls_sec != NULL);
4267
4268 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4269 htab->tls_sec->alignment_power);
4270 return htab->tls_sec->vma - base;
4271 }
4272
4273 static bfd_vma *
4274 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4275 unsigned long r_symndx)
4276 {
4277 /* Return a pointer to the GOT offset recorded for the symbol
4278 referred to by H, or for local symbol R_SYMNDX. */
4279 if (h != NULL)
4280 return &h->got.offset;
4281 else
4282 {
4283 /* local symbol */
4284 struct elf_aarch64_local_symbol *l;
4285
4286 l = elf_aarch64_locals (input_bfd);
4287 return &l[r_symndx].got_offset;
4288 }
4289 }
4290
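/* The low bit of the recorded GOT offset is used to note whether the
   GOT entry has already been initialised; the helpers below set, test
   and strip that marker bit.  */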
4291 static void
4292 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4293 unsigned long r_symndx)
4294 {
4295 bfd_vma *p;
4296 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4297 *p |= 1;
4298 }
4299
4300 static int
4301 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4302 unsigned long r_symndx)
4303 {
4304 bfd_vma value;
4305 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4306 return value & 1;
4307 }
4308
4309 static bfd_vma
4310 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4311 unsigned long r_symndx)
4312 {
4313 bfd_vma value;
4314 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4315 value &= ~1;
4316 return value;
4317 }
4318
4319 static bfd_vma *
4320 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4321 unsigned long r_symndx)
4322 {
4323 /* Return a pointer to the TLSDESC GOT jump table offset recorded
4324 for the symbol referred to by H, or for local symbol R_SYMNDX. */
4325 if (h != NULL)
4326 {
4327 struct elf_aarch64_link_hash_entry *eh;
4328 eh = (struct elf_aarch64_link_hash_entry *) h;
4329 return &eh->tlsdesc_got_jump_table_offset;
4330 }
4331 else
4332 {
4333 /* local symbol */
4334 struct elf_aarch64_local_symbol *l;
4335
4336 l = elf_aarch64_locals (input_bfd);
4337 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4338 }
4339 }
4340
4341 static void
4342 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4343 unsigned long r_symndx)
4344 {
4345 bfd_vma *p;
4346 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4347 *p |= 1;
4348 }
4349
4350 static int
4351 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4352 struct elf_link_hash_entry *h,
4353 unsigned long r_symndx)
4354 {
4355 bfd_vma value;
4356 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4357 return value & 1;
4358 }
4359
4360 static bfd_vma
4361 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4362 unsigned long r_symndx)
4363 {
4364 bfd_vma value;
4365 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4366 value &= ~1;
4367 return value;
4368 }
4369
4370 /* Data for make_branch_to_erratum_835769_stub(). */
4371
4372 struct erratum_835769_branch_to_stub_data
4373 {
4374 struct bfd_link_info *info;
4375 asection *output_section;
4376 bfd_byte *contents;
4377 };
4378
4379 /* Helper to insert branches to erratum 835769 stubs in the right
4380 places for a particular section. */
4381
4382 static bfd_boolean
4383 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4384 void *in_arg)
4385 {
4386 struct elf_aarch64_stub_hash_entry *stub_entry;
4387 struct erratum_835769_branch_to_stub_data *data;
4388 bfd_byte *contents;
4389 unsigned long branch_insn = 0;
4390 bfd_vma veneered_insn_loc, veneer_entry_loc;
4391 bfd_signed_vma branch_offset;
4392 unsigned int target;
4393 bfd *abfd;
4394
4395 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4396 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4397
4398 if (stub_entry->target_section != data->output_section
4399 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4400 return TRUE;
4401
4402 contents = data->contents;
4403 veneered_insn_loc = stub_entry->target_section->output_section->vma
4404 + stub_entry->target_section->output_offset
4405 + stub_entry->target_value;
4406 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4407 + stub_entry->stub_sec->output_offset
4408 + stub_entry->stub_offset;
4409 branch_offset = veneer_entry_loc - veneered_insn_loc;
4410
4411 abfd = stub_entry->target_section->owner;
4412 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4413 (*_bfd_error_handler)
4414 (_("%B: error: Erratum 835769 stub out "
4415 "of range (input file too large)"), abfd);
4416
4417 target = stub_entry->target_value;
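  /* Overwrite the veneered instruction with an unconditional branch (B)
     to the veneer; the imm26 field holds the branch offset in
     instructions.  */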
4418 branch_insn = 0x14000000;
4419 branch_offset >>= 2;
4420 branch_offset &= 0x3ffffff;
4421 branch_insn |= branch_offset;
4422 bfd_putl32 (branch_insn, &contents[target]);
4423
4424 return TRUE;
4425 }
4426
4427
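/* Traversal callback for the stub hash table: for an erratum 843419
   veneer whose target section is the one currently being written, copy
   the veneered instruction into the stub, then either rewrite the
   offending ADRP as an ADR (when the offset fits) or replace the
   veneered load/store with a branch to the veneer.  */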
4428 static bfd_boolean
4429 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4430 void *in_arg)
4431 {
4432 struct elf_aarch64_stub_hash_entry *stub_entry
4433 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4434 struct erratum_835769_branch_to_stub_data *data
4435 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4436 struct bfd_link_info *info;
4437 struct elf_aarch64_link_hash_table *htab;
4438 bfd_byte *contents;
4439 asection *section;
4440 bfd *abfd;
4441 bfd_vma place;
4442 uint32_t insn;
4443
4444 info = data->info;
4445 contents = data->contents;
4446 section = data->output_section;
4447
4448 htab = elf_aarch64_hash_table (info);
4449
4450 if (stub_entry->target_section != section
4451 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4452 return TRUE;
4453
4454 insn = bfd_getl32 (contents + stub_entry->target_value);
4455 bfd_putl32 (insn,
4456 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4457
4458 place = (section->output_section->vma + section->output_offset
4459 + stub_entry->adrp_offset);
4460 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4461
4462 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4463 abort ();
4464
4465 bfd_signed_vma imm =
4466 (_bfd_aarch64_sign_extend
4467 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4468 - (place & 0xfff));
4469
4470 if (htab->fix_erratum_843419_adr
4471 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4472 {
4473 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4474 | AARCH64_RT (insn));
4475 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4476 }
4477 else
4478 {
4479 bfd_vma veneered_insn_loc;
4480 bfd_vma veneer_entry_loc;
4481 bfd_signed_vma branch_offset;
4482 uint32_t branch_insn;
4483
4484 veneered_insn_loc = stub_entry->target_section->output_section->vma
4485 + stub_entry->target_section->output_offset
4486 + stub_entry->target_value;
4487 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4488 + stub_entry->stub_sec->output_offset
4489 + stub_entry->stub_offset;
4490 branch_offset = veneer_entry_loc - veneered_insn_loc;
4491
4492 abfd = stub_entry->target_section->owner;
4493 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4494 (*_bfd_error_handler)
4495 (_("%B: error: Erratum 843419 stub out "
4496 "of range (input file too large)"), abfd);
4497
4498 branch_insn = 0x14000000;
4499 branch_offset >>= 2;
4500 branch_offset &= 0x3ffffff;
4501 branch_insn |= branch_offset;
4502 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4503 }
4504 return TRUE;
4505 }
4506
4507
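/* Patch the contents of section SEC before it is written out, so that
   erratum 835769 and 843419 sequences are redirected to their
   workaround stubs.  */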
4508 static bfd_boolean
4509 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4510 struct bfd_link_info *link_info,
4511 asection *sec,
4512 bfd_byte *contents)
4513
4514 {
4515 struct elf_aarch64_link_hash_table *globals =
4516 elf_aarch64_hash_table (link_info);
4517
4518 if (globals == NULL)
4519 return FALSE;
4520
4521 /* Fix code to point to erratum 835769 stubs. */
4522 if (globals->fix_erratum_835769)
4523 {
4524 struct erratum_835769_branch_to_stub_data data;
4525
4526 data.info = link_info;
4527 data.output_section = sec;
4528 data.contents = contents;
4529 bfd_hash_traverse (&globals->stub_hash_table,
4530 make_branch_to_erratum_835769_stub, &data);
4531 }
4532
4533 if (globals->fix_erratum_843419)
4534 {
4535 struct erratum_835769_branch_to_stub_data data;
4536
4537 data.info = link_info;
4538 data.output_section = sec;
4539 data.contents = contents;
4540 bfd_hash_traverse (&globals->stub_hash_table,
4541 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4542 }
4543
4544 return FALSE;
4545 }
4546
4547 /* Perform a relocation as part of a final link. */
4548 static bfd_reloc_status_type
4549 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4550 bfd *input_bfd,
4551 bfd *output_bfd,
4552 asection *input_section,
4553 bfd_byte *contents,
4554 Elf_Internal_Rela *rel,
4555 bfd_vma value,
4556 struct bfd_link_info *info,
4557 asection *sym_sec,
4558 struct elf_link_hash_entry *h,
4559 bfd_boolean *unresolved_reloc_p,
4560 bfd_boolean save_addend,
4561 bfd_vma *saved_addend,
4562 Elf_Internal_Sym *sym)
4563 {
4564 Elf_Internal_Shdr *symtab_hdr;
4565 unsigned int r_type = howto->type;
4566 bfd_reloc_code_real_type bfd_r_type
4567 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4568 bfd_reloc_code_real_type new_bfd_r_type;
4569 unsigned long r_symndx;
4570 bfd_byte *hit_data = contents + rel->r_offset;
4571 bfd_vma place, off;
4572 bfd_signed_vma signed_addend;
4573 struct elf_aarch64_link_hash_table *globals;
4574 bfd_boolean weak_undef_p;
4575 asection *base_got;
4576
4577 globals = elf_aarch64_hash_table (info);
4578
4579 symtab_hdr = &elf_symtab_hdr (input_bfd);
4580
4581 BFD_ASSERT (is_aarch64_elf (input_bfd));
4582
4583 r_symndx = ELFNN_R_SYM (rel->r_info);
4584
4585 /* It is possible to have linker relaxations on some TLS access
4586 models. Update our information here. */
4587 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4588 if (new_bfd_r_type != bfd_r_type)
4589 {
4590 bfd_r_type = new_bfd_r_type;
4591 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4592 BFD_ASSERT (howto != NULL);
4593 r_type = howto->type;
4594 }
4595
4596 place = input_section->output_section->vma
4597 + input_section->output_offset + rel->r_offset;
4598
4599 /* Get addend, accumulating the addend for consecutive relocs
4600 which refer to the same offset. */
4601 signed_addend = saved_addend ? *saved_addend : 0;
4602 signed_addend += rel->r_addend;
4603
4604 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4605 : bfd_is_und_section (sym_sec));
4606
4607 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4608 it here if it is defined in a non-shared object. */
4609 if (h != NULL
4610 && h->type == STT_GNU_IFUNC
4611 && h->def_regular)
4612 {
4613 asection *plt;
4614 const char *name;
4615 bfd_vma addend = 0;
4616
4617 if ((input_section->flags & SEC_ALLOC) == 0
4618 || h->plt.offset == (bfd_vma) -1)
4619 abort ();
4620
4621 /* STT_GNU_IFUNC symbol must go through PLT. */
4622 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4623 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4624
4625 switch (bfd_r_type)
4626 {
4627 default:
4628 if (h->root.root.string)
4629 name = h->root.root.string;
4630 else
4631 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4632 NULL);
4633 (*_bfd_error_handler)
4634 (_("%B: relocation %s against STT_GNU_IFUNC "
4635 "symbol `%s' isn't handled by %s"), input_bfd,
4636 howto->name, name, __FUNCTION__);
4637 bfd_set_error (bfd_error_bad_value);
4638 return FALSE;
4639
4640 case BFD_RELOC_AARCH64_NN:
4641 if (rel->r_addend != 0)
4642 {
4643 if (h->root.root.string)
4644 name = h->root.root.string;
4645 else
4646 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4647 sym, NULL);
4648 (*_bfd_error_handler)
4649 (_("%B: relocation %s against STT_GNU_IFUNC "
4650 "symbol `%s' has non-zero addend: %d"),
4651 input_bfd, howto->name, name, rel->r_addend);
4652 bfd_set_error (bfd_error_bad_value);
4653 return FALSE;
4654 }
4655
4656 /* Generate dynamic relocation only when there is a
4657 non-GOT reference in a shared object. */
4658 if (info->shared && h->non_got_ref)
4659 {
4660 Elf_Internal_Rela outrel;
4661 asection *sreloc;
4662
4663 /* Need a dynamic relocation to get the real function
4664 address. */
4665 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4666 info,
4667 input_section,
4668 rel->r_offset);
4669 if (outrel.r_offset == (bfd_vma) -1
4670 || outrel.r_offset == (bfd_vma) -2)
4671 abort ();
4672
4673 outrel.r_offset += (input_section->output_section->vma
4674 + input_section->output_offset);
4675
4676 if (h->dynindx == -1
4677 || h->forced_local
4678 || info->executable)
4679 {
4680 /* This symbol is resolved locally. */
4681 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4682 outrel.r_addend = (h->root.u.def.value
4683 + h->root.u.def.section->output_section->vma
4684 + h->root.u.def.section->output_offset);
4685 }
4686 else
4687 {
4688 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4689 outrel.r_addend = 0;
4690 }
4691
4692 sreloc = globals->root.irelifunc;
4693 elf_append_rela (output_bfd, sreloc, &outrel);
4694
4695 /* If this reloc is against an external symbol, we
4696 do not want to fiddle with the addend. Otherwise,
4697 we need to include the symbol value so that it
4698 becomes an addend for the dynamic reloc. For an
4699 internal symbol, we have updated addend. */
4700 return bfd_reloc_ok;
4701 }
4702 /* FALLTHROUGH */
4703 case BFD_RELOC_AARCH64_CALL26:
4704 case BFD_RELOC_AARCH64_JUMP26:
4705 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4706 signed_addend,
4707 weak_undef_p);
4708 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4709 howto, value);
4710 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4711 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4712 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4713 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4714 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4715 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4716 base_got = globals->root.sgot;
4717 off = h->got.offset;
4718
4719 if (base_got == NULL)
4720 abort ();
4721
4722 if (off == (bfd_vma) -1)
4723 {
4724 bfd_vma plt_index;
4725
4726 /* We can't use h->got.offset here to save state, or
4727 even just remember the offset, as finish_dynamic_symbol
4728 would use that as offset into .got. */
4729
4730 if (globals->root.splt != NULL)
4731 {
4732 plt_index = ((h->plt.offset - globals->plt_header_size) /
4733 globals->plt_entry_size);
4734 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4735 base_got = globals->root.sgotplt;
4736 }
4737 else
4738 {
4739 plt_index = h->plt.offset / globals->plt_entry_size;
4740 off = plt_index * GOT_ENTRY_SIZE;
4741 base_got = globals->root.igotplt;
4742 }
4743
4744 if (h->dynindx == -1
4745 || h->forced_local
4746 || info->symbolic)
4747 {
4748 /* This references the local definition. We must
4749 initialize this entry in the global offset table.
4750 Since the offset must always be a multiple of 8,
4751 we use the least significant bit to record
4752 whether we have initialized it already.
4753
4754 When doing a dynamic link, we create a .rela.got
4755 relocation entry to initialize the value. This
4756 is done in the finish_dynamic_symbol routine. */
4757 if ((off & 1) != 0)
4758 off &= ~1;
4759 else
4760 {
4761 bfd_put_NN (output_bfd, value,
4762 base_got->contents + off);
4763 /* Note that this is harmless as -1 | 1 still is -1. */
4764 h->got.offset |= 1;
4765 }
4766 }
4767 value = (base_got->output_section->vma
4768 + base_got->output_offset + off);
4769 }
4770 else
4771 value = aarch64_calculate_got_entry_vma (h, globals, info,
4772 value, output_bfd,
4773 unresolved_reloc_p);
4774 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4775 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4776 addend = (globals->root.sgot->output_section->vma
4777 + globals->root.sgot->output_offset);
4778 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4779 addend, weak_undef_p);
4780 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4781 case BFD_RELOC_AARCH64_ADD_LO12:
4782 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4783 break;
4784 }
4785 }
4786
4787 switch (bfd_r_type)
4788 {
4789 case BFD_RELOC_AARCH64_NONE:
4790 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4791 *unresolved_reloc_p = FALSE;
4792 return bfd_reloc_ok;
4793
4794 case BFD_RELOC_AARCH64_NN:
4795
4796 /* When generating a shared object or relocatable executable, these
4797 relocations are copied into the output file to be resolved at
4798 run time. */
4799 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4800 && (input_section->flags & SEC_ALLOC)
4801 && (h == NULL
4802 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4803 || h->root.type != bfd_link_hash_undefweak))
4804 {
4805 Elf_Internal_Rela outrel;
4806 bfd_byte *loc;
4807 bfd_boolean skip, relocate;
4808 asection *sreloc;
4809
4810 *unresolved_reloc_p = FALSE;
4811
4812 skip = FALSE;
4813 relocate = FALSE;
4814
4815 outrel.r_addend = signed_addend;
4816 outrel.r_offset =
4817 _bfd_elf_section_offset (output_bfd, info, input_section,
4818 rel->r_offset);
4819 if (outrel.r_offset == (bfd_vma) - 1)
4820 skip = TRUE;
4821 else if (outrel.r_offset == (bfd_vma) - 2)
4822 {
4823 skip = TRUE;
4824 relocate = TRUE;
4825 }
4826
4827 outrel.r_offset += (input_section->output_section->vma
4828 + input_section->output_offset);
4829
4830 if (skip)
4831 memset (&outrel, 0, sizeof outrel);
4832 else if (h != NULL
4833 && h->dynindx != -1
4834 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4835 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4836 else
4837 {
4838 int symbol;
4839
4840 /* On SVR4-ish systems, the dynamic loader cannot
4841 relocate the text and data segments independently,
4842 so the symbol does not matter. */
4843 symbol = 0;
4844 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4845 outrel.r_addend += value;
4846 }
4847
4848 sreloc = elf_section_data (input_section)->sreloc;
4849 if (sreloc == NULL || sreloc->contents == NULL)
4850 return bfd_reloc_notsupported;
4851
4852 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4853 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4854
4855 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4856 {
4857 /* Sanity check that we have previously allocated
4858 sufficient space in the relocation section for the
4859 number of relocations we actually want to emit. */
4860 abort ();
4861 }
4862
4863 /* If this reloc is against an external symbol, we do not want to
4864 fiddle with the addend. Otherwise, we need to include the symbol
4865 value so that it becomes an addend for the dynamic reloc. */
4866 if (!relocate)
4867 return bfd_reloc_ok;
4868
4869 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4870 contents, rel->r_offset, value,
4871 signed_addend);
4872 }
4873 else
4874 value += signed_addend;
4875 break;
4876
4877 case BFD_RELOC_AARCH64_CALL26:
4878 case BFD_RELOC_AARCH64_JUMP26:
4879 {
4880 asection *splt = globals->root.splt;
4881 bfd_boolean via_plt_p =
4882 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4883
4884 /* A call to an undefined weak symbol is converted to a jump to
4885 the next instruction unless a PLT entry will be created.
4886 The jump to the next instruction is optimized as a NOP.
4887 Do the same for local undefined symbols. */
4888 if (weak_undef_p && ! via_plt_p)
4889 {
4890 bfd_putl32 (INSN_NOP, hit_data);
4891 return bfd_reloc_ok;
4892 }
4893
4894 /* If the call goes through a PLT entry, make sure to
4895 check distance to the right destination address. */
4896 if (via_plt_p)
4897 value = (splt->output_section->vma
4898 + splt->output_offset + h->plt.offset);
4899
4900 /* Check if a stub has to be inserted because the destination
4901 is too far away. */
4902 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
4903 if (! aarch64_valid_branch_p (value, place))
4904 /* The target is out of reach, so redirect the branch to
4905 the local stub for this function. */
4906 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
4907 rel, globals);
4908 if (stub_entry != NULL)
4909 value = (stub_entry->stub_offset
4910 + stub_entry->stub_sec->output_offset
4911 + stub_entry->stub_sec->output_section->vma);
4912 }
4913 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4914 signed_addend, weak_undef_p);
4915 *unresolved_reloc_p = FALSE;
4916 break;
4917
4918 case BFD_RELOC_AARCH64_16_PCREL:
4919 case BFD_RELOC_AARCH64_32_PCREL:
4920 case BFD_RELOC_AARCH64_64_PCREL:
4921 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4922 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4923 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4924 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4925 if (info->shared
4926 && (input_section->flags & SEC_ALLOC) != 0
4927 && (input_section->flags & SEC_READONLY) != 0
4928 && h != NULL
4929 && !h->def_regular)
4930 {
4931 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4932
4933 (*_bfd_error_handler)
4934 (_("%B: relocation %s against external symbol `%s' can not be used"
4935 " when making a shared object; recompile with -fPIC"),
4936 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4937 h->root.root.string);
4938 bfd_set_error (bfd_error_bad_value);
4939 return FALSE;
4940 }
4941
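	  /* Fall through.  */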
4942 case BFD_RELOC_AARCH64_16:
4943 #if ARCH_SIZE == 64
4944 case BFD_RELOC_AARCH64_32:
4945 #endif
4946 case BFD_RELOC_AARCH64_ADD_LO12:
4947 case BFD_RELOC_AARCH64_BRANCH19:
4948 case BFD_RELOC_AARCH64_LDST128_LO12:
4949 case BFD_RELOC_AARCH64_LDST16_LO12:
4950 case BFD_RELOC_AARCH64_LDST32_LO12:
4951 case BFD_RELOC_AARCH64_LDST64_LO12:
4952 case BFD_RELOC_AARCH64_LDST8_LO12:
4953 case BFD_RELOC_AARCH64_MOVW_G0:
4954 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4955 case BFD_RELOC_AARCH64_MOVW_G0_S:
4956 case BFD_RELOC_AARCH64_MOVW_G1:
4957 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4958 case BFD_RELOC_AARCH64_MOVW_G1_S:
4959 case BFD_RELOC_AARCH64_MOVW_G2:
4960 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4961 case BFD_RELOC_AARCH64_MOVW_G2_S:
4962 case BFD_RELOC_AARCH64_MOVW_G3:
4963 case BFD_RELOC_AARCH64_TSTBR14:
4964 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4965 signed_addend, weak_undef_p);
4966 break;
4967
4968 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4969 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4970 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4971 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4972 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4973 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4974 if (globals->root.sgot == NULL)
4975 BFD_ASSERT (h != NULL);
4976
4977 if (h != NULL)
4978 {
4979 bfd_vma addend = 0;
4980 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4981 output_bfd,
4982 unresolved_reloc_p);
4983 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4984 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4985 addend = (globals->root.sgot->output_section->vma
4986 + globals->root.sgot->output_offset);
4987 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4988 addend, weak_undef_p);
4989 }
4990 else
4991 {
4992 bfd_vma addend = 0;
4993 struct elf_aarch64_local_symbol *locals
4994 = elf_aarch64_locals (input_bfd);
4995
4996 if (locals == NULL)
4997 {
4998 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4999 (*_bfd_error_handler)
5000 (_("%B: Local symbol descriptor table be NULL when applying "
5001 "relocation %s against local symbol"),
5002 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5003 abort ();
5004 }
5005
5006 off = symbol_got_offset (input_bfd, h, r_symndx);
5007 base_got = globals->root.sgot;
5008 bfd_vma got_entry_addr = (base_got->output_section->vma
5009 + base_got->output_offset + off);
5010
5011 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5012 {
5013 bfd_put_64 (output_bfd, value, base_got->contents + off);
5014
5015 if (info->shared)
5016 {
5017 asection *s;
5018 Elf_Internal_Rela outrel;
5019
5020 /* For a local symbol, we have done the absolute relocation at
5021 static link time.  For a shared library, the GOT entry content
5022 must instead be fixed up according to the shared object's load
5023 base address, so we generate an R_AARCH64_RELATIVE reloc for
5024 the dynamic linker. */
5025 s = globals->root.srelgot;
5026 if (s == NULL)
5027 abort ();
5028
5029 outrel.r_offset = got_entry_addr;
5030 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5031 outrel.r_addend = value;
5032 elf_append_rela (output_bfd, s, &outrel);
5033 }
5034
5035 symbol_got_offset_mark (input_bfd, h, r_symndx);
5036 }
5037
5038 /* Use the GOT entry address as the relocation value, since we have
5039 transformed the direct data access into an indirect access through the GOT. */
5040 value = got_entry_addr;
5041
5042 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5043 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5044 addend = base_got->output_section->vma + base_got->output_offset;
5045
5046 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5047 addend, weak_undef_p);
5048 }
5049
5050 break;
5051
5052 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5053 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5054 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5055 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5056 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5057 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5058 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5059 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5060 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5061 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5062 if (globals->root.sgot == NULL)
5063 return bfd_reloc_notsupported;
5064
5065 value = (symbol_got_offset (input_bfd, h, r_symndx)
5066 + globals->root.sgot->output_section->vma
5067 + globals->root.sgot->output_offset);
5068
5069 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5070 0, weak_undef_p);
5071 *unresolved_reloc_p = FALSE;
5072 break;
5073
5074 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5075 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5076 signed_addend - dtpoff_base (info),
5077 weak_undef_p);
5078 break;
5079
5080 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5081 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5082 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5083 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5084 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5085 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5086 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5087 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5088 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5089 signed_addend - tpoff_base (info),
5090 weak_undef_p);
5091 *unresolved_reloc_p = FALSE;
5092 break;
5093
5094 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5095 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5096 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5097 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5098 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5099 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5100 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5101 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5102 if (globals->root.sgot == NULL)
5103 return bfd_reloc_notsupported;
5104 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5105 + globals->root.sgotplt->output_section->vma
5106 + globals->root.sgotplt->output_offset
5107 + globals->sgotplt_jump_table_size);
5108
5109 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5110 0, weak_undef_p);
5111 *unresolved_reloc_p = FALSE;
5112 break;
5113
5114 default:
5115 return bfd_reloc_notsupported;
5116 }
5117
5118 if (saved_addend)
5119 *saved_addend = value;
5120
5121 /* Only apply the final relocation in a sequence. */
5122 if (save_addend)
5123 return bfd_reloc_continue;
5124
5125 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5126 howto, value);
5127 }
5128
5129 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5130 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5131 link.
5132
5133 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5134 is to then call final_link_relocate. Return other values in the
5135 case of error. */
5136
5137 static bfd_reloc_status_type
5138 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5139 bfd *input_bfd, bfd_byte *contents,
5140 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5141 {
5142 bfd_boolean is_local = h == NULL;
5143 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5144 unsigned long insn;
5145
5146 BFD_ASSERT (globals && input_bfd && contents && rel);
5147
5148 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5149 {
5150 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5151 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5152 if (is_local)
5153 {
5154 /* GD->LE relaxation:
5155 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5156 or
5157 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5158 */
5159 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5160 return bfd_reloc_continue;
5161 }
5162 else
5163 {
5164 /* GD->IE relaxation:
5165 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5166 or
5167 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5168 */
5169 return bfd_reloc_continue;
5170 }
5171
5172 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5173 BFD_ASSERT (0);
5174 break;
5175
5176 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5177 if (is_local)
5178 {
5179 /* Tiny TLSDESC->LE relaxation:
5180 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5181 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5182 .tlsdesccall var
5183 blr x1 => nop
5184 */
5185 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5186 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5187
5188 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5189 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5190 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5191
5192 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5193 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5194 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5195 return bfd_reloc_continue;
5196 }
5197 else
5198 {
5199 /* Tiny TLSDESC->IE relaxation:
5200 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5201 adr x0, :tlsdesc:var => nop
5202 .tlsdesccall var
5203 blr x1 => nop
5204 */
5205 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5206 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5207
5208 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5209 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5210
5211 bfd_putl32 (0x58000000, contents + rel->r_offset);
5212 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5213 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5214 return bfd_reloc_continue;
5215 }
5216
5217 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5218 if (is_local)
5219 {
5220 /* Tiny GD->LE relaxation:
5221 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5222 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5223 nop => add x0, x0, #:tprel_lo12_nc:x
5224 */
5225
5226 /* First kill the tls_get_addr reloc on the bl instruction. */
5227 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5228
5229 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5230 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5231 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5232
5233 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5234 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5235 rel[1].r_offset = rel->r_offset + 8;
5236
5237 /* Move the current relocation to the second instruction in
5238 the sequence. */
5239 rel->r_offset += 4;
5240 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5241 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5242 return bfd_reloc_continue;
5243 }
5244 else
5245 {
5246 /* Tiny GD->IE relaxation:
5247 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5248 bl __tls_get_addr => mrs x1, tpidr_el0
5249 nop => add x0, x0, x1
5250 */
5251
5252 /* First kill the tls_get_addr reloc on the bl instruction. */
5253 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5254 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5255
5256 bfd_putl32 (0x58000000, contents + rel->r_offset);
5257 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5258 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5259 return bfd_reloc_continue;
5260 }
5261
5262 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5263 return bfd_reloc_continue;
5264
5265 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5266 if (is_local)
5267 {
5268 /* GD->LE relaxation:
5269 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5270 */
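/* 0xf2800000 is "movk x0, #0"; the :tprel_g0_nc: immediate is
   applied by the relaxed relocation.  */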
5271 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5272 return bfd_reloc_continue;
5273 }
5274 else
5275 {
5276 /* GD->IE relaxation:
5277 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5278 */
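/* Clearing bits [4:0] (the Rt field) of the existing load forces
   its destination to register 0 while leaving the addressing
   unchanged; the :gottprel_lo12: offset is applied by the relaxed
   relocation.  */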
5279 insn = bfd_getl32 (contents + rel->r_offset);
5280 insn &= 0xffffffe0;
5281 bfd_putl32 (insn, contents + rel->r_offset);
5282 return bfd_reloc_continue;
5283 }
5284
5285 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5286 if (is_local)
5287 {
5288 /* GD->LE relaxation
5289 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5290 bl __tls_get_addr => mrs x1, tpidr_el0
5291 nop => add x0, x1, x0
5292 */
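/* Encodings used below: 0xf2800000 is "movk x0, #0", 0xd53bd041 is
   "mrs x1, tpidr_el0" and 0x8b000020 is "add x0, x1, x0".  */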
5293
5294 /* First kill the tls_get_addr reloc on the bl instruction. */
5295 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5296 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5297
5298 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5299 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5300 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5301 return bfd_reloc_continue;
5302 }
5303 else
5304 {
5305 /* GD->IE relaxation
5306 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5307 BL __tls_get_addr => mrs x1, tpidr_el0
5308 R_AARCH64_CALL26
5309 NOP => add x0, x1, x0
5310 */
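/* 0xf9400000 is "ldr x0, [x0, #0]" (unsigned-offset form); the
   :gottprel_lo12: offset is applied by the relaxed relocation.  */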
5311
5312 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5313
5314 /* Remove the relocation on the BL instruction. */
5315 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5316
5317 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5318
5319 /* We choose to fixup the BL and NOP instructions using the
5320 offset from the second relocation to allow flexibility in
5321 scheduling instructions between the ADD and BL. */
5322 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5323 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5324 return bfd_reloc_continue;
5325 }
5326
5327 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5328 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5329 /* GD->IE/LE relaxation:
5330 add x0, x0, #:tlsdesc_lo12:var => nop
5331 blr xd => nop
5332 */
5333 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5334 return bfd_reloc_ok;
5335
5336 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5337 /* IE->LE relaxation:
5338 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5339 */
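/* (insn & 0x1f) preserves the destination register of the original
   adrp, so 0xd2a00000 | Rd is "movz xd, #0, lsl #16"; the
   :tprel_g1: value is applied by the relaxed relocation.  */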
5340 if (is_local)
5341 {
5342 insn = bfd_getl32 (contents + rel->r_offset);
5343 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5344 }
5345 return bfd_reloc_continue;
5346
5347 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5348 /* IE->LE relaxation:
5349 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5350 */
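/* Likewise (insn & 0x1f) keeps the destination register, turning
   0xf2800000 | Rd into "movk xd, #0"; the :tprel_g0_nc: value is
   applied by the relaxed relocation.  */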
5351 if (is_local)
5352 {
5353 insn = bfd_getl32 (contents + rel->r_offset);
5354 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5355 }
5356 return bfd_reloc_continue;
5357
5358 default:
5359 return bfd_reloc_continue;
5360 }
5361
5362 return bfd_reloc_ok;
5363 }
5364
5365 /* Relocate an AArch64 ELF section. */
5366
5367 static bfd_boolean
5368 elfNN_aarch64_relocate_section (bfd *output_bfd,
5369 struct bfd_link_info *info,
5370 bfd *input_bfd,
5371 asection *input_section,
5372 bfd_byte *contents,
5373 Elf_Internal_Rela *relocs,
5374 Elf_Internal_Sym *local_syms,
5375 asection **local_sections)
5376 {
5377 Elf_Internal_Shdr *symtab_hdr;
5378 struct elf_link_hash_entry **sym_hashes;
5379 Elf_Internal_Rela *rel;
5380 Elf_Internal_Rela *relend;
5381 const char *name;
5382 struct elf_aarch64_link_hash_table *globals;
5383 bfd_boolean save_addend = FALSE;
5384 bfd_vma addend = 0;
5385
5386 globals = elf_aarch64_hash_table (info);
5387
5388 symtab_hdr = &elf_symtab_hdr (input_bfd);
5389 sym_hashes = elf_sym_hashes (input_bfd);
5390
5391 rel = relocs;
5392 relend = relocs + input_section->reloc_count;
5393 for (; rel < relend; rel++)
5394 {
5395 unsigned int r_type;
5396 bfd_reloc_code_real_type bfd_r_type;
5397 bfd_reloc_code_real_type relaxed_bfd_r_type;
5398 reloc_howto_type *howto;
5399 unsigned long r_symndx;
5400 Elf_Internal_Sym *sym;
5401 asection *sec;
5402 struct elf_link_hash_entry *h;
5403 bfd_vma relocation;
5404 bfd_reloc_status_type r;
5405 arelent bfd_reloc;
5406 char sym_type;
5407 bfd_boolean unresolved_reloc = FALSE;
5408 char *error_message = NULL;
5409
5410 r_symndx = ELFNN_R_SYM (rel->r_info);
5411 r_type = ELFNN_R_TYPE (rel->r_info);
5412
5413 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5414 howto = bfd_reloc.howto;
5415
5416 if (howto == NULL)
5417 {
5418 (*_bfd_error_handler)
5419 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5420 input_bfd, input_section, r_type);
5421 return FALSE;
5422 }
5423 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5424
5425 h = NULL;
5426 sym = NULL;
5427 sec = NULL;
5428
5429 if (r_symndx < symtab_hdr->sh_info)
5430 {
5431 sym = local_syms + r_symndx;
5432 sym_type = ELFNN_ST_TYPE (sym->st_info);
5433 sec = local_sections[r_symndx];
5434
5435 /* An object file might have a reference to a local
5436 undefined symbol. This is a daft object file, but we
5437 should at least do something about it. */
5438 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5439 && bfd_is_und_section (sec)
5440 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5441 {
5442 if (!info->callbacks->undefined_symbol
5443 (info, bfd_elf_string_from_elf_section
5444 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5445 input_bfd, input_section, rel->r_offset, TRUE))
5446 return FALSE;
5447 }
5448
5449 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5450
5451 /* Relocate against local STT_GNU_IFUNC symbol. */
5452 if (!info->relocatable
5453 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5454 {
5455 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5456 rel, FALSE);
5457 if (h == NULL)
5458 abort ();
5459
5460 /* Set STT_GNU_IFUNC symbol value. */
5461 h->root.u.def.value = sym->st_value;
5462 h->root.u.def.section = sec;
5463 }
5464 }
5465 else
5466 {
5467 bfd_boolean warned, ignored;
5468
5469 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5470 r_symndx, symtab_hdr, sym_hashes,
5471 h, sec, relocation,
5472 unresolved_reloc, warned, ignored);
5473
5474 sym_type = h->type;
5475 }
5476
5477 if (sec != NULL && discarded_section (sec))
5478 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5479 rel, 1, relend, howto, 0, contents);
5480
5481 if (info->relocatable)
5482 continue;
5483
5484 if (h != NULL)
5485 name = h->root.root.string;
5486 else
5487 {
5488 name = (bfd_elf_string_from_elf_section
5489 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5490 if (name == NULL || *name == '\0')
5491 name = bfd_section_name (input_bfd, sec);
5492 }
5493
5494 if (r_symndx != 0
5495 && r_type != R_AARCH64_NONE
5496 && r_type != R_AARCH64_NULL
5497 && (h == NULL
5498 || h->root.type == bfd_link_hash_defined
5499 || h->root.type == bfd_link_hash_defweak)
5500 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5501 {
5502 (*_bfd_error_handler)
5503 ((sym_type == STT_TLS
5504 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5505 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5506 input_bfd,
5507 input_section, (long) rel->r_offset, howto->name, name);
5508 }
5509
5510 /* We relax only if we can see that there can be a valid transition
5511 from one reloc type to another.
5512 We call elfNN_aarch64_final_link_relocate unless we're completely
5513 done, i.e., the relaxation produced the final output we want. */
5514
5515 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5516 h, r_symndx);
5517 if (relaxed_bfd_r_type != bfd_r_type)
5518 {
5519 bfd_r_type = relaxed_bfd_r_type;
5520 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5521 BFD_ASSERT (howto != NULL);
5522 r_type = howto->type;
5523 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5524 unresolved_reloc = 0;
5525 }
5526 else
5527 r = bfd_reloc_continue;
5528
5529 /* There may be multiple consecutive relocations for the
5530 same offset. In that case we are supposed to treat the
5531 output of each relocation as the addend for the next. */
5532 if (rel + 1 < relend
5533 && rel->r_offset == rel[1].r_offset
5534 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5535 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5536 save_addend = TRUE;
5537 else
5538 save_addend = FALSE;
5539
5540 if (r == bfd_reloc_continue)
5541 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5542 input_section, contents, rel,
5543 relocation, info, sec,
5544 h, &unresolved_reloc,
5545 save_addend, &addend, sym);
5546
5547 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5548 {
5549 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5550 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5551 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5552 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5553 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5554 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5555 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5556 {
5557 bfd_boolean need_relocs = FALSE;
5558 bfd_byte *loc;
5559 int indx;
5560 bfd_vma off;
5561
5562 off = symbol_got_offset (input_bfd, h, r_symndx);
5563 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5564
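/* Dynamic DTPMOD/DTPREL relocations are needed when producing a
   shared object or when the symbol is dynamic; the exception is an
   undefined weak symbol with non-default visibility, which is known
   to resolve to zero at static link time.  */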
5565 need_relocs =
5566 (info->shared || indx != 0) &&
5567 (h == NULL
5568 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5569 || h->root.type != bfd_link_hash_undefweak);
5570
5571 BFD_ASSERT (globals->root.srelgot != NULL);
5572
5573 if (need_relocs)
5574 {
5575 Elf_Internal_Rela rela;
5576 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5577 rela.r_addend = 0;
5578 rela.r_offset = globals->root.sgot->output_section->vma +
5579 globals->root.sgot->output_offset + off;
5580
5581
5582 loc = globals->root.srelgot->contents;
5583 loc += globals->root.srelgot->reloc_count++
5584 * RELOC_SIZE (globals);
5585 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5586
5587 bfd_reloc_code_real_type real_type =
5588 elfNN_aarch64_bfd_reloc_from_type (r_type);
5589
5590 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5591 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
5592 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
5593 {
5594 /* For local dynamic, don't generate DTPREL in any case.
5595 Initialize the DTPREL slot to zero, so that we get the module
5596 base address when the runtime TLS resolver is invoked. */
5597 bfd_put_NN (output_bfd, 0,
5598 globals->root.sgot->contents + off
5599 + GOT_ENTRY_SIZE);
5600 }
5601 else if (indx == 0)
5602 {
5603 bfd_put_NN (output_bfd,
5604 relocation - dtpoff_base (info),
5605 globals->root.sgot->contents + off
5606 + GOT_ENTRY_SIZE);
5607 }
5608 else
5609 {
5610 /* This TLS symbol is global. We emit a
5611 relocation to fixup the tls offset at load
5612 time. */
5613 rela.r_info =
5614 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5615 rela.r_addend = 0;
5616 rela.r_offset =
5617 (globals->root.sgot->output_section->vma
5618 + globals->root.sgot->output_offset + off
5619 + GOT_ENTRY_SIZE);
5620
5621 loc = globals->root.srelgot->contents;
5622 loc += globals->root.srelgot->reloc_count++
5623 * RELOC_SIZE (globals);
5624 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5625 bfd_put_NN (output_bfd, (bfd_vma) 0,
5626 globals->root.sgot->contents + off
5627 + GOT_ENTRY_SIZE);
5628 }
5629 }
5630 else
5631 {
5632 bfd_put_NN (output_bfd, (bfd_vma) 1,
5633 globals->root.sgot->contents + off);
5634 bfd_put_NN (output_bfd,
5635 relocation - dtpoff_base (info),
5636 globals->root.sgot->contents + off
5637 + GOT_ENTRY_SIZE);
5638 }
5639
5640 symbol_got_offset_mark (input_bfd, h, r_symndx);
5641 }
5642 break;
5643
5644 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5645 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5646 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5647 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5648 {
5649 bfd_boolean need_relocs = FALSE;
5650 bfd_byte *loc;
5651 int indx;
5652 bfd_vma off;
5653
5654 off = symbol_got_offset (input_bfd, h, r_symndx);
5655
5656 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5657
5658 need_relocs =
5659 (info->shared || indx != 0) &&
5660 (h == NULL
5661 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5662 || h->root.type != bfd_link_hash_undefweak);
5663
5664 BFD_ASSERT (globals->root.srelgot != NULL);
5665
5666 if (need_relocs)
5667 {
5668 Elf_Internal_Rela rela;
5669
5670 if (indx == 0)
5671 rela.r_addend = relocation - dtpoff_base (info);
5672 else
5673 rela.r_addend = 0;
5674
5675 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5676 rela.r_offset = globals->root.sgot->output_section->vma +
5677 globals->root.sgot->output_offset + off;
5678
5679 loc = globals->root.srelgot->contents;
5680 loc += globals->root.srelgot->reloc_count++
5681 * RELOC_SIZE (globals);
5682
5683 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5684
5685 bfd_put_NN (output_bfd, rela.r_addend,
5686 globals->root.sgot->contents + off);
5687 }
5688 else
5689 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5690 globals->root.sgot->contents + off);
5691
5692 symbol_got_offset_mark (input_bfd, h, r_symndx);
5693 }
5694 break;
5695
5696 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5697 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5698 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5699 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5700 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5701 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5702 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5703 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5704 break;
5705
5706 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5707 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5708 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5709 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5710 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5711 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5712 {
5713 bfd_boolean need_relocs = FALSE;
5714 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5715 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5716
5717 need_relocs = (h == NULL
5718 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5719 || h->root.type != bfd_link_hash_undefweak);
5720
5721 BFD_ASSERT (globals->root.srelgot != NULL);
5722 BFD_ASSERT (globals->root.sgot != NULL);
5723
5724 if (need_relocs)
5725 {
5726 bfd_byte *loc;
5727 Elf_Internal_Rela rela;
5728 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5729
5730 rela.r_addend = 0;
5731 rela.r_offset = (globals->root.sgotplt->output_section->vma
5732 + globals->root.sgotplt->output_offset
5733 + off + globals->sgotplt_jump_table_size);
5734
5735 if (indx == 0)
5736 rela.r_addend = relocation - dtpoff_base (info);
5737
5738 /* Allocate the next available slot in the PLT reloc
5739 section to hold our R_AARCH64_TLSDESC; the next
5740 available slot is determined from reloc_count,
5741 which we increment here. Note that reloc_count was
5742 artificially moved down while allocating slots for
5743 the real PLT relocs, so that all of the PLT relocs
5744 will fit above the initial reloc_count and the
5745 extra entries will fit below. */
5746 loc = globals->root.srelplt->contents;
5747 loc += globals->root.srelplt->reloc_count++
5748 * RELOC_SIZE (globals);
5749
5750 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5751
5752 bfd_put_NN (output_bfd, (bfd_vma) 0,
5753 globals->root.sgotplt->contents + off +
5754 globals->sgotplt_jump_table_size);
5755 bfd_put_NN (output_bfd, (bfd_vma) 0,
5756 globals->root.sgotplt->contents + off +
5757 globals->sgotplt_jump_table_size +
5758 GOT_ENTRY_SIZE);
5759 }
5760
5761 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5762 }
5763 break;
5764 default:
5765 break;
5766 }
5767
5768 if (!save_addend)
5769 addend = 0;
5770
5771
5772 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5773 because such sections are not SEC_ALLOC and thus ld.so will
5774 not process them. */
5775 if (unresolved_reloc
5776 && !((input_section->flags & SEC_DEBUGGING) != 0
5777 && h->def_dynamic)
5778 && _bfd_elf_section_offset (output_bfd, info, input_section,
5779 +rel->r_offset) != (bfd_vma) - 1)
5780 {
5781 (*_bfd_error_handler)
5782 (_
5783 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5784 input_bfd, input_section, (long) rel->r_offset, howto->name,
5785 h->root.root.string);
5786 return FALSE;
5787 }
5788
5789 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5790 {
5791 switch (r)
5792 {
5793 case bfd_reloc_overflow:
5794 if (!(*info->callbacks->reloc_overflow)
5795 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5796 input_bfd, input_section, rel->r_offset))
5797 return FALSE;
5798 break;
5799
5800 case bfd_reloc_undefined:
5801 if (!((*info->callbacks->undefined_symbol)
5802 (info, name, input_bfd, input_section,
5803 rel->r_offset, TRUE)))
5804 return FALSE;
5805 break;
5806
5807 case bfd_reloc_outofrange:
5808 error_message = _("out of range");
5809 goto common_error;
5810
5811 case bfd_reloc_notsupported:
5812 error_message = _("unsupported relocation");
5813 goto common_error;
5814
5815 case bfd_reloc_dangerous:
5816 /* error_message should already be set. */
5817 goto common_error;
5818
5819 default:
5820 error_message = _("unknown error");
5821 /* Fall through. */
5822
5823 common_error:
5824 BFD_ASSERT (error_message != NULL);
5825 if (!((*info->callbacks->reloc_dangerous)
5826 (info, error_message, input_bfd, input_section,
5827 rel->r_offset)))
5828 return FALSE;
5829 break;
5830 }
5831 }
5832 }
5833
5834 return TRUE;
5835 }
5836
5837 /* Set the right machine number. */
5838
5839 static bfd_boolean
5840 elfNN_aarch64_object_p (bfd *abfd)
5841 {
5842 #if ARCH_SIZE == 32
5843 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5844 #else
5845 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5846 #endif
5847 return TRUE;
5848 }
5849
5850 /* Function to keep AArch64 specific flags in the ELF header. */
5851
5852 static bfd_boolean
5853 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5854 {
5855 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5856 {
5857 }
5858 else
5859 {
5860 elf_elfheader (abfd)->e_flags = flags;
5861 elf_flags_init (abfd) = TRUE;
5862 }
5863
5864 return TRUE;
5865 }
5866
5867 /* Merge backend specific data from an object file to the output
5868 object file when linking. */
5869
5870 static bfd_boolean
5871 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5872 {
5873 flagword out_flags;
5874 flagword in_flags;
5875 bfd_boolean flags_compatible = TRUE;
5876 asection *sec;
5877
5878 /* Check if we have the same endianness. */
5879 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5880 return FALSE;
5881
5882 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5883 return TRUE;
5884
5885 /* The input BFD must have had its flags initialised. */
5886 /* The following seems bogus to me -- The flags are initialized in
5887 the assembler but I don't think an elf_flags_init field is
5888 written into the object. */
5889 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5890
5891 in_flags = elf_elfheader (ibfd)->e_flags;
5892 out_flags = elf_elfheader (obfd)->e_flags;
5893
5894 if (!elf_flags_init (obfd))
5895 {
5896 /* If the input is the default architecture and had the default
5897 flags then do not bother setting the flags for the output
5898 architecture; instead allow future merges to do this. If no
5899 future merges ever set these flags then they will retain their
5900 uninitialised values, which, surprise surprise, correspond
5901 to the default values. */
5902 if (bfd_get_arch_info (ibfd)->the_default
5903 && elf_elfheader (ibfd)->e_flags == 0)
5904 return TRUE;
5905
5906 elf_flags_init (obfd) = TRUE;
5907 elf_elfheader (obfd)->e_flags = in_flags;
5908
5909 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5910 && bfd_get_arch_info (obfd)->the_default)
5911 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5912 bfd_get_mach (ibfd));
5913
5914 return TRUE;
5915 }
5916
5917 /* Identical flags must be compatible. */
5918 if (in_flags == out_flags)
5919 return TRUE;
5920
5921 /* Check to see if the input BFD actually contains any sections. If
5922 not, its flags may not have been initialised either, but it
5923 cannot actually cause any incompatibility. Do not short-circuit
5924 dynamic objects; their section list may be emptied by
5925 elf_link_add_object_symbols.
5926
5927 Also check to see if there are no code sections in the input.
5928 In this case there is no need to check for code specific flags.
5929 XXX - do we need to worry about floating-point format compatibility
5930 in data sections? */
5931 if (!(ibfd->flags & DYNAMIC))
5932 {
5933 bfd_boolean null_input_bfd = TRUE;
5934 bfd_boolean only_data_sections = TRUE;
5935
5936 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5937 {
5938 if ((bfd_get_section_flags (ibfd, sec)
5939 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5940 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5941 only_data_sections = FALSE;
5942
5943 null_input_bfd = FALSE;
5944 break;
5945 }
5946
5947 if (null_input_bfd || only_data_sections)
5948 return TRUE;
5949 }
5950
5951 return flags_compatible;
5952 }
5953
5954 /* Display the flags field. */
5955
5956 static bfd_boolean
5957 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5958 {
5959 FILE *file = (FILE *) ptr;
5960 unsigned long flags;
5961
5962 BFD_ASSERT (abfd != NULL && ptr != NULL);
5963
5964 /* Print normal ELF private data. */
5965 _bfd_elf_print_private_bfd_data (abfd, ptr);
5966
5967 flags = elf_elfheader (abfd)->e_flags;
5968 /* Ignore init flag - it may not be set, despite the flags field
5969 containing valid data. */
5970
5971 /* xgettext:c-format */
5972 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5973
5974 if (flags)
5975 fprintf (file, _("<Unrecognised flag bits set>"));
5976
5977 fputc ('\n', file);
5978
5979 return TRUE;
5980 }
5981
5982 /* Update the got entry reference counts for the section being removed. */
5983
5984 static bfd_boolean
5985 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5986 struct bfd_link_info *info,
5987 asection *sec,
5988 const Elf_Internal_Rela * relocs)
5989 {
5990 struct elf_aarch64_link_hash_table *htab;
5991 Elf_Internal_Shdr *symtab_hdr;
5992 struct elf_link_hash_entry **sym_hashes;
5993 struct elf_aarch64_local_symbol *locals;
5994 const Elf_Internal_Rela *rel, *relend;
5995
5996 if (info->relocatable)
5997 return TRUE;
5998
5999 htab = elf_aarch64_hash_table (info);
6000
6001 if (htab == NULL)
6002 return FALSE;
6003
6004 elf_section_data (sec)->local_dynrel = NULL;
6005
6006 symtab_hdr = &elf_symtab_hdr (abfd);
6007 sym_hashes = elf_sym_hashes (abfd);
6008
6009 locals = elf_aarch64_locals (abfd);
6010
6011 relend = relocs + sec->reloc_count;
6012 for (rel = relocs; rel < relend; rel++)
6013 {
6014 unsigned long r_symndx;
6015 unsigned int r_type;
6016 struct elf_link_hash_entry *h = NULL;
6017
6018 r_symndx = ELFNN_R_SYM (rel->r_info);
6019
6020 if (r_symndx >= symtab_hdr->sh_info)
6021 {
6022
6023 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6024 while (h->root.type == bfd_link_hash_indirect
6025 || h->root.type == bfd_link_hash_warning)
6026 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6027 }
6028 else
6029 {
6030 Elf_Internal_Sym *isym;
6031
6032 /* A local symbol. */
6033 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6034 abfd, r_symndx);
6035
6036 /* Check relocation against local STT_GNU_IFUNC symbol. */
6037 if (isym != NULL
6038 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6039 {
6040 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6041 if (h == NULL)
6042 abort ();
6043 }
6044 }
6045
6046 if (h)
6047 {
6048 struct elf_aarch64_link_hash_entry *eh;
6049 struct elf_dyn_relocs **pp;
6050 struct elf_dyn_relocs *p;
6051
6052 eh = (struct elf_aarch64_link_hash_entry *) h;
6053
6054 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6055 if (p->sec == sec)
6056 {
6057 /* Everything must go for SEC. */
6058 *pp = p->next;
6059 break;
6060 }
6061 }
6062
6063 r_type = ELFNN_R_TYPE (rel->r_info);
6064 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6065 {
6066 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6067 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6068 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6069 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6070 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6071 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6072 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6073 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6074 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6075 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6076 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6077 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6078 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6079 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6080 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6081 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6082 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6083 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6084 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6085 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6086 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6087 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6088 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6089 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6090 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6091 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6092 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6093 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6094 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6095 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6096 if (h != NULL)
6097 {
6098 if (h->got.refcount > 0)
6099 h->got.refcount -= 1;
6100
6101 if (h->type == STT_GNU_IFUNC)
6102 {
6103 if (h->plt.refcount > 0)
6104 h->plt.refcount -= 1;
6105 }
6106 }
6107 else if (locals != NULL)
6108 {
6109 if (locals[r_symndx].got_refcount > 0)
6110 locals[r_symndx].got_refcount -= 1;
6111 }
6112 break;
6113
6114 case BFD_RELOC_AARCH64_CALL26:
6115 case BFD_RELOC_AARCH64_JUMP26:
6116 /* If this is a local symbol then we resolve it
6117 directly without creating a PLT entry. */
6118 if (h == NULL)
6119 continue;
6120
6121 if (h->plt.refcount > 0)
6122 h->plt.refcount -= 1;
6123 break;
6124
6125 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6126 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6127 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6128 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6129 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6130 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6131 case BFD_RELOC_AARCH64_MOVW_G3:
6132 case BFD_RELOC_AARCH64_NN:
6133 if (h != NULL && info->executable)
6134 {
6135 if (h->plt.refcount > 0)
6136 h->plt.refcount -= 1;
6137 }
6138 break;
6139
6140 default:
6141 break;
6142 }
6143 }
6144
6145 return TRUE;
6146 }
6147
6148 /* Adjust a symbol defined by a dynamic object and referenced by a
6149 regular object. The current definition is in some section of the
6150 dynamic object, but we're not including those sections. We have to
6151 change the definition to something the rest of the link can
6152 understand. */
6153
6154 static bfd_boolean
6155 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6156 struct elf_link_hash_entry *h)
6157 {
6158 struct elf_aarch64_link_hash_table *htab;
6159 asection *s;
6160
6161 /* If this is a function, put it in the procedure linkage table. We
6162 will fill in the contents of the procedure linkage table later,
6163 when we know the address of the .got section. */
6164 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6165 {
6166 if (h->plt.refcount <= 0
6167 || (h->type != STT_GNU_IFUNC
6168 && (SYMBOL_CALLS_LOCAL (info, h)
6169 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6170 && h->root.type == bfd_link_hash_undefweak))))
6171 {
6172 /* This case can occur if we saw a CALL26 reloc in
6173 an input file, but the symbol wasn't referred to
6174 by a dynamic object or all references were
6175 garbage collected. In that case we can resolve the
6176 call directly and do not need a PLT entry. */
6177 h->plt.offset = (bfd_vma) - 1;
6178 h->needs_plt = 0;
6179 }
6180
6181 return TRUE;
6182 }
6183 else
6184 /* Otherwise, reset to -1. */
6185 h->plt.offset = (bfd_vma) - 1;
6186
6187
6188 /* If this is a weak symbol, and there is a real definition, the
6189 processor independent code will have arranged for us to see the
6190 real definition first, and we can just use the same value. */
6191 if (h->u.weakdef != NULL)
6192 {
6193 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6194 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6195 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6196 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6197 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6198 h->non_got_ref = h->u.weakdef->non_got_ref;
6199 return TRUE;
6200 }
6201
6202 /* If we are creating a shared library, we must presume that the
6203 only references to the symbol are via the global offset table.
6204 For such cases we need not do anything here; the relocations will
6205 be handled correctly by relocate_section. */
6206 if (info->shared)
6207 return TRUE;
6208
6209 /* If there are no references to this symbol that do not use the
6210 GOT, we don't need to generate a copy reloc. */
6211 if (!h->non_got_ref)
6212 return TRUE;
6213
6214 /* If -z nocopyreloc was given, we won't generate them either. */
6215 if (info->nocopyreloc)
6216 {
6217 h->non_got_ref = 0;
6218 return TRUE;
6219 }
6220
6221 /* We must allocate the symbol in our .dynbss section, which will
6222 become part of the .bss section of the executable. There will be
6223 an entry for this symbol in the .dynsym section. The dynamic
6224 object will contain position independent code, so all references
6225 from the dynamic object to this symbol will go through the global
6226 offset table. The dynamic linker will use the .dynsym entry to
6227 determine the address it must put in the global offset table, so
6228 both the dynamic object and the regular object will refer to the
6229 same memory location for the variable. */
6230
6231 htab = elf_aarch64_hash_table (info);
6232
6233 /* We must generate an R_AARCH64_COPY reloc to tell the dynamic linker
6234 to copy the initial value out of the dynamic object and into the
6235 runtime process image. */
6236 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6237 {
6238 htab->srelbss->size += RELOC_SIZE (htab);
6239 h->needs_copy = 1;
6240 }
6241
6242 s = htab->sdynbss;
6243
6244 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6245
6246 }
6247
6248 static bfd_boolean
6249 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6250 {
6251 struct elf_aarch64_local_symbol *locals;
6252 locals = elf_aarch64_locals (abfd);
6253 if (locals == NULL)
6254 {
6255 locals = (struct elf_aarch64_local_symbol *)
6256 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6257 if (locals == NULL)
6258 return FALSE;
6259 elf_aarch64_locals (abfd) = locals;
6260 }
6261 return TRUE;
6262 }
6263
6264 /* Create the .got section to hold the global offset table. */
6265
6266 static bfd_boolean
6267 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6268 {
6269 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6270 flagword flags;
6271 asection *s;
6272 struct elf_link_hash_entry *h;
6273 struct elf_link_hash_table *htab = elf_hash_table (info);
6274
6275 /* This function may be called more than once. */
6276 s = bfd_get_linker_section (abfd, ".got");
6277 if (s != NULL)
6278 return TRUE;
6279
6280 flags = bed->dynamic_sec_flags;
6281
6282 s = bfd_make_section_anyway_with_flags (abfd,
6283 (bed->rela_plts_and_copies_p
6284 ? ".rela.got" : ".rel.got"),
6285 (bed->dynamic_sec_flags
6286 | SEC_READONLY));
6287 if (s == NULL
6288 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6289 return FALSE;
6290 htab->srelgot = s;
6291
6292 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6293 if (s == NULL
6294 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6295 return FALSE;
6296 htab->sgot = s;
6297 htab->sgot->size += GOT_ENTRY_SIZE;
6298
6299 if (bed->want_got_sym)
6300 {
6301 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6302 (or .got.plt) section. We don't do this in the linker script
6303 because we don't want to define the symbol if we are not creating
6304 a global offset table. */
6305 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6306 "_GLOBAL_OFFSET_TABLE_");
6307 elf_hash_table (info)->hgot = h;
6308 if (h == NULL)
6309 return FALSE;
6310 }
6311
6312 if (bed->want_got_plt)
6313 {
6314 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6315 if (s == NULL
6316 || !bfd_set_section_alignment (abfd, s,
6317 bed->s->log_file_align))
6318 return FALSE;
6319 htab->sgotplt = s;
6320 }
6321
6322 /* The first bit of the global offset table is the header. */
6323 s->size += bed->got_header_size;
6324
6325 return TRUE;
6326 }
6327
6328 /* Look through the relocs for a section during the first phase. */
6329
6330 static bfd_boolean
6331 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6332 asection *sec, const Elf_Internal_Rela *relocs)
6333 {
6334 Elf_Internal_Shdr *symtab_hdr;
6335 struct elf_link_hash_entry **sym_hashes;
6336 const Elf_Internal_Rela *rel;
6337 const Elf_Internal_Rela *rel_end;
6338 asection *sreloc;
6339
6340 struct elf_aarch64_link_hash_table *htab;
6341
6342 if (info->relocatable)
6343 return TRUE;
6344
6345 BFD_ASSERT (is_aarch64_elf (abfd));
6346
6347 htab = elf_aarch64_hash_table (info);
6348 sreloc = NULL;
6349
6350 symtab_hdr = &elf_symtab_hdr (abfd);
6351 sym_hashes = elf_sym_hashes (abfd);
6352
6353 rel_end = relocs + sec->reloc_count;
6354 for (rel = relocs; rel < rel_end; rel++)
6355 {
6356 struct elf_link_hash_entry *h;
6357 unsigned long r_symndx;
6358 unsigned int r_type;
6359 bfd_reloc_code_real_type bfd_r_type;
6360 Elf_Internal_Sym *isym;
6361
6362 r_symndx = ELFNN_R_SYM (rel->r_info);
6363 r_type = ELFNN_R_TYPE (rel->r_info);
6364
6365 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6366 {
6367 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6368 r_symndx);
6369 return FALSE;
6370 }
6371
6372 if (r_symndx < symtab_hdr->sh_info)
6373 {
6374 /* A local symbol. */
6375 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6376 abfd, r_symndx);
6377 if (isym == NULL)
6378 return FALSE;
6379
6380 /* Check relocation against local STT_GNU_IFUNC symbol. */
6381 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6382 {
6383 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6384 TRUE);
6385 if (h == NULL)
6386 return FALSE;
6387
6388 /* Fake a STT_GNU_IFUNC symbol. */
6389 h->type = STT_GNU_IFUNC;
6390 h->def_regular = 1;
6391 h->ref_regular = 1;
6392 h->forced_local = 1;
6393 h->root.type = bfd_link_hash_defined;
6394 }
6395 else
6396 h = NULL;
6397 }
6398 else
6399 {
6400 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6401 while (h->root.type == bfd_link_hash_indirect
6402 || h->root.type == bfd_link_hash_warning)
6403 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6404
6405 /* PR15323, ref flags aren't set for references in the same
6406 object. */
6407 h->root.non_ir_ref = 1;
6408 }
6409
6410 /* Could be done earlier, if h were already available. */
6411 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6412
6413 if (h != NULL)
6414 {
6415 /* Create the ifunc sections for static executables. If we
6416 never see an indirect function symbol and are not building
6417 a static executable, those sections will be empty and
6418 won't appear in the output. */
6419 switch (bfd_r_type)
6420 {
6421 default:
6422 break;
6423
6424 case BFD_RELOC_AARCH64_ADD_LO12:
6425 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6426 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6427 case BFD_RELOC_AARCH64_CALL26:
6428 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6429 case BFD_RELOC_AARCH64_JUMP26:
6430 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6431 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6432 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6433 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6434 case BFD_RELOC_AARCH64_NN:
6435 if (htab->root.dynobj == NULL)
6436 htab->root.dynobj = abfd;
6437 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6438 return FALSE;
6439 break;
6440 }
6441
6442 /* It is referenced by a non-shared object. */
6443 h->ref_regular = 1;
6444 h->root.non_ir_ref = 1;
6445 }
6446
6447 switch (bfd_r_type)
6448 {
6449 case BFD_RELOC_AARCH64_NN:
6450
6451 /* We don't need to handle relocs into sections not going into
6452 the "real" output. */
6453 if ((sec->flags & SEC_ALLOC) == 0)
6454 break;
6455
6456 if (h != NULL)
6457 {
6458 if (!info->shared)
6459 h->non_got_ref = 1;
6460
6461 h->plt.refcount += 1;
6462 h->pointer_equality_needed = 1;
6463 }
6464
6465 /* No need to do anything if we're not creating a shared
6466 object. */
6467 if (! info->shared)
6468 break;
6469
6470 {
6471 struct elf_dyn_relocs *p;
6472 struct elf_dyn_relocs **head;
6473
6474 /* We must copy these reloc types into the output file.
6475 Create a reloc section in dynobj and make room for
6476 this reloc. */
6477 if (sreloc == NULL)
6478 {
6479 if (htab->root.dynobj == NULL)
6480 htab->root.dynobj = abfd;
6481
6482 sreloc = _bfd_elf_make_dynamic_reloc_section
6483 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6484
6485 if (sreloc == NULL)
6486 return FALSE;
6487 }
6488
6489 /* If this is a global symbol, we count the number of
6490 relocations we need for this symbol. */
6491 if (h != NULL)
6492 {
6493 struct elf_aarch64_link_hash_entry *eh;
6494 eh = (struct elf_aarch64_link_hash_entry *) h;
6495 head = &eh->dyn_relocs;
6496 }
6497 else
6498 {
6499 /* Track dynamic relocs needed for local syms too.
6500 We really need local syms available to do this
6501 easily. Oh well. */
6502
6503 asection *s;
6504 void **vpp;
6505
6506 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6507 abfd, r_symndx);
6508 if (isym == NULL)
6509 return FALSE;
6510
6511 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6512 if (s == NULL)
6513 s = sec;
6514
6515 /* Beware of type punned pointers vs strict aliasing
6516 rules. */
6517 vpp = &(elf_section_data (s)->local_dynrel);
6518 head = (struct elf_dyn_relocs **) vpp;
6519 }
6520
6521 p = *head;
6522 if (p == NULL || p->sec != sec)
6523 {
6524 bfd_size_type amt = sizeof *p;
6525 p = ((struct elf_dyn_relocs *)
6526 bfd_zalloc (htab->root.dynobj, amt));
6527 if (p == NULL)
6528 return FALSE;
6529 p->next = *head;
6530 *head = p;
6531 p->sec = sec;
6532 }
6533
6534 p->count += 1;
6535
6536 }
6537 break;
6538
6539 /* RR: We probably want to keep a consistency check that
6540 there are no dangling GOT_PAGE relocs. */
6541 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6542 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6543 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6544 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6545 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6546 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6547 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6548 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6549 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6550 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6551 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6552 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6553 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6554 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6555 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6556 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6557 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6558 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6560 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6561 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6562 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6563 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6564 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6565 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6566 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6567 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6568 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6569 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6570 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6571 {
6572 unsigned got_type;
6573 unsigned old_got_type;
6574
6575 got_type = aarch64_reloc_got_type (bfd_r_type);
6576
6577 if (h)
6578 {
6579 h->got.refcount += 1;
6580 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6581 }
6582 else
6583 {
6584 struct elf_aarch64_local_symbol *locals;
6585
6586 if (!elfNN_aarch64_allocate_local_symbols
6587 (abfd, symtab_hdr->sh_info))
6588 return FALSE;
6589
6590 locals = elf_aarch64_locals (abfd);
6591 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6592 locals[r_symndx].got_refcount += 1;
6593 old_got_type = locals[r_symndx].got_type;
6594 }
6595
6596 /* If a variable is accessed with both general dynamic TLS
6597 methods, two slots may be created. */
6598 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6599 got_type |= old_got_type;
6600
6601 /* We will already have issued an error message if there
6602 is a TLS/non-TLS mismatch, based on the symbol type.
6603 So just combine any TLS types needed. */
6604 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6605 && got_type != GOT_NORMAL)
6606 got_type |= old_got_type;
6607
6608 /* If the symbol is accessed by both IE and GD methods, we
6609 are able to relax. Turn off the GD flag, without
6610 disturbing any other kind of TLS type that may be
6611 involved. */
6612 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6613 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6614
6615 if (old_got_type != got_type)
6616 {
6617 if (h != NULL)
6618 elf_aarch64_hash_entry (h)->got_type = got_type;
6619 else
6620 {
6621 struct elf_aarch64_local_symbol *locals;
6622 locals = elf_aarch64_locals (abfd);
6623 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6624 locals[r_symndx].got_type = got_type;
6625 }
6626 }
6627
6628 if (htab->root.dynobj == NULL)
6629 htab->root.dynobj = abfd;
6630 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6631 return FALSE;
6632 break;
6633 }
6634
6635 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6636 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6637 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6638 case BFD_RELOC_AARCH64_MOVW_G3:
6639 if (info->shared)
6640 {
6641 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6642 (*_bfd_error_handler)
6643 (_("%B: relocation %s against `%s' can not be used when making "
6644 "a shared object; recompile with -fPIC"),
6645 abfd, elfNN_aarch64_howto_table[howto_index].name,
6646 (h) ? h->root.root.string : "a local symbol");
6647 bfd_set_error (bfd_error_bad_value);
6648 return FALSE;
6649 }
6650
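/* Control falls through here: when not building a shared object the
   MOVW_G* cases share the copy-reloc and PLT bookkeeping of the
   PC-relative cases below.  */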
6651 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6652 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6653 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6654 if (h != NULL && info->executable)
6655 {
6656 /* If this reloc is in a read-only section, we might
6657 need a copy reloc. We can't check reliably at this
6658 stage whether the section is read-only, as input
6659 sections have not yet been mapped to output sections.
6660 Tentatively set the flag for now, and correct in
6661 adjust_dynamic_symbol. */
6662 h->non_got_ref = 1;
6663 h->plt.refcount += 1;
6664 h->pointer_equality_needed = 1;
6665 }
6666 /* FIXME: RR: we need to handle these in shared libraries
6667 and essentially bomb out, as these are non-PIC
6668 relocations in shared libraries. */
6669 break;
6670
6671 case BFD_RELOC_AARCH64_CALL26:
6672 case BFD_RELOC_AARCH64_JUMP26:
6673 /* If this is a local symbol then we resolve it
6674 directly without creating a PLT entry. */
6675 if (h == NULL)
6676 continue;
6677
6678 h->needs_plt = 1;
6679 if (h->plt.refcount <= 0)
6680 h->plt.refcount = 1;
6681 else
6682 h->plt.refcount += 1;
6683 break;
6684
6685 default:
6686 break;
6687 }
6688 }
6689
6690 return TRUE;
6691 }
6692
6693 /* Treat mapping symbols as special target symbols. */
6694
6695 static bfd_boolean
6696 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6697 asymbol *sym)
6698 {
6699 return bfd_is_aarch64_special_symbol_name (sym->name,
6700 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6701 }
6702
6703 /* This is a copy of elf_find_function () from elf.c except that
6704 AArch64 mapping symbols are ignored when looking for function names. */
6705
6706 static bfd_boolean
6707 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6708 asymbol **symbols,
6709 asection *section,
6710 bfd_vma offset,
6711 const char **filename_ptr,
6712 const char **functionname_ptr)
6713 {
6714 const char *filename = NULL;
6715 asymbol *func = NULL;
6716 bfd_vma low_func = 0;
6717 asymbol **p;
6718
6719 for (p = symbols; *p != NULL; p++)
6720 {
6721 elf_symbol_type *q;
6722
6723 q = (elf_symbol_type *) * p;
6724
6725 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6726 {
6727 default:
6728 break;
6729 case STT_FILE:
6730 filename = bfd_asymbol_name (&q->symbol);
6731 break;
6732 case STT_FUNC:
6733 case STT_NOTYPE:
6734 /* Skip mapping symbols. */
6735 if ((q->symbol.flags & BSF_LOCAL)
6736 && (bfd_is_aarch64_special_symbol_name
6737 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6738 continue;
6739 /* Fall through. */
6740 if (bfd_get_section (&q->symbol) == section
6741 && q->symbol.value >= low_func && q->symbol.value <= offset)
6742 {
6743 func = (asymbol *) q;
6744 low_func = q->symbol.value;
6745 }
6746 break;
6747 }
6748 }
6749
6750 if (func == NULL)
6751 return FALSE;
6752
6753 if (filename_ptr)
6754 *filename_ptr = filename;
6755 if (functionname_ptr)
6756 *functionname_ptr = bfd_asymbol_name (func);
6757
6758 return TRUE;
6759 }
6760
6761
6762 /* Find the nearest line to a particular section and offset, for error
6763 reporting. This code is a duplicate of the code in elf.c, except
6764 that it uses aarch64_elf_find_function. */
6765
6766 static bfd_boolean
6767 elfNN_aarch64_find_nearest_line (bfd *abfd,
6768 asymbol **symbols,
6769 asection *section,
6770 bfd_vma offset,
6771 const char **filename_ptr,
6772 const char **functionname_ptr,
6773 unsigned int *line_ptr,
6774 unsigned int *discriminator_ptr)
6775 {
6776 bfd_boolean found = FALSE;
6777
6778 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6779 filename_ptr, functionname_ptr,
6780 line_ptr, discriminator_ptr,
6781 dwarf_debug_sections, 0,
6782 &elf_tdata (abfd)->dwarf2_find_line_info))
6783 {
6784 if (!*functionname_ptr)
6785 aarch64_elf_find_function (abfd, symbols, section, offset,
6786 *filename_ptr ? NULL : filename_ptr,
6787 functionname_ptr);
6788
6789 return TRUE;
6790 }
6791
6792 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6793 toolchain uses DWARF1. */
6794
6795 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6796 &found, filename_ptr,
6797 functionname_ptr, line_ptr,
6798 &elf_tdata (abfd)->line_info))
6799 return FALSE;
6800
6801 if (found && (*functionname_ptr || *line_ptr))
6802 return TRUE;
6803
6804 if (symbols == NULL)
6805 return FALSE;
6806
6807 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6808 filename_ptr, functionname_ptr))
6809 return FALSE;
6810
6811 *line_ptr = 0;
6812 return TRUE;
6813 }
6814
6815 static bfd_boolean
6816 elfNN_aarch64_find_inliner_info (bfd *abfd,
6817 const char **filename_ptr,
6818 const char **functionname_ptr,
6819 unsigned int *line_ptr)
6820 {
6821 bfd_boolean found;
6822 found = _bfd_dwarf2_find_inliner_info
6823 (abfd, filename_ptr,
6824 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6825 return found;
6826 }
6827
6828
6829 static void
6830 elfNN_aarch64_post_process_headers (bfd *abfd,
6831 struct bfd_link_info *link_info)
6832 {
6833 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6834
6835 i_ehdrp = elf_elfheader (abfd);
6836 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6837
6838 _bfd_elf_post_process_headers (abfd, link_info);
6839 }
6840
6841 static enum elf_reloc_type_class
6842 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6843 const asection *rel_sec ATTRIBUTE_UNUSED,
6844 const Elf_Internal_Rela *rela)
6845 {
6846 switch ((int) ELFNN_R_TYPE (rela->r_info))
6847 {
6848 case AARCH64_R (RELATIVE):
6849 return reloc_class_relative;
6850 case AARCH64_R (JUMP_SLOT):
6851 return reloc_class_plt;
6852 case AARCH64_R (COPY):
6853 return reloc_class_copy;
6854 default:
6855 return reloc_class_normal;
6856 }
6857 }
6858
6859 /* Handle an AArch64 specific section when reading an object file. This is
6860 called when bfd_section_from_shdr finds a section with an unknown
6861 type. */
6862
6863 static bfd_boolean
6864 elfNN_aarch64_section_from_shdr (bfd *abfd,
6865 Elf_Internal_Shdr *hdr,
6866 const char *name, int shindex)
6867 {
6868 /* There ought to be a place to keep ELF backend specific flags, but
6869 at the moment there isn't one. We just keep track of the
6870 sections by their name, instead. Fortunately, the ABI gives
6871 names for all the AArch64 specific sections, so we will probably get
6872 away with this. */
6873 switch (hdr->sh_type)
6874 {
6875 case SHT_AARCH64_ATTRIBUTES:
6876 break;
6877
6878 default:
6879 return FALSE;
6880 }
6881
6882 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6883 return FALSE;
6884
6885 return TRUE;
6886 }
6887
6888 /* A structure used to record a list of sections, independently
6889 of the next and prev fields in the asection structure. */
6890 typedef struct section_list
6891 {
6892 asection *sec;
6893 struct section_list *next;
6894 struct section_list *prev;
6895 }
6896 section_list;
6897
6898 /* Unfortunately we need to keep a list of sections for which
6899 an _aarch64_elf_section_data structure has been allocated. This
6900 is because it is possible for functions like elfNN_aarch64_write_section
6901 to be called on a section which has had an elf_data_structure
6902 allocated for it (and so the used_by_bfd field is valid) but
6903 for which the AArch64 extended version of this structure - the
6904 _aarch64_elf_section_data structure - has not been allocated. */
6905 static section_list *sections_with_aarch64_elf_section_data = NULL;
6906
6907 static void
6908 record_section_with_aarch64_elf_section_data (asection *sec)
6909 {
6910 struct section_list *entry;
6911
6912 entry = bfd_malloc (sizeof (*entry));
6913 if (entry == NULL)
6914 return;
6915 entry->sec = sec;
6916 entry->next = sections_with_aarch64_elf_section_data;
6917 entry->prev = NULL;
6918 if (entry->next != NULL)
6919 entry->next->prev = entry;
6920 sections_with_aarch64_elf_section_data = entry;
6921 }
6922
6923 static struct section_list *
6924 find_aarch64_elf_section_entry (asection *sec)
6925 {
6926 struct section_list *entry;
6927 static struct section_list *last_entry = NULL;
6928
6929 /* This is a short cut for the typical case where the sections are added
6930 to the sections_with_aarch64_elf_section_data list in forward order and
6931 then looked up here in backwards order. This makes a real difference
6932 to the ld-srec/sec64k.exp linker test. */
6933 entry = sections_with_aarch64_elf_section_data;
6934 if (last_entry != NULL)
6935 {
6936 if (last_entry->sec == sec)
6937 entry = last_entry;
6938 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6939 entry = last_entry->next;
6940 }
6941
6942 for (; entry; entry = entry->next)
6943 if (entry->sec == sec)
6944 break;
6945
6946 if (entry)
6947 /* Record the entry prior to this one - it is the entry we are
6948 most likely to want to locate next time. Also this way if we
6949 have been called from
6950 unrecord_section_with_aarch64_elf_section_data () we will not
6951 be caching a pointer that is about to be freed. */
6952 last_entry = entry->prev;
6953
6954 return entry;
6955 }
6956
6957 static void
6958 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6959 {
6960 struct section_list *entry;
6961
6962 entry = find_aarch64_elf_section_entry (sec);
6963
6964 if (entry)
6965 {
6966 if (entry->prev != NULL)
6967 entry->prev->next = entry->next;
6968 if (entry->next != NULL)
6969 entry->next->prev = entry->prev;
6970 if (entry == sections_with_aarch64_elf_section_data)
6971 sections_with_aarch64_elf_section_data = entry->next;
6972 free (entry);
6973 }
6974 }
6975
6976
6977 typedef struct
6978 {
6979 void *finfo;
6980 struct bfd_link_info *info;
6981 asection *sec;
6982 int sec_shndx;
6983 int (*func) (void *, const char *, Elf_Internal_Sym *,
6984 asection *, struct elf_link_hash_entry *);
6985 } output_arch_syminfo;
6986
6987 enum map_symbol_type
6988 {
6989 AARCH64_MAP_INSN,
6990 AARCH64_MAP_DATA
6991 };
6992
6993
6994 /* Output a single mapping symbol. */
6995
6996 static bfd_boolean
6997 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6998 enum map_symbol_type type, bfd_vma offset)
6999 {
7000 static const char *names[2] = { "$x", "$d" };
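/* "$x" marks the start of a sequence of A64 instructions and "$d"
   the start of a sequence of data, following the AArch64 ELF mapping
   symbol convention; the order matches enum map_symbol_type.  */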
7001 Elf_Internal_Sym sym;
7002
7003 sym.st_value = (osi->sec->output_section->vma
7004 + osi->sec->output_offset + offset);
7005 sym.st_size = 0;
7006 sym.st_other = 0;
7007 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7008 sym.st_shndx = osi->sec_shndx;
7009 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7010 }
7011
7012
7013
7014 /* Output mapping symbols for PLT entries associated with H. */
7015
7016 static bfd_boolean
7017 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
7018 {
7019 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
7020 bfd_vma addr;
7021
7022 if (h->root.type == bfd_link_hash_indirect)
7023 return TRUE;
7024
7025 if (h->root.type == bfd_link_hash_warning)
7026 /* When warning symbols are created, they **replace** the "real"
7027 entry in the hash table, thus we never get to see the real
7028 symbol in a hash traversal. So look at it now. */
7029 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7030
7031 if (h->plt.offset == (bfd_vma) - 1)
7032 return TRUE;
7033
7034 addr = h->plt.offset;
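/* 32 is the size of the initial PLT0 entry in this backend, so the
   check below emits a "$x" mapping symbol only for the first
   per-symbol PLT slot; a single code mapping symbol is presumably
   sufficient since the PLT contains only instructions.  */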
7035 if (addr == 32)
7036 {
7037 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7038 return FALSE;
7039 }
7040 return TRUE;
7041 }
7042
7043
7044 /* Output a single local symbol for a generated stub. */
7045
7046 static bfd_boolean
7047 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7048 bfd_vma offset, bfd_vma size)
7049 {
7050 Elf_Internal_Sym sym;
7051
7052 sym.st_value = (osi->sec->output_section->vma
7053 + osi->sec->output_offset + offset);
7054 sym.st_size = size;
7055 sym.st_other = 0;
7056 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7057 sym.st_shndx = osi->sec_shndx;
7058 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7059 }
7060
7061 static bfd_boolean
7062 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7063 {
7064 struct elf_aarch64_stub_hash_entry *stub_entry;
7065 asection *stub_sec;
7066 bfd_vma addr;
7067 char *stub_name;
7068 output_arch_syminfo *osi;
7069
7070 /* Massage our args to the form they really have. */
7071 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7072 osi = (output_arch_syminfo *) in_arg;
7073
7074 stub_sec = stub_entry->stub_sec;
7075
7076 /* Ensure this stub is attached to the current section being
7077 processed. */
7078 if (stub_sec != osi->sec)
7079 return TRUE;
7080
7081 addr = (bfd_vma) stub_entry->stub_offset;
7082
7083 stub_name = stub_entry->output_name;
7084
7085 switch (stub_entry->stub_type)
7086 {
7087 case aarch64_stub_adrp_branch:
7088 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7089 sizeof (aarch64_adrp_branch_stub)))
7090 return FALSE;
7091 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7092 return FALSE;
7093 break;
7094 case aarch64_stub_long_branch:
7095 if (!elfNN_aarch64_output_stub_sym
7096 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7097 return FALSE;
7098 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7099 return FALSE;
7100 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7101 return FALSE;
7102 break;
7103 case aarch64_stub_erratum_835769_veneer:
7104 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7105 sizeof (aarch64_erratum_835769_stub)))
7106 return FALSE;
7107 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7108 return FALSE;
7109 break;
7110 case aarch64_stub_erratum_843419_veneer:
7111 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7112 sizeof (aarch64_erratum_843419_stub)))
7113 return FALSE;
7114 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7115 return FALSE;
7116 break;
7117
7118 default:
7119 abort ();
7120 }
7121
7122 return TRUE;
7123 }
7124
7125 /* Output mapping symbols for linker generated sections. */
7126
7127 static bfd_boolean
7128 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7129 struct bfd_link_info *info,
7130 void *finfo,
7131 int (*func) (void *, const char *,
7132 Elf_Internal_Sym *,
7133 asection *,
7134 struct elf_link_hash_entry
7135 *))
7136 {
7137 output_arch_syminfo osi;
7138 struct elf_aarch64_link_hash_table *htab;
7139
7140 htab = elf_aarch64_hash_table (info);
7141
7142 osi.finfo = finfo;
7143 osi.info = info;
7144 osi.func = func;
7145
7146 /* Long call stubs. */
7147 if (htab->stub_bfd && htab->stub_bfd->sections)
7148 {
7149 asection *stub_sec;
7150
7151 for (stub_sec = htab->stub_bfd->sections;
7152 stub_sec != NULL; stub_sec = stub_sec->next)
7153 {
7154 /* Ignore non-stub sections. */
7155 if (!strstr (stub_sec->name, STUB_SUFFIX))
7156 continue;
7157
7158 osi.sec = stub_sec;
7159
7160 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7161 (output_bfd, osi.sec->output_section);
7162
7163 /* The first instruction in a stub is always a branch. */
7164 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7165 return FALSE;
7166
7167 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7168 &osi);
7169 }
7170 }
7171
7172 /* Finally, output mapping symbols for the PLT. */
7173 if (!htab->root.splt || htab->root.splt->size == 0)
7174 return TRUE;
7175
7176 /* For now live without mapping symbols for the plt. */
7177 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7178 (output_bfd, htab->root.splt->output_section);
7179 osi.sec = htab->root.splt;
7180
7181 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7182 (void *) &osi);
7183
7184 return TRUE;
7185
7186 }
7187
7188 /* Allocate target specific section data. */
7189
7190 static bfd_boolean
7191 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7192 {
7193 if (!sec->used_by_bfd)
7194 {
7195 _aarch64_elf_section_data *sdata;
7196 bfd_size_type amt = sizeof (*sdata);
7197
7198 sdata = bfd_zalloc (abfd, amt);
7199 if (sdata == NULL)
7200 return FALSE;
7201 sec->used_by_bfd = sdata;
7202 }
7203
7204 record_section_with_aarch64_elf_section_data (sec);
7205
7206 return _bfd_elf_new_section_hook (abfd, sec);
7207 }
7208
7209
7210 static void
7211 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7212 asection *sec,
7213 void *ignore ATTRIBUTE_UNUSED)
7214 {
7215 unrecord_section_with_aarch64_elf_section_data (sec);
7216 }
7217
7218 static bfd_boolean
7219 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7220 {
7221 if (abfd->sections)
7222 bfd_map_over_sections (abfd,
7223 unrecord_section_via_map_over_sections, NULL);
7224
7225 return _bfd_elf_close_and_cleanup (abfd);
7226 }
7227
7228 static bfd_boolean
7229 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7230 {
7231 if (abfd->sections)
7232 bfd_map_over_sections (abfd,
7233 unrecord_section_via_map_over_sections, NULL);
7234
7235 return _bfd_free_cached_info (abfd);
7236 }
7237
7238 /* Create dynamic sections. This is different from the ARM backend in that
7239 the got, plt, gotplt and their relocation sections are all created in the
7240 standard part of the bfd elf backend. */
7241
7242 static bfd_boolean
7243 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7244 struct bfd_link_info *info)
7245 {
7246 struct elf_aarch64_link_hash_table *htab;
7247
7248 /* We need to create .got section. */
7249 if (!aarch64_elf_create_got_section (dynobj, info))
7250 return FALSE;
7251
7252 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7253 return FALSE;
7254
7255 htab = elf_aarch64_hash_table (info);
7256 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7257 if (!info->shared)
7258 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7259
7260 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7261 abort ();
7262
7263 return TRUE;
7264 }
7265
7266
7267 /* Allocate space in .plt, .got and associated reloc sections for
7268 dynamic relocs. */
7269
7270 static bfd_boolean
7271 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7272 {
7273 struct bfd_link_info *info;
7274 struct elf_aarch64_link_hash_table *htab;
7275 struct elf_aarch64_link_hash_entry *eh;
7276 struct elf_dyn_relocs *p;
7277
7278 /* An example of a bfd_link_hash_indirect symbol is a versioned
7279 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7280 -> __gxx_personality_v0(bfd_link_hash_defined)
7281
7282 There is no need to process bfd_link_hash_indirect symbols here
7283 because we will also be presented with the concrete instance of
7284 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7285 called to copy all relevant data from the generic to the concrete
7286 symbol instance.
7287 */
7288 if (h->root.type == bfd_link_hash_indirect)
7289 return TRUE;
7290
7291 if (h->root.type == bfd_link_hash_warning)
7292 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7293
7294 info = (struct bfd_link_info *) inf;
7295 htab = elf_aarch64_hash_table (info);
7296
7297 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7298 here if it is defined and referenced in a non-shared object. */
7299 if (h->type == STT_GNU_IFUNC
7300 && h->def_regular)
7301 return TRUE;
7302 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7303 {
7304 /* Make sure this symbol is output as a dynamic symbol.
7305 Undefined weak syms won't yet be marked as dynamic. */
7306 if (h->dynindx == -1 && !h->forced_local)
7307 {
7308 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7309 return FALSE;
7310 }
7311
7312 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7313 {
7314 asection *s = htab->root.splt;
7315
7316 /* If this is the first .plt entry, make room for the special
7317 first entry. */
7318 if (s->size == 0)
7319 s->size += htab->plt_header_size;
7320
7321 h->plt.offset = s->size;
7322
7323 /* If this symbol is not defined in a regular file, and we are
7324 not generating a shared library, then set the symbol to this
7325 location in the .plt. This is required to make function
7326 pointers compare as equal between the normal executable and
7327 the shared library. */
7328 if (!info->shared && !h->def_regular)
7329 {
7330 h->root.u.def.section = s;
7331 h->root.u.def.value = h->plt.offset;
7332 }
7333
7334 /* Make room for this entry. For now we only create the
7335 small model PLT entries. We later need to find a way
7336 of relaxing into these from the large model PLT entries. */
7337 s->size += PLT_SMALL_ENTRY_SIZE;
7338
7339 /* We also need to make an entry in the .got.plt section, which
7340 will be placed in the .got section by the linker script. */
7341 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7342
7343 /* We also need to make an entry in the .rela.plt section. */
7344 htab->root.srelplt->size += RELOC_SIZE (htab);
7345
7346 /* We need to ensure that all GOT entries that serve the PLT
7347 are consecutive with the special GOT slots [0] [1] and
7348 [2]. Any additional relocations, such as
7349 R_AARCH64_TLSDESC, must be placed after the PLT related
7350 entries. We abuse the reloc_count such that during
7351 sizing we adjust reloc_count to indicate the number of
7352 PLT related reserved entries. In subsequent phases when
7353 filling in the contents of the reloc entries, PLT related
7354 entries are placed by computing their PLT index (0
7355 .. reloc_count), while other non-PLT relocs are placed
7356 at the slot indicated by reloc_count and reloc_count is
7357 updated. */
7358
7359 htab->root.srelplt->reloc_count++;
7360 }
7361 else
7362 {
7363 h->plt.offset = (bfd_vma) - 1;
7364 h->needs_plt = 0;
7365 }
7366 }
7367 else
7368 {
7369 h->plt.offset = (bfd_vma) - 1;
7370 h->needs_plt = 0;
7371 }
7372
7373 eh = (struct elf_aarch64_link_hash_entry *) h;
7374 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7375
7376 if (h->got.refcount > 0)
7377 {
7378 bfd_boolean dyn;
7379 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7380
7381 h->got.offset = (bfd_vma) - 1;
7382
7383 dyn = htab->root.dynamic_sections_created;
7384
7385 /* Make sure this symbol is output as a dynamic symbol.
7386 Undefined weak syms won't yet be marked as dynamic. */
7387 if (dyn && h->dynindx == -1 && !h->forced_local)
7388 {
7389 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7390 return FALSE;
7391 }
7392
7393 if (got_type == GOT_UNKNOWN)
7394 {
7395 }
7396 else if (got_type == GOT_NORMAL)
7397 {
7398 h->got.offset = htab->root.sgot->size;
7399 htab->root.sgot->size += GOT_ENTRY_SIZE;
7400 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7401 || h->root.type != bfd_link_hash_undefweak)
7402 && (info->shared
7403 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7404 {
7405 htab->root.srelgot->size += RELOC_SIZE (htab);
7406 }
7407 }
7408 else
7409 {
7410 int indx;
7411 if (got_type & GOT_TLSDESC_GD)
7412 {
7413 eh->tlsdesc_got_jump_table_offset =
7414 (htab->root.sgotplt->size
7415 - aarch64_compute_jump_table_size (htab));
7416 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7417 h->got.offset = (bfd_vma) - 2;
7418 }
7419
7420 if (got_type & GOT_TLS_GD)
7421 {
7422 h->got.offset = htab->root.sgot->size;
7423 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7424 }
7425
7426 if (got_type & GOT_TLS_IE)
7427 {
7428 h->got.offset = htab->root.sgot->size;
7429 htab->root.sgot->size += GOT_ENTRY_SIZE;
7430 }
7431
7432 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7433 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7434 || h->root.type != bfd_link_hash_undefweak)
7435 && (info->shared
7436 || indx != 0
7437 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7438 {
7439 if (got_type & GOT_TLSDESC_GD)
7440 {
7441 htab->root.srelplt->size += RELOC_SIZE (htab);
7442 /* Note reloc_count not incremented here! We have
7443 already adjusted reloc_count for this relocation
7444 type. */
7445
7446 /* TLSDESC PLT is now needed, but not yet determined. */
7447 htab->tlsdesc_plt = (bfd_vma) - 1;
7448 }
7449
7450 if (got_type & GOT_TLS_GD)
7451 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7452
7453 if (got_type & GOT_TLS_IE)
7454 htab->root.srelgot->size += RELOC_SIZE (htab);
7455 }
7456 }
7457 }
7458 else
7459 {
7460 h->got.offset = (bfd_vma) - 1;
7461 }
7462
7463 if (eh->dyn_relocs == NULL)
7464 return TRUE;
7465
7466 /* In the shared -Bsymbolic case, discard space allocated for
7467 dynamic pc-relative relocs against symbols which turn out to be
7468 defined in regular objects. For the normal shared case, discard
7469 space for pc-relative relocs that have become local due to symbol
7470 visibility changes. */
7471
7472 if (info->shared)
7473 {
7474 /* Relocs that use pc_count are those that appear on a call
7475 insn, or certain REL relocs that can be generated via assembly.
7476 We want calls to protected symbols to resolve directly to the
7477 function rather than going via the plt. If people want
7478 function pointer comparisons to work as expected then they
7479 should avoid writing weird assembly. */
7480 if (SYMBOL_CALLS_LOCAL (info, h))
7481 {
7482 struct elf_dyn_relocs **pp;
7483
7484 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7485 {
7486 p->count -= p->pc_count;
7487 p->pc_count = 0;
7488 if (p->count == 0)
7489 *pp = p->next;
7490 else
7491 pp = &p->next;
7492 }
7493 }
7494
7495 /* Also discard relocs on undefined weak syms with non-default
7496 visibility. */
7497 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7498 {
7499 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7500 eh->dyn_relocs = NULL;
7501
7502 /* Make sure undefined weak symbols are output as a dynamic
7503 symbol in PIEs. */
7504 else if (h->dynindx == -1
7505 && !h->forced_local
7506 && !bfd_elf_link_record_dynamic_symbol (info, h))
7507 return FALSE;
7508 }
7509
7510 }
7511 else if (ELIMINATE_COPY_RELOCS)
7512 {
7513 /* For the non-shared case, discard space for relocs against
7514 symbols which turn out to need copy relocs or are not
7515 dynamic. */
7516
7517 if (!h->non_got_ref
7518 && ((h->def_dynamic
7519 && !h->def_regular)
7520 || (htab->root.dynamic_sections_created
7521 && (h->root.type == bfd_link_hash_undefweak
7522 || h->root.type == bfd_link_hash_undefined))))
7523 {
7524 /* Make sure this symbol is output as a dynamic symbol.
7525 Undefined weak syms won't yet be marked as dynamic. */
7526 if (h->dynindx == -1
7527 && !h->forced_local
7528 && !bfd_elf_link_record_dynamic_symbol (info, h))
7529 return FALSE;
7530
7531 /* If that succeeded, we know we'll be keeping all the
7532 relocs. */
7533 if (h->dynindx != -1)
7534 goto keep;
7535 }
7536
7537 eh->dyn_relocs = NULL;
7538
7539 keep:;
7540 }
7541
7542 /* Finally, allocate space. */
7543 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7544 {
7545 asection *sreloc;
7546
7547 sreloc = elf_section_data (p->sec)->sreloc;
7548
7549 BFD_ASSERT (sreloc != NULL);
7550
7551 sreloc->size += p->count * RELOC_SIZE (htab);
7552 }
7553
7554 return TRUE;
7555 }
7556
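/* To recap the sizing just performed for each GOT-referenced symbol:
   a GOT_NORMAL or GOT_TLS_IE entry takes one GOT_ENTRY_SIZE slot in
   .got, GOT_TLS_GD takes two, and GOT_TLSDESC_GD takes a pair of
   .got.plt slots placed after the PLT jump slots.  Each entry that
   needs a dynamic relocation grows .rela.got by RELOC_SIZE (twice for
   GOT_TLS_GD), except the TLSDESC pair, whose single relocation is
   accounted to .rela.plt without bumping its reloc_count.  */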
7557 /* Allocate space in .plt, .got and associated reloc sections for
7558 ifunc dynamic relocs. */
7559
7560 static bfd_boolean
7561 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7562 void *inf)
7563 {
7564 struct bfd_link_info *info;
7565 struct elf_aarch64_link_hash_table *htab;
7566 struct elf_aarch64_link_hash_entry *eh;
7567
7568 /* An example of a bfd_link_hash_indirect symbol is a versioned
7569 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7570 -> __gxx_personality_v0(bfd_link_hash_defined)
7571
7572 There is no need to process bfd_link_hash_indirect symbols here
7573 because we will also be presented with the concrete instance of
7574 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7575 called to copy all relevant data from the generic to the concrete
7576 symbol instance.
7577 */
7578 if (h->root.type == bfd_link_hash_indirect)
7579 return TRUE;
7580
7581 if (h->root.type == bfd_link_hash_warning)
7582 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7583
7584 info = (struct bfd_link_info *) inf;
7585 htab = elf_aarch64_hash_table (info);
7586
7587 eh = (struct elf_aarch64_link_hash_entry *) h;
7588
7589 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7590 here if it is defined and referenced in a non-shared object. */
7591 if (h->type == STT_GNU_IFUNC
7592 && h->def_regular)
7593 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7594 &eh->dyn_relocs,
7595 htab->plt_entry_size,
7596 htab->plt_header_size,
7597 GOT_ENTRY_SIZE);
7598 return TRUE;
7599 }
7600
7601 /* Allocate space in .plt, .got and associated reloc sections for
7602 local dynamic relocs. */
7603
7604 static bfd_boolean
7605 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7606 {
7607 struct elf_link_hash_entry *h
7608 = (struct elf_link_hash_entry *) *slot;
7609
7610 if (h->type != STT_GNU_IFUNC
7611 || !h->def_regular
7612 || !h->ref_regular
7613 || !h->forced_local
7614 || h->root.type != bfd_link_hash_defined)
7615 abort ();
7616
7617 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7618 }
7619
7620 /* Allocate space in .plt, .got and associated reloc sections for
7621 local ifunc dynamic relocs. */
7622
7623 static bfd_boolean
7624 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7625 {
7626 struct elf_link_hash_entry *h
7627 = (struct elf_link_hash_entry *) *slot;
7628
7629 if (h->type != STT_GNU_IFUNC
7630 || !h->def_regular
7631 || !h->ref_regular
7632 || !h->forced_local
7633 || h->root.type != bfd_link_hash_defined)
7634 abort ();
7635
7636 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7637 }
7638
7639 /* Find any dynamic relocs that apply to read-only sections. */
7640
7641 static bfd_boolean
7642 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7643 {
7644 struct elf_aarch64_link_hash_entry * eh;
7645 struct elf_dyn_relocs * p;
7646
7647 eh = (struct elf_aarch64_link_hash_entry *) h;
7648 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7649 {
7650 asection *s = p->sec;
7651
7652 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7653 {
7654 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7655
7656 info->flags |= DF_TEXTREL;
7657
7658 /* Not an error, just cut short the traversal. */
7659 return FALSE;
7660 }
7661 }
7662 return TRUE;
7663 }
7664
7665 /* This is the most important function of all. Innocuously named
7666 though! */
7667 static bfd_boolean
7668 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7669 struct bfd_link_info *info)
7670 {
7671 struct elf_aarch64_link_hash_table *htab;
7672 bfd *dynobj;
7673 asection *s;
7674 bfd_boolean relocs;
7675 bfd *ibfd;
7676
7677 htab = elf_aarch64_hash_table ((info));
7678 dynobj = htab->root.dynobj;
7679
7680 BFD_ASSERT (dynobj != NULL);
7681
7682 if (htab->root.dynamic_sections_created)
7683 {
7684 if (info->executable)
7685 {
7686 s = bfd_get_linker_section (dynobj, ".interp");
7687 if (s == NULL)
7688 abort ();
7689 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7690 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7691 }
7692 }
7693
7694 /* Set up .got offsets for local syms, and space for local dynamic
7695 relocs. */
7696 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7697 {
7698 struct elf_aarch64_local_symbol *locals = NULL;
7699 Elf_Internal_Shdr *symtab_hdr;
7700 asection *srel;
7701 unsigned int i;
7702
7703 if (!is_aarch64_elf (ibfd))
7704 continue;
7705
7706 for (s = ibfd->sections; s != NULL; s = s->next)
7707 {
7708 struct elf_dyn_relocs *p;
7709
7710 for (p = (struct elf_dyn_relocs *)
7711 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7712 {
7713 if (!bfd_is_abs_section (p->sec)
7714 && bfd_is_abs_section (p->sec->output_section))
7715 {
7716 /* Input section has been discarded, either because
7717 it is a copy of a linkonce section or due to
7718 linker script /DISCARD/, so we'll be discarding
7719 the relocs too. */
7720 }
7721 else if (p->count != 0)
7722 {
7723 srel = elf_section_data (p->sec)->sreloc;
7724 srel->size += p->count * RELOC_SIZE (htab);
7725 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7726 info->flags |= DF_TEXTREL;
7727 }
7728 }
7729 }
7730
7731 locals = elf_aarch64_locals (ibfd);
7732 if (!locals)
7733 continue;
7734
7735 symtab_hdr = &elf_symtab_hdr (ibfd);
7736 srel = htab->root.srelgot;
7737 for (i = 0; i < symtab_hdr->sh_info; i++)
7738 {
7739 locals[i].got_offset = (bfd_vma) - 1;
7740 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7741 if (locals[i].got_refcount > 0)
7742 {
7743 unsigned got_type = locals[i].got_type;
7744 if (got_type & GOT_TLSDESC_GD)
7745 {
7746 locals[i].tlsdesc_got_jump_table_offset =
7747 (htab->root.sgotplt->size
7748 - aarch64_compute_jump_table_size (htab));
7749 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7750 locals[i].got_offset = (bfd_vma) - 2;
7751 }
7752
7753 if (got_type & GOT_TLS_GD)
7754 {
7755 locals[i].got_offset = htab->root.sgot->size;
7756 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7757 }
7758
7759 if (got_type & GOT_TLS_IE
7760 || got_type & GOT_NORMAL)
7761 {
7762 locals[i].got_offset = htab->root.sgot->size;
7763 htab->root.sgot->size += GOT_ENTRY_SIZE;
7764 }
7765
7766 if (got_type == GOT_UNKNOWN)
7767 {
7768 }
7769
7770 if (info->shared)
7771 {
7772 if (got_type & GOT_TLSDESC_GD)
7773 {
7774 htab->root.srelplt->size += RELOC_SIZE (htab);
7775 /* Note RELOC_COUNT not incremented here! */
7776 htab->tlsdesc_plt = (bfd_vma) - 1;
7777 }
7778
7779 if (got_type & GOT_TLS_GD)
7780 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7781
7782 if (got_type & GOT_TLS_IE
7783 || got_type & GOT_NORMAL)
7784 htab->root.srelgot->size += RELOC_SIZE (htab);
7785 }
7786 }
7787 else
7788 {
7789 locals[i].got_refcount = (bfd_vma) - 1;
7790 }
7791 }
7792 }
7793
7794
7795 /* Allocate global sym .plt and .got entries, and space for global
7796 sym dynamic relocs. */
7797 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7798 info);
7799
7800 /* Allocate global ifunc sym .plt and .got entries, and space for global
7801 ifunc sym dynamic relocs. */
7802 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7803 info);
7804
7805 /* Allocate .plt and .got entries, and space for local symbols. */
7806 htab_traverse (htab->loc_hash_table,
7807 elfNN_aarch64_allocate_local_dynrelocs,
7808 info);
7809
7810 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7811 htab_traverse (htab->loc_hash_table,
7812 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7813 info);
7814
7815 /* For every jump slot reserved in the sgotplt, reloc_count is
7816 incremented. However, when we reserve space for TLS descriptors,
7817 it's not incremented, so in order to compute the space reserved
7818 for them, it suffices to multiply the reloc count by the jump
7819 slot size. */
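  /* For example, if the traversals above left .rela.plt with a
     reloc_count of N, the PLT jump slots account for
     N * GOT_ENTRY_SIZE bytes of .got.plt; the TLS descriptor pairs
     reserved earlier live in .got.plt after those slots and are not
     counted by reloc_count.  */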
7820
7821 if (htab->root.srelplt)
7822 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7823
7824 if (htab->tlsdesc_plt)
7825 {
7826 if (htab->root.splt->size == 0)
7827 htab->root.splt->size += PLT_ENTRY_SIZE;
7828
7829 htab->tlsdesc_plt = htab->root.splt->size;
7830 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7831
7832 /* If we're not using lazy TLS relocations, don't generate the
7833 GOT entry required. */
7834 if (!(info->flags & DF_BIND_NOW))
7835 {
7836 htab->dt_tlsdesc_got = htab->root.sgot->size;
7837 htab->root.sgot->size += GOT_ENTRY_SIZE;
7838 }
7839 }
7840
7841 /* Init mapping symbol information to use later to distinguish between
7842 code and data while scanning for errata. */
7843 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7844 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7845 {
7846 if (!is_aarch64_elf (ibfd))
7847 continue;
7848 bfd_elfNN_aarch64_init_maps (ibfd);
7849 }
7850
7851 /* We now have determined the sizes of the various dynamic sections.
7852 Allocate memory for them. */
7853 relocs = FALSE;
7854 for (s = dynobj->sections; s != NULL; s = s->next)
7855 {
7856 if ((s->flags & SEC_LINKER_CREATED) == 0)
7857 continue;
7858
7859 if (s == htab->root.splt
7860 || s == htab->root.sgot
7861 || s == htab->root.sgotplt
7862 || s == htab->root.iplt
7863 || s == htab->root.igotplt || s == htab->sdynbss)
7864 {
7865 /* Strip this section if we don't need it; see the
7866 comment below. */
7867 }
7868 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7869 {
7870 if (s->size != 0 && s != htab->root.srelplt)
7871 relocs = TRUE;
7872
7873 /* We use the reloc_count field as a counter if we need
7874 to copy relocs into the output file. */
7875 if (s != htab->root.srelplt)
7876 s->reloc_count = 0;
7877 }
7878 else
7879 {
7880 /* It's not one of our sections, so don't allocate space. */
7881 continue;
7882 }
7883
7884 if (s->size == 0)
7885 {
7886 /* If we don't need this section, strip it from the
7887 output file. This is mostly to handle .rela.bss and
7888 .rela.plt. We must create both sections in
7889 create_dynamic_sections, because they must be created
7890 before the linker maps input sections to output
7891 sections. The linker does that before
7892 adjust_dynamic_symbol is called, and it is that
7893 function which decides whether anything needs to go
7894 into these sections. */
7895
7896 s->flags |= SEC_EXCLUDE;
7897 continue;
7898 }
7899
7900 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7901 continue;
7902
7903 /* Allocate memory for the section contents. We use bfd_zalloc
7904 here in case unused entries are not reclaimed before the
7905 section's contents are written out. This should not happen,
7906 but this way if it does, we get a R_AARCH64_NONE reloc instead
7907 of garbage. */
7908 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7909 if (s->contents == NULL)
7910 return FALSE;
7911 }
7912
7913 if (htab->root.dynamic_sections_created)
7914 {
7915 /* Add some entries to the .dynamic section. We fill in the
7916 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7917 must add the entries now so that we get the correct size for
7918 the .dynamic section. The DT_DEBUG entry is filled in by the
7919 dynamic linker and used by the debugger. */
7920 #define add_dynamic_entry(TAG, VAL) \
7921 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7922
7923 if (info->executable)
7924 {
7925 if (!add_dynamic_entry (DT_DEBUG, 0))
7926 return FALSE;
7927 }
7928
7929 if (htab->root.splt->size != 0)
7930 {
7931 if (!add_dynamic_entry (DT_PLTGOT, 0)
7932 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7933 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7934 || !add_dynamic_entry (DT_JMPREL, 0))
7935 return FALSE;
7936
7937 if (htab->tlsdesc_plt
7938 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7939 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7940 return FALSE;
7941 }
7942
7943 if (relocs)
7944 {
7945 if (!add_dynamic_entry (DT_RELA, 0)
7946 || !add_dynamic_entry (DT_RELASZ, 0)
7947 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7948 return FALSE;
7949
7950 /* If any dynamic relocs apply to a read-only section,
7951 then we need a DT_TEXTREL entry. */
7952 if ((info->flags & DF_TEXTREL) == 0)
7953 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7954 info);
7955
7956 if ((info->flags & DF_TEXTREL) != 0)
7957 {
7958 if (!add_dynamic_entry (DT_TEXTREL, 0))
7959 return FALSE;
7960 }
7961 }
7962 }
7963 #undef add_dynamic_entry
7964
7965 return TRUE;
7966 }
7967
7968 static inline void
7969 elf_aarch64_update_plt_entry (bfd *output_bfd,
7970 bfd_reloc_code_real_type r_type,
7971 bfd_byte *plt_entry, bfd_vma value)
7972 {
7973 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7974
7975 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7976 }
7977
7978 static void
7979 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7980 struct elf_aarch64_link_hash_table
7981 *htab, bfd *output_bfd,
7982 struct bfd_link_info *info)
7983 {
7984 bfd_byte *plt_entry;
7985 bfd_vma plt_index;
7986 bfd_vma got_offset;
7987 bfd_vma gotplt_entry_address;
7988 bfd_vma plt_entry_address;
7989 Elf_Internal_Rela rela;
7990 bfd_byte *loc;
7991 asection *plt, *gotplt, *relplt;
7992
7993 /* When building a static executable, use .iplt, .igot.plt and
7994 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7995 if (htab->root.splt != NULL)
7996 {
7997 plt = htab->root.splt;
7998 gotplt = htab->root.sgotplt;
7999 relplt = htab->root.srelplt;
8000 }
8001 else
8002 {
8003 plt = htab->root.iplt;
8004 gotplt = htab->root.igotplt;
8005 relplt = htab->root.irelplt;
8006 }
8007
8008 /* Get the index in the procedure linkage table which
8009 corresponds to this symbol. This is the index of this symbol
8010 in all the symbols for which we are making plt entries. The
8011 first entry in the procedure linkage table is reserved.
8012
8013 Get the offset into the .got table of the entry that
8014 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8015 bytes. The first three are reserved for the dynamic linker.
8016
8017 For static executables, we don't reserve anything. */
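  /* As an illustration for the dynamic (splt) case, assuming the ELF64
     small PLT with a 32-byte header and 16-byte PLTn entries: a symbol
     whose h->plt.offset is 32 + 2 * 16 gets plt_index 2 and uses the
     .got.plt slot at (2 + 3) * GOT_ENTRY_SIZE, i.e. it skips the three
     slots reserved for the dynamic linker.  */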
8018
8019 if (plt == htab->root.splt)
8020 {
8021 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8022 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8023 }
8024 else
8025 {
8026 plt_index = h->plt.offset / htab->plt_entry_size;
8027 got_offset = plt_index * GOT_ENTRY_SIZE;
8028 }
8029
8030 plt_entry = plt->contents + h->plt.offset;
8031 plt_entry_address = plt->output_section->vma
8032 + plt->output_offset + h->plt.offset;
8033 gotplt_entry_address = gotplt->output_section->vma +
8034 gotplt->output_offset + got_offset;
8035
8036 /* Copy in the boiler-plate for the PLTn entry. */
8037 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8038
8039 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8040 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8041 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8042 plt_entry,
8043 PG (gotplt_entry_address) -
8044 PG (plt_entry_address));
8045
8046 /* Fill in the lo12 bits for the load from the pltgot. */
8047 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8048 plt_entry + 4,
8049 PG_OFFSET (gotplt_entry_address));
8050
8051 /* Fill in the lo12 bits for the add from the pltgot entry. */
8052 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8053 plt_entry + 8,
8054 PG_OFFSET (gotplt_entry_address));
8055
8056 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8057 bfd_put_NN (output_bfd,
8058 plt->output_section->vma + plt->output_offset,
8059 gotplt->contents + got_offset);
8060
8061 rela.r_offset = gotplt_entry_address;
8062
8063 if (h->dynindx == -1
8064 || ((info->executable
8065 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8066 && h->def_regular
8067 && h->type == STT_GNU_IFUNC))
8068 {
8069 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8070 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8071 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8072 rela.r_addend = (h->root.u.def.value
8073 + h->root.u.def.section->output_section->vma
8074 + h->root.u.def.section->output_offset);
8075 }
8076 else
8077 {
8078 /* Fill in the entry in the .rela.plt section. */
8079 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8080 rela.r_addend = 0;
8081 }
8082
8083 /* Compute the relocation entry to use based on the PLT index and do
8084 not adjust reloc_count. The reloc_count has already been adjusted
8085 to account for this entry. */
8086 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8087 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8088 }
8089
8090 /* Size sections even though they're not dynamic. We use it to set up
8091 _TLS_MODULE_BASE_, if needed. */
8092
8093 static bfd_boolean
8094 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8095 struct bfd_link_info *info)
8096 {
8097 asection *tls_sec;
8098
8099 if (info->relocatable)
8100 return TRUE;
8101
8102 tls_sec = elf_hash_table (info)->tls_sec;
8103
8104 if (tls_sec)
8105 {
8106 struct elf_link_hash_entry *tlsbase;
8107
8108 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8109 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8110
8111 if (tlsbase)
8112 {
8113 struct bfd_link_hash_entry *h = NULL;
8114 const struct elf_backend_data *bed =
8115 get_elf_backend_data (output_bfd);
8116
8117 if (!(_bfd_generic_link_add_one_symbol
8118 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8119 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8120 return FALSE;
8121
8122 tlsbase->type = STT_TLS;
8123 tlsbase = (struct elf_link_hash_entry *) h;
8124 tlsbase->def_regular = 1;
8125 tlsbase->other = STV_HIDDEN;
8126 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8127 }
8128 }
8129
8130 return TRUE;
8131 }
8132
8133 /* Finish up dynamic symbol handling. We set the contents of various
8134 dynamic sections here. */
8135 static bfd_boolean
8136 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8137 struct bfd_link_info *info,
8138 struct elf_link_hash_entry *h,
8139 Elf_Internal_Sym *sym)
8140 {
8141 struct elf_aarch64_link_hash_table *htab;
8142 htab = elf_aarch64_hash_table (info);
8143
8144 if (h->plt.offset != (bfd_vma) - 1)
8145 {
8146 asection *plt, *gotplt, *relplt;
8147
8148 /* This symbol has an entry in the procedure linkage table. Set
8149 it up. */
8150
8151 /* When building a static executable, use .iplt, .igot.plt and
8152 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8153 if (htab->root.splt != NULL)
8154 {
8155 plt = htab->root.splt;
8156 gotplt = htab->root.sgotplt;
8157 relplt = htab->root.srelplt;
8158 }
8159 else
8160 {
8161 plt = htab->root.iplt;
8162 gotplt = htab->root.igotplt;
8163 relplt = htab->root.irelplt;
8164 }
8165
8166 /* This symbol has an entry in the procedure linkage table. Set
8167 it up. */
8168 if ((h->dynindx == -1
8169 && !((h->forced_local || info->executable)
8170 && h->def_regular
8171 && h->type == STT_GNU_IFUNC))
8172 || plt == NULL
8173 || gotplt == NULL
8174 || relplt == NULL)
8175 abort ();
8176
8177 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8178 if (!h->def_regular)
8179 {
8180 /* Mark the symbol as undefined, rather than as defined in
8181 the .plt section. */
8182 sym->st_shndx = SHN_UNDEF;
8183 /* If the symbol is weak we need to clear the value.
8184 Otherwise, the PLT entry would provide a definition for
8185 the symbol even if the symbol wasn't defined anywhere,
8186 and so the symbol would never be NULL. Leave the value if
8187 there were any relocations where pointer equality matters
8188 (this is a clue for the dynamic linker, to make function
8189 pointer comparisons work between an application and shared
8190 library). */
8191 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8192 sym->st_value = 0;
8193 }
8194 }
8195
8196 if (h->got.offset != (bfd_vma) - 1
8197 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8198 {
8199 Elf_Internal_Rela rela;
8200 bfd_byte *loc;
8201
8202 /* This symbol has an entry in the global offset table. Set it
8203 up. */
8204 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8205 abort ();
8206
8207 rela.r_offset = (htab->root.sgot->output_section->vma
8208 + htab->root.sgot->output_offset
8209 + (h->got.offset & ~(bfd_vma) 1));
8210
8211 if (h->def_regular
8212 && h->type == STT_GNU_IFUNC)
8213 {
8214 if (info->shared)
8215 {
8216 /* Generate R_AARCH64_GLOB_DAT. */
8217 goto do_glob_dat;
8218 }
8219 else
8220 {
8221 asection *plt;
8222
8223 if (!h->pointer_equality_needed)
8224 abort ();
8225
8226 /* For non-shared object, we can't use .got.plt, which
8227 contains the real function address if we need pointer
8228 equality. We load the GOT entry with the PLT entry. */
8229 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8230 bfd_put_NN (output_bfd, (plt->output_section->vma
8231 + plt->output_offset
8232 + h->plt.offset),
8233 htab->root.sgot->contents
8234 + (h->got.offset & ~(bfd_vma) 1));
8235 return TRUE;
8236 }
8237 }
8238 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8239 {
8240 if (!h->def_regular)
8241 return FALSE;
8242
8243 BFD_ASSERT ((h->got.offset & 1) != 0);
8244 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8245 rela.r_addend = (h->root.u.def.value
8246 + h->root.u.def.section->output_section->vma
8247 + h->root.u.def.section->output_offset);
8248 }
8249 else
8250 {
8251 do_glob_dat:
8252 BFD_ASSERT ((h->got.offset & 1) == 0);
8253 bfd_put_NN (output_bfd, (bfd_vma) 0,
8254 htab->root.sgot->contents + h->got.offset);
8255 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8256 rela.r_addend = 0;
8257 }
8258
8259 loc = htab->root.srelgot->contents;
8260 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8261 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8262 }
8263
8264 if (h->needs_copy)
8265 {
8266 Elf_Internal_Rela rela;
8267 bfd_byte *loc;
8268
8269 /* This symbol needs a copy reloc. Set it up. */
8270
8271 if (h->dynindx == -1
8272 || (h->root.type != bfd_link_hash_defined
8273 && h->root.type != bfd_link_hash_defweak)
8274 || htab->srelbss == NULL)
8275 abort ();
8276
8277 rela.r_offset = (h->root.u.def.value
8278 + h->root.u.def.section->output_section->vma
8279 + h->root.u.def.section->output_offset);
8280 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8281 rela.r_addend = 0;
8282 loc = htab->srelbss->contents;
8283 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8284 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8285 }
8286
8287 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8288 be NULL for local symbols. */
8289 if (sym != NULL
8290 && (h == elf_hash_table (info)->hdynamic
8291 || h == elf_hash_table (info)->hgot))
8292 sym->st_shndx = SHN_ABS;
8293
8294 return TRUE;
8295 }
8296
8297 /* Finish up local dynamic symbol handling. We set the contents of
8298 various dynamic sections here. */
8299
8300 static bfd_boolean
8301 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8302 {
8303 struct elf_link_hash_entry *h
8304 = (struct elf_link_hash_entry *) *slot;
8305 struct bfd_link_info *info
8306 = (struct bfd_link_info *) inf;
8307
8308 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8309 info, h, NULL);
8310 }
8311
8312 static void
8313 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8314 struct elf_aarch64_link_hash_table
8315 *htab)
8316 {
8317 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8318 small and large plts and at the minute just generates
8319 the small PLT. */
8320
8321 /* PLT0 of the small PLT looks like this in ELF64 -
8322 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8323 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8324 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8325 // symbol resolver
8326 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8327 // GOTPLT entry for this.
8328 br x17
8329 PLT0 will be slightly different in ELF32 due to different got entry
8330 size.
8331 */
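  /* The PG/PG_OFFSET split used below mirrors how an ADRP/ADD or
     ADRP/LDR pair addresses a location: as an illustration, with 4K
     pages PG (addr) keeps the page bits and PG_OFFSET (addr) keeps the
     low 12 bits, so the ADRP immediate ends up encoding the page delta
     (PG (GOT[2]) - PG (PLT0 + 4)) >> 12 while the LDR and ADD
     immediates carry the low 12 bits of the GOT[2] address.  */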
8332 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8333 bfd_vma plt_base;
8334
8335
8336 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8337 PLT_ENTRY_SIZE);
8338 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8339 PLT_ENTRY_SIZE;
8340
8341 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8342 + htab->root.sgotplt->output_offset
8343 + GOT_ENTRY_SIZE * 2);
8344
8345 plt_base = htab->root.splt->output_section->vma +
8346 htab->root.splt->output_offset;
8347
8348 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8349 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8350 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8351 htab->root.splt->contents + 4,
8352 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8353
8354 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8355 htab->root.splt->contents + 8,
8356 PG_OFFSET (plt_got_2nd_ent));
8357
8358 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8359 htab->root.splt->contents + 12,
8360 PG_OFFSET (plt_got_2nd_ent));
8361 }
8362
8363 static bfd_boolean
8364 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8365 struct bfd_link_info *info)
8366 {
8367 struct elf_aarch64_link_hash_table *htab;
8368 bfd *dynobj;
8369 asection *sdyn;
8370
8371 htab = elf_aarch64_hash_table (info);
8372 dynobj = htab->root.dynobj;
8373 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8374
8375 if (htab->root.dynamic_sections_created)
8376 {
8377 ElfNN_External_Dyn *dyncon, *dynconend;
8378
8379 if (sdyn == NULL || htab->root.sgot == NULL)
8380 abort ();
8381
8382 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8383 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8384 for (; dyncon < dynconend; dyncon++)
8385 {
8386 Elf_Internal_Dyn dyn;
8387 asection *s;
8388
8389 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8390
8391 switch (dyn.d_tag)
8392 {
8393 default:
8394 continue;
8395
8396 case DT_PLTGOT:
8397 s = htab->root.sgotplt;
8398 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8399 break;
8400
8401 case DT_JMPREL:
8402 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8403 break;
8404
8405 case DT_PLTRELSZ:
8406 s = htab->root.srelplt;
8407 dyn.d_un.d_val = s->size;
8408 break;
8409
8410 case DT_RELASZ:
8411 /* The procedure linkage table relocs (DT_JMPREL) should
8412 not be included in the overall relocs (DT_RELA).
8413 Therefore, we override the DT_RELASZ entry here to
8414 make it not include the JMPREL relocs. Since the
8415 linker script arranges for .rela.plt to follow all
8416 other relocation sections, we don't have to worry
8417 about changing the DT_RELA entry. */
8418 if (htab->root.srelplt != NULL)
8419 {
8420 s = htab->root.srelplt;
8421 dyn.d_un.d_val -= s->size;
8422 }
8423 break;
8424
8425 case DT_TLSDESC_PLT:
8426 s = htab->root.splt;
8427 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8428 + htab->tlsdesc_plt;
8429 break;
8430
8431 case DT_TLSDESC_GOT:
8432 s = htab->root.sgot;
8433 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8434 + htab->dt_tlsdesc_got;
8435 break;
8436 }
8437
8438 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8439 }
8440
8441 }
8442
8443 /* Fill in the special first entry in the procedure linkage table. */
8444 if (htab->root.splt && htab->root.splt->size > 0)
8445 {
8446 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8447
8448 elf_section_data (htab->root.splt->output_section)->
8449 this_hdr.sh_entsize = htab->plt_entry_size;
8450
8451
8452 if (htab->tlsdesc_plt)
8453 {
8454 bfd_put_NN (output_bfd, (bfd_vma) 0,
8455 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8456
8457 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8458 elfNN_aarch64_tlsdesc_small_plt_entry,
8459 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8460
8461 {
8462 bfd_vma adrp1_addr =
8463 htab->root.splt->output_section->vma
8464 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8465
8466 bfd_vma adrp2_addr = adrp1_addr + 4;
8467
8468 bfd_vma got_addr =
8469 htab->root.sgot->output_section->vma
8470 + htab->root.sgot->output_offset;
8471
8472 bfd_vma pltgot_addr =
8473 htab->root.sgotplt->output_section->vma
8474 + htab->root.sgotplt->output_offset;
8475
8476 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8477
8478 bfd_byte *plt_entry =
8479 htab->root.splt->contents + htab->tlsdesc_plt;
8480
8481 /* adrp x2, DT_TLSDESC_GOT */
8482 elf_aarch64_update_plt_entry (output_bfd,
8483 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8484 plt_entry + 4,
8485 (PG (dt_tlsdesc_got)
8486 - PG (adrp1_addr)));
8487
8488 /* adrp x3, 0 */
8489 elf_aarch64_update_plt_entry (output_bfd,
8490 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8491 plt_entry + 8,
8492 (PG (pltgot_addr)
8493 - PG (adrp2_addr)));
8494
8495 /* ldr x2, [x2, #0] */
8496 elf_aarch64_update_plt_entry (output_bfd,
8497 BFD_RELOC_AARCH64_LDSTNN_LO12,
8498 plt_entry + 12,
8499 PG_OFFSET (dt_tlsdesc_got));
8500
8501 /* add x3, x3, 0 */
8502 elf_aarch64_update_plt_entry (output_bfd,
8503 BFD_RELOC_AARCH64_ADD_LO12,
8504 plt_entry + 16,
8505 PG_OFFSET (pltgot_addr));
8506 }
8507 }
8508 }
8509
8510 if (htab->root.sgotplt)
8511 {
8512 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8513 {
8514 (*_bfd_error_handler)
8515 (_("discarded output section: `%A'"), htab->root.sgotplt);
8516 return FALSE;
8517 }
8518
8519 /* Fill in the first three entries in the global offset table. */
8520 if (htab->root.sgotplt->size > 0)
8521 {
8522 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8523
8524 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8525 bfd_put_NN (output_bfd,
8526 (bfd_vma) 0,
8527 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8528 bfd_put_NN (output_bfd,
8529 (bfd_vma) 0,
8530 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8531 }
8532
8533 if (htab->root.sgot)
8534 {
8535 if (htab->root.sgot->size > 0)
8536 {
8537 bfd_vma addr =
8538 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8539 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8540 }
8541 }
8542
8543 elf_section_data (htab->root.sgotplt->output_section)->
8544 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8545 }
8546
8547 if (htab->root.sgot && htab->root.sgot->size > 0)
8548 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8549 = GOT_ENTRY_SIZE;
8550
8551 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8552 htab_traverse (htab->loc_hash_table,
8553 elfNN_aarch64_finish_local_dynamic_symbol,
8554 info);
8555
8556 return TRUE;
8557 }
8558
8559 /* Return address for Ith PLT stub in section PLT, for relocation REL
8560 or (bfd_vma) -1 if it should not be included. */
8561
8562 static bfd_vma
8563 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8564 const arelent *rel ATTRIBUTE_UNUSED)
8565 {
8566 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8567 }
8568
8569
8570 /* We use this so we can override certain functions
8571 (though currently we don't). */
8572
8573 const struct elf_size_info elfNN_aarch64_size_info =
8574 {
8575 sizeof (ElfNN_External_Ehdr),
8576 sizeof (ElfNN_External_Phdr),
8577 sizeof (ElfNN_External_Shdr),
8578 sizeof (ElfNN_External_Rel),
8579 sizeof (ElfNN_External_Rela),
8580 sizeof (ElfNN_External_Sym),
8581 sizeof (ElfNN_External_Dyn),
8582 sizeof (Elf_External_Note),
8583 4, /* Hash table entry size. */
8584 1, /* Internal relocs per external relocs. */
8585 ARCH_SIZE, /* Arch size. */
8586 LOG_FILE_ALIGN, /* Log_file_align. */
8587 ELFCLASSNN, EV_CURRENT,
8588 bfd_elfNN_write_out_phdrs,
8589 bfd_elfNN_write_shdrs_and_ehdr,
8590 bfd_elfNN_checksum_contents,
8591 bfd_elfNN_write_relocs,
8592 bfd_elfNN_swap_symbol_in,
8593 bfd_elfNN_swap_symbol_out,
8594 bfd_elfNN_slurp_reloc_table,
8595 bfd_elfNN_slurp_symbol_table,
8596 bfd_elfNN_swap_dyn_in,
8597 bfd_elfNN_swap_dyn_out,
8598 bfd_elfNN_swap_reloc_in,
8599 bfd_elfNN_swap_reloc_out,
8600 bfd_elfNN_swap_reloca_in,
8601 bfd_elfNN_swap_reloca_out
8602 };
8603
8604 #define ELF_ARCH bfd_arch_aarch64
8605 #define ELF_MACHINE_CODE EM_AARCH64
8606 #define ELF_MAXPAGESIZE 0x10000
8607 #define ELF_MINPAGESIZE 0x1000
8608 #define ELF_COMMONPAGESIZE 0x1000
8609
8610 #define bfd_elfNN_close_and_cleanup \
8611 elfNN_aarch64_close_and_cleanup
8612
8613 #define bfd_elfNN_bfd_free_cached_info \
8614 elfNN_aarch64_bfd_free_cached_info
8615
8616 #define bfd_elfNN_bfd_is_target_special_symbol \
8617 elfNN_aarch64_is_target_special_symbol
8618
8619 #define bfd_elfNN_bfd_link_hash_table_create \
8620 elfNN_aarch64_link_hash_table_create
8621
8622 #define bfd_elfNN_bfd_merge_private_bfd_data \
8623 elfNN_aarch64_merge_private_bfd_data
8624
8625 #define bfd_elfNN_bfd_print_private_bfd_data \
8626 elfNN_aarch64_print_private_bfd_data
8627
8628 #define bfd_elfNN_bfd_reloc_type_lookup \
8629 elfNN_aarch64_reloc_type_lookup
8630
8631 #define bfd_elfNN_bfd_reloc_name_lookup \
8632 elfNN_aarch64_reloc_name_lookup
8633
8634 #define bfd_elfNN_bfd_set_private_flags \
8635 elfNN_aarch64_set_private_flags
8636
8637 #define bfd_elfNN_find_inliner_info \
8638 elfNN_aarch64_find_inliner_info
8639
8640 #define bfd_elfNN_find_nearest_line \
8641 elfNN_aarch64_find_nearest_line
8642
8643 #define bfd_elfNN_mkobject \
8644 elfNN_aarch64_mkobject
8645
8646 #define bfd_elfNN_new_section_hook \
8647 elfNN_aarch64_new_section_hook
8648
8649 #define elf_backend_adjust_dynamic_symbol \
8650 elfNN_aarch64_adjust_dynamic_symbol
8651
8652 #define elf_backend_always_size_sections \
8653 elfNN_aarch64_always_size_sections
8654
8655 #define elf_backend_check_relocs \
8656 elfNN_aarch64_check_relocs
8657
8658 #define elf_backend_copy_indirect_symbol \
8659 elfNN_aarch64_copy_indirect_symbol
8660
8661 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8662 to them in our hash. */
8663 #define elf_backend_create_dynamic_sections \
8664 elfNN_aarch64_create_dynamic_sections
8665
8666 #define elf_backend_init_index_section \
8667 _bfd_elf_init_2_index_sections
8668
8669 #define elf_backend_finish_dynamic_sections \
8670 elfNN_aarch64_finish_dynamic_sections
8671
8672 #define elf_backend_finish_dynamic_symbol \
8673 elfNN_aarch64_finish_dynamic_symbol
8674
8675 #define elf_backend_gc_sweep_hook \
8676 elfNN_aarch64_gc_sweep_hook
8677
8678 #define elf_backend_object_p \
8679 elfNN_aarch64_object_p
8680
8681 #define elf_backend_output_arch_local_syms \
8682 elfNN_aarch64_output_arch_local_syms
8683
8684 #define elf_backend_plt_sym_val \
8685 elfNN_aarch64_plt_sym_val
8686
8687 #define elf_backend_post_process_headers \
8688 elfNN_aarch64_post_process_headers
8689
8690 #define elf_backend_relocate_section \
8691 elfNN_aarch64_relocate_section
8692
8693 #define elf_backend_reloc_type_class \
8694 elfNN_aarch64_reloc_type_class
8695
8696 #define elf_backend_section_from_shdr \
8697 elfNN_aarch64_section_from_shdr
8698
8699 #define elf_backend_size_dynamic_sections \
8700 elfNN_aarch64_size_dynamic_sections
8701
8702 #define elf_backend_size_info \
8703 elfNN_aarch64_size_info
8704
8705 #define elf_backend_write_section \
8706 elfNN_aarch64_write_section
8707
8708 #define elf_backend_can_refcount 1
8709 #define elf_backend_can_gc_sections 1
8710 #define elf_backend_plt_readonly 1
8711 #define elf_backend_want_got_plt 1
8712 #define elf_backend_want_plt_sym 0
8713 #define elf_backend_may_use_rel_p 0
8714 #define elf_backend_may_use_rela_p 1
8715 #define elf_backend_default_use_rela_p 1
8716 #define elf_backend_rela_normal 1
8717 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8718 #define elf_backend_default_execstack 0
8719 #define elf_backend_extern_protected_data 1
8720
8721 #undef elf_backend_obj_attrs_section
8722 #define elf_backend_obj_attrs_section ".ARM.attributes"
8723
8724 #include "elfNN-target.h"