1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate
110 a double GOT slot. For a traditional TLS symbol we allocate space
111 for two relocation entries on the GOT; for a TLS descriptor symbol
112 we allocate space for one relocation on the slot. Record the GOT
113 offset for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relocations for a symbol have already been emitted.
131 All of the TLS code that uses the GOT offset must take care to mask
132 out this flag bit before using the offset (see the sketch after the #includes).
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
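/* Illustrative sketch (hypothetical helper, not used elsewhere): the TLS
   handling described in the notes at the top of this file records a
   "relocations emitted" flag in bit 0 of a symbol's GOT offset, so any
   consumer must strip that bit before using the offset.  */

static inline bfd_vma
aarch64_example_strip_got_flag_bit (bfd_vma got_offset)
{
  /* Bit 0 is bookkeeping only; GOT entries are at least 4-byte aligned.  */
  return got_offset & ~(bfd_vma) 1;
}
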
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
189
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
203
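/* A minimal usage sketch of the predicates above (the helper name is
   hypothetical and nothing in this file calls it): TLS descriptor
   relocations are also TLS relocations, so they must be tested first
   when the two classes need to be told apart.  */

static inline int
aarch64_example_classify_tls_reloc (bfd_reloc_code_real_type r_type)
{
  if (IS_AARCH64_TLSDESC_RELOC (r_type))
    return 2;   /* TLS descriptor access.  */
  if (IS_AARCH64_TLS_RELOC (r_type))
    return 1;   /* Traditional TLS access.  */
  return 0;     /* Not a TLS relocation.  */
}
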
204 #define ELIMINATE_COPY_RELOCS 0
205
206 /* Return the size of a relocation entry. HTAB is the bfd's
207 elf_aarch64_link_hash_table (unused by this macro). */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
209
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
215
216 /* Encoding of the nop instruction. */
217 #define INSN_NOP 0xd503201f
218
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
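/* Illustrative sketch: with the sizes above, the Nth per-symbol PLT slot
   starts after the single PLT0 header, i.e. at byte offset
   PLT_ENTRY_SIZE + N * PLT_SMALL_ENTRY_SIZE within .plt.  The helper is
   hypothetical; the real code records the offset per symbol.  */

static inline bfd_vma
aarch64_example_plt_entry_offset (bfd_vma n)
{
  return PLT_ENTRY_SIZE + n * PLT_SMALL_ENTRY_SIZE;
}
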
223 /* The first entry in a procedure linkage table looks like this
224 when the distance between the PLTGOT and the PLT is < 4GB; in that
225 case use these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 {
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 #if ARCH_SIZE == 64
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
235 #else
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
238 #endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 };
244
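/* Illustrative sketch of how a 21-bit page offset is folded into an ADRP
   instruction such as the second word of the PLT0 entry above: immlo
   occupies bits [30:29] and immhi bits [23:5].  The helper is hypothetical;
   the real fixups elsewhere in this file go through the relocation howtos
   instead of open-coding the field layout.  */

static inline uint32_t
aarch64_example_patch_adrp (uint32_t insn, int64_t page_offset)
{
  uint32_t imm = (uint32_t) page_offset & 0x1fffff;	/* 21-bit immediate.  */

  insn &= ~((0x3u << 29) | (0x7ffffu << 5));	/* Clear immlo and immhi.  */
  insn |= (imm & 0x3) << 29;			/* immlo.  */
  insn |= (imm >> 2) << 5;			/* immhi.  */
  return insn;
}
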
245 /* A per-function entry in a procedure linkage table looks like this
246 when the distance between the PLTGOT and the PLT is < 4GB; in that
247 case use these PLT entries. */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
249 {
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
251 #if ARCH_SIZE == 64
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 #else
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
257 #endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
259 };
260
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
263 {
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
267 #if ARCH_SIZE == 64
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
270 #else
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
273 #endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 };
278
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
281
282 #define AARCH64_ELF_ABI_VERSION 0
283
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
286
287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
289 in reloc.c. */
290
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
292 {
293 EMPTY_HOWTO (0),
294
295 /* Basic data relocations. */
296
297 #if ARCH_SIZE == 64
298 HOWTO (R_AARCH64_NULL, /* type */
299 0, /* rightshift */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
301 0, /* bitsize */
302 FALSE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
308 0, /* src_mask */
309 0, /* dst_mask */
310 FALSE), /* pcrel_offset */
311 #else
312 HOWTO (R_AARCH64_NONE, /* type */
313 0, /* rightshift */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
315 0, /* bitsize */
316 FALSE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
322 0, /* src_mask */
323 0, /* dst_mask */
324 FALSE), /* pcrel_offset */
325 #endif
326
327 /* .xword: (S+A) */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
329 0, /* rightshift */
330 4, /* size (4 = long long) */
331 64, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 /* .word: (S+A) */
343 HOWTO (AARCH64_R (ABS32), /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 /* .half: (S+A) */
358 HOWTO (AARCH64_R (ABS16), /* type */
359 0, /* rightshift */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
361 16, /* bitsize */
362 FALSE, /* pc_relative */
363 0, /* bitpos */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
371
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 TRUE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
386
387 /* .word: (S+A-P) */
388 HOWTO (AARCH64_R (PREL32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 TRUE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
401
402 /* .half: (S+A-P) */
403 HOWTO (AARCH64_R (PREL16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 TRUE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
416
417 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned data
418 or abs address inline (a sketch restating these transforms follows this table). */
419
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
422 0, /* rightshift */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 FALSE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
434
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
437 0, /* rightshift */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
439 16, /* bitsize */
440 FALSE, /* pc_relative */
441 0, /* bitpos */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
449
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
452 16, /* rightshift */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
454 16, /* bitsize */
455 FALSE, /* pc_relative */
456 0, /* bitpos */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
464
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
467 16, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
482 32, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
497 32, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
512 48, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
528
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
531 0, /* rightshift */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
533 16, /* bitsize */
534 FALSE, /* pc_relative */
535 0, /* bitpos */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
543
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
546 16, /* rightshift */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
548 16, /* bitsize */
549 FALSE, /* pc_relative */
550 0, /* bitpos */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
558
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
561 32, /* rightshift */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
563 16, /* bitsize */
564 FALSE, /* pc_relative */
565 0, /* bitpos */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
573
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
576
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
579 2, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 19, /* bitsize */
582 TRUE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
591
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
594 0, /* rightshift */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
596 21, /* bitsize */
597 TRUE, /* pc_relative */
598 0, /* bitpos */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
606
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
609 12, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 21, /* bitsize */
612 TRUE, /* pc_relative */
613 0, /* bitpos */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
621
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
624 12, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 21, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 12, /* bitsize */
642 FALSE, /* pc_relative */
643 10, /* bitpos */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 12, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 /* Relocations for control-flow instructions. */
668
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
671 2, /* rightshift */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
673 14, /* bitsize */
674 TRUE, /* pc_relative */
675 0, /* bitpos */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
683
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
686 2, /* rightshift */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
688 19, /* bitsize */
689 TRUE, /* pc_relative */
690 0, /* bitpos */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
698
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
701 2, /* rightshift */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
703 26, /* bitsize */
704 TRUE, /* pc_relative */
705 0, /* bitpos */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
713
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 26, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
731 1, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 12, /* bitsize */
734 FALSE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
743
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 12, /* bitsize */
749 FALSE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
758
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
761 3, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 12, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
776 4, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
792 2, /* rightshift */
793 2, /* size (0 = byte,1 = short,2 = long) */
794 19, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
808 12, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 21, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
823 3, /* rightshift */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
825 12, /* bitsize */
826 FALSE, /* pc_relative */
827 0, /* bitpos */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
835
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
838 2, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 12, /* bitsize */
841 FALSE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
850
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
854 12, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 21, /* bitsize */
857 TRUE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
866
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
868 0, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 21, /* bitsize */
871 TRUE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
880
881 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
883 0, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
897 16, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 16, /* bitsize */
900 FALSE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
909
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
925 12, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 21, /* bitsize */
928 FALSE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
937
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
939 3, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 12, /* bitsize */
942 FALSE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
951
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
953 2, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 12, /* bitsize */
956 FALSE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
965
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 19, /* bitsize */
970 FALSE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
979
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
981 32, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 16, /* bitsize */
984 FALSE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
993
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
995 16, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 16, /* bitsize */
998 FALSE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1007
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 16, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1093 2, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 19, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 21, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1125 21, /* bitsize */
1126 TRUE, /* pc_relative */
1127 0, /* bitpos */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1135
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1138 3, /* rightshift */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1140 12, /* bitsize */
1141 FALSE, /* pc_relative */
1142 0, /* bitpos */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1150
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1153 2, /* rightshift */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1155 12, /* bitsize */
1156 FALSE, /* pc_relative */
1157 0, /* bitpos */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1165
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1168 0, /* rightshift */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1170 12, /* bitsize */
1171 FALSE, /* pc_relative */
1172 0, /* bitpos */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1180
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 12, /* bitsize */
1185 FALSE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1194
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 12, /* bitsize */
1199 FALSE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1208
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1210 0, /* rightshift */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1212 12, /* bitsize */
1213 FALSE, /* pc_relative */
1214 0, /* bitpos */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1219 0x0, /* src_mask */
1220 0x0, /* dst_mask */
1221 FALSE), /* pcrel_offset */
1222
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1224 0, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 12, /* bitsize */
1227 FALSE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1233 0x0, /* src_mask */
1234 0x0, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1238 0, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 0, /* bitsize */
1241 FALSE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1247 0x0, /* src_mask */
1248 0x0, /* dst_mask */
1249 FALSE), /* pcrel_offset */
1250
1251 HOWTO (AARCH64_R (COPY), /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 64, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1266 0, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 64, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 64, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 64, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 64, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 #if ARCH_SIZE == 64
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1317 #else
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1319 #endif
1320 FALSE, /* partial_inplace */
1321 0, /* src_mask */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1324
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1326 0, /* rightshift */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1328 64, /* bitsize */
1329 FALSE, /* pc_relative */
1330 0, /* bitpos */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1333 #if ARCH_SIZE == 64
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1335 #else
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1337 #endif
1338 FALSE, /* partial_inplace */
1339 0, /* src_mask */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 64, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 #if ARCH_SIZE == 64
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1353 #else
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1355 #endif
1356 FALSE, /* partial_inplace */
1357 0, /* src_mask */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1360
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1362 0, /* rightshift */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 64, /* bitsize */
1365 FALSE, /* pc_relative */
1366 0, /* bitpos */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1371 0, /* src_mask */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1374
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1376 0, /* rightshift */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 64, /* bitsize */
1379 FALSE, /* pc_relative */
1380 0, /* bitpos */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1385 0, /* src_mask */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1388
1389 EMPTY_HOWTO (0),
1390 };
1391
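/* Illustrative sketches restating the value transforms encoded by the
   table above (both helpers are hypothetical and unused): a MOVW_UABS_Gn
   entry extracts ((S+A) >> (16 * n)) & 0xffff, and an LDSTxx_ABS_LO12_NC
   entry takes the low twelve bits of (S+A) scaled down by the access
   size, which is the "rightshift" column.  */

static inline bfd_vma
aarch64_example_movw_uabs_field (bfd_vma value, unsigned int group)
{
  return (value >> (16 * group)) & 0xffff;
}

static inline bfd_vma
aarch64_example_ldst_lo12_field (bfd_vma value, unsigned int rightshift)
{
  return (value & 0xfff) >> rightshift;
}
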
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1394 0, /* rightshift */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1396 0, /* bitsize */
1397 FALSE, /* pc_relative */
1398 0, /* bitpos */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1403 0, /* src_mask */
1404 0, /* dst_mask */
1405 FALSE); /* pcrel_offset */
1406
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1408
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1411 {
1412 const int size
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1416
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1419
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1422
1423 return BFD_RELOC_AARCH64_RELOC_START;
1424 }
1425
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1427
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1430 {
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1434
1435 if (initialized_p == FALSE)
1436 {
1437 unsigned int i;
1438
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1442
1443 initialized_p = TRUE;
1444 }
1445
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1448
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1451 {
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1455 }
1456
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1458 }
1459
1460 struct elf_aarch64_reloc_map
1461 {
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1464 };
1465
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1468 {
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1470
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1479 };
1480
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1483
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1486 {
1487 unsigned int i;
1488
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1494 {
1495 code = elf_aarch64_reloc_map[i].to;
1496 break;
1497 }
1498
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1503
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1506
1507 return NULL;
1508 }
1509
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1512 {
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1515
1516 #if ARCH_SIZE == 32
1517 if (r_type > 256)
1518 {
1519 bfd_set_error (bfd_error_bad_value);
1520 return NULL;
1521 }
1522 #endif
1523
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1526
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1529
1530 if (howto != NULL)
1531 return howto;
1532
1533 bfd_set_error (bfd_error_bad_value);
1534 return NULL;
1535 }
1536
1537 static void
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1540 {
1541 unsigned int r_type;
1542
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1545 }
1546
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1550 {
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1552
1553 if (howto != NULL)
1554 return howto;
1555
1556 bfd_set_error (bfd_error_bad_value);
1557 return NULL;
1558 }
1559
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1562 const char *r_name)
1563 {
1564 unsigned int i;
1565
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1570
1571 return NULL;
1572 }
1573
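/* Usage sketch (hypothetical, nothing in this file calls it): the two
   routines above resolve a howto either from a generic BFD reloc code or
   from its printable name.  */

static inline reloc_howto_type *
aarch64_example_jump26_howto (bfd *abfd)
{
  reloc_howto_type *howto
    = elfNN_aarch64_reloc_type_lookup (abfd, BFD_RELOC_AARCH64_JUMP26);

  if (howto == NULL)
    howto = elfNN_aarch64_reloc_name_lookup (abfd, AARCH64_R_STR (JUMP26));
  return howto;
}
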
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1578
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name and its type, the stub can be found. The
1582 name can be changed. The only requirement is that the %s be present. */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
1584
1585 /* The name of the dynamic interpreter. This is put in the .interp
1586 section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1588
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1592 (-((1 << 25) << 2))
1593
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1596
1597 static int
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1599 {
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1602 }
1603
1604 static int
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1606 {
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1610 }
1611
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1613 {
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1619 };
1620
1621 static const uint32_t aarch64_long_branch_stub[] =
1622 {
1623 #if ARCH_SIZE == 64
1624 0x58000090, /* ldr ip0, 1f */
1625 #else
1626 0x18000090, /* ldr wip0, 1f */
1627 #endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1633 */
1634 0x00000000,
1635 };
1636
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1638 {
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1641 };
1642
1643 /* Section name for stubs is the associated section name plus this
1644 string. */
1645 #define STUB_SUFFIX ".stub"
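/* For instance, stubs attached to a group whose link section is ".text"
   end up in a stub section named ".text.stub"; see
   _bfd_aarch64_create_or_find_stub_sec below.  (Naming illustration
   only.)  */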
1646
1647 enum elf_aarch64_stub_type
1648 {
1649 aarch64_stub_none,
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1653 };
1654
1655 struct elf_aarch64_stub_hash_entry
1656 {
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1659
1660 /* The stub section. */
1661 asection *stub_sec;
1662
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1665
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1670
1671 enum elf_aarch64_stub_type stub_type;
1672
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1675
1676 /* Destination symbol type. */
1677 unsigned char st_type;
1678
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1681 asection *id_sec;
1682
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1686 char *output_name;
1687
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1691 };
1692
1693 /* Used to build a map of a section. This is required for mixed-endian
1694 code/data. */
1695
1696 typedef struct elf_elf_section_map
1697 {
1698 bfd_vma vma;
1699 char type;
1700 }
1701 elf_aarch64_section_map;
1702
1703
1704 typedef struct _aarch64_elf_section_data
1705 {
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1710 }
1711 _aarch64_elf_section_data;
1712
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1715
1716 /* A fix-descriptor for erratum 835769. */
1717 struct aarch64_erratum_835769_fix
1718 {
1719 bfd *input_bfd;
1720 asection *section;
1721 bfd_vma offset;
1722 uint32_t veneered_insn;
1723 char *stub_name;
1724 enum elf_aarch64_stub_type stub_type;
1725 };
1726
1727 /* The size of the thread control block, which is defined to be two pointers. */
1728 #define TCB_SIZE (ARCH_SIZE/8)*2
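/* As an illustrative expansion only: for the 64-bit (ARCH_SIZE == 64)
   build this evaluates to (64/8)*2 == 16 bytes, and for the 32-bit
   (ARCH_SIZE == 32) build to 8 bytes.  */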
1729
1730 struct elf_aarch64_local_symbol
1731 {
1732 unsigned int got_type;
1733 bfd_signed_vma got_refcount;
1734 bfd_vma got_offset;
1735
1736 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1737 offset is from the end of the jump table and reserved entries
1738 within the PLTGOT.
1739
1740 The magic value (bfd_vma) -1 indicates that an offset has not been
1741 allocated. */
1742 bfd_vma tlsdesc_got_jump_table_offset;
1743 };
1744
1745 struct elf_aarch64_obj_tdata
1746 {
1747 struct elf_obj_tdata root;
1748
1749 /* local symbol descriptors */
1750 struct elf_aarch64_local_symbol *locals;
1751
1752 /* Zero to warn when linking objects with incompatible enum sizes. */
1753 int no_enum_size_warning;
1754
1755 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1756 int no_wchar_size_warning;
1757 };
1758
1759 #define elf_aarch64_tdata(bfd) \
1760 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1761
1762 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1763
1764 #define is_aarch64_elf(bfd) \
1765 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1766 && elf_tdata (bfd) != NULL \
1767 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1768
1769 static bfd_boolean
1770 elfNN_aarch64_mkobject (bfd *abfd)
1771 {
1772 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1773 AARCH64_ELF_DATA);
1774 }
1775
1776 #define elf_aarch64_hash_entry(ent) \
1777 ((struct elf_aarch64_link_hash_entry *)(ent))
1778
1779 #define GOT_UNKNOWN 0
1780 #define GOT_NORMAL 1
1781 #define GOT_TLS_GD 2
1782 #define GOT_TLS_IE 4
1783 #define GOT_TLSDESC_GD 8
1784
1785 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
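/* Since the values above are single bits, got_type can describe a mix of
   GOT entry kinds for one symbol; GOT_TLS_GD_ANY_P, for instance, holds
   whether the symbol needs a traditional general-dynamic entry, a TLS
   descriptor entry, or both.  (Illustrative reading of the flags only.)  */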
1786
1787 /* AArch64 ELF linker hash entry. */
1788 struct elf_aarch64_link_hash_entry
1789 {
1790 struct elf_link_hash_entry root;
1791
1792 /* Track dynamic relocs copied for this symbol. */
1793 struct elf_dyn_relocs *dyn_relocs;
1794
1795 /* Since PLT entries have variable size, we need to record the
1796 index into .got.plt instead of recomputing it from the PLT
1797 offset. */
1798 bfd_signed_vma plt_got_offset;
1799
1800 /* Bit mask representing the types of GOT entry, if any, required by
1801 this symbol. */
1802 unsigned int got_type;
1803
1804 /* A pointer to the most recently used stub hash entry against this
1805 symbol. */
1806 struct elf_aarch64_stub_hash_entry *stub_cache;
1807
1808 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1809 is from the end of the jump table and reserved entries within the PLTGOT.
1810
1811 The magic value (bfd_vma) -1 indicates that an offset has not
1812 been allocated. */
1813 bfd_vma tlsdesc_got_jump_table_offset;
1814 };
1815
1816 static unsigned int
1817 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1818 bfd *abfd,
1819 unsigned long r_symndx)
1820 {
1821 if (h)
1822 return elf_aarch64_hash_entry (h)->got_type;
1823
1824 if (! elf_aarch64_locals (abfd))
1825 return GOT_UNKNOWN;
1826
1827 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1828 }
1829
1830 /* Get the AArch64 elf linker hash table from a link_info structure. */
1831 #define elf_aarch64_hash_table(info) \
1832 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1833
1834 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1835 ((struct elf_aarch64_stub_hash_entry *) \
1836 bfd_hash_lookup ((table), (string), (create), (copy)))
1837
1838 /* AArch64 ELF linker hash table. */
1839 struct elf_aarch64_link_hash_table
1840 {
1841 /* The main hash table. */
1842 struct elf_link_hash_table root;
1843
1844 /* Nonzero to force PIC branch veneers. */
1845 int pic_veneer;
1846
1847 /* Fix erratum 835769. */
1848 int fix_erratum_835769;
1849
1850 /* The number of bytes in the initial entry in the PLT. */
1851 bfd_size_type plt_header_size;
1852
1853 /* The number of bytes in the subsequent PLT entries. */
1854 bfd_size_type plt_entry_size;
1855
1856 /* Short-cuts to get to dynamic linker sections. */
1857 asection *sdynbss;
1858 asection *srelbss;
1859
1860 /* Small local sym cache. */
1861 struct sym_cache sym_cache;
1862
1863 /* For convenience in allocate_dynrelocs. */
1864 bfd *obfd;
1865
1866 /* The amount of space used by the reserved portion of the sgotplt
1867 section, plus whatever space is used by the jump slots. */
1868 bfd_vma sgotplt_jump_table_size;
1869
1870 /* The stub hash table. */
1871 struct bfd_hash_table stub_hash_table;
1872
1873 /* Linker stub bfd. */
1874 bfd *stub_bfd;
1875
1876 /* Linker call-backs. */
1877 asection *(*add_stub_section) (const char *, asection *);
1878 void (*layout_sections_again) (void);
1879
1880 /* Array to keep track of which stub sections have been created, and
1881 information on stub grouping. */
1882 struct map_stub
1883 {
1884 /* This is the section to which stubs in the group will be
1885 attached. */
1886 asection *link_sec;
1887 /* The stub section. */
1888 asection *stub_sec;
1889 } *stub_group;
1890
1891 /* Assorted information used by elfNN_aarch64_size_stubs. */
1892 unsigned int bfd_count;
1893 int top_index;
1894 asection **input_list;
1895
1896 /* The offset into splt of the PLT entry for the TLS descriptor
1897 resolver. Special values are 0, if not necessary (or not found
1898 to be necessary yet), and -1 if needed but not determined
1899 yet. */
1900 bfd_vma tlsdesc_plt;
1901
1902 /* The GOT offset for the lazy trampoline. Communicated to the
1903 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1904 indicates an offset is not allocated. */
1905 bfd_vma dt_tlsdesc_got;
1906
1907 /* Used by local STT_GNU_IFUNC symbols. */
1908 htab_t loc_hash_table;
1909 void * loc_hash_memory;
1910 };
1911
1912 /* Create an entry in an AArch64 ELF linker hash table. */
1913
1914 static struct bfd_hash_entry *
1915 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1916 struct bfd_hash_table *table,
1917 const char *string)
1918 {
1919 struct elf_aarch64_link_hash_entry *ret =
1920 (struct elf_aarch64_link_hash_entry *) entry;
1921
1922 /* Allocate the structure if it has not already been allocated by a
1923 subclass. */
1924 if (ret == NULL)
1925 ret = bfd_hash_allocate (table,
1926 sizeof (struct elf_aarch64_link_hash_entry));
1927 if (ret == NULL)
1928 return (struct bfd_hash_entry *) ret;
1929
1930 /* Call the allocation method of the superclass. */
1931 ret = ((struct elf_aarch64_link_hash_entry *)
1932 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1933 table, string));
1934 if (ret != NULL)
1935 {
1936 ret->dyn_relocs = NULL;
1937 ret->got_type = GOT_UNKNOWN;
1938 ret->plt_got_offset = (bfd_vma) - 1;
1939 ret->stub_cache = NULL;
1940 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1941 }
1942
1943 return (struct bfd_hash_entry *) ret;
1944 }
1945
1946 /* Initialize an entry in the stub hash table. */
1947
1948 static struct bfd_hash_entry *
1949 stub_hash_newfunc (struct bfd_hash_entry *entry,
1950 struct bfd_hash_table *table, const char *string)
1951 {
1952 /* Allocate the structure if it has not already been allocated by a
1953 subclass. */
1954 if (entry == NULL)
1955 {
1956 entry = bfd_hash_allocate (table,
1957 sizeof (struct
1958 elf_aarch64_stub_hash_entry));
1959 if (entry == NULL)
1960 return entry;
1961 }
1962
1963 /* Call the allocation method of the superclass. */
1964 entry = bfd_hash_newfunc (entry, table, string);
1965 if (entry != NULL)
1966 {
1967 struct elf_aarch64_stub_hash_entry *eh;
1968
1969 /* Initialize the local fields. */
1970 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1971 eh->stub_sec = NULL;
1972 eh->stub_offset = 0;
1973 eh->target_value = 0;
1974 eh->target_section = NULL;
1975 eh->stub_type = aarch64_stub_none;
1976 eh->h = NULL;
1977 eh->id_sec = NULL;
1978 }
1979
1980 return entry;
1981 }
1982
1983 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1984 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1985 as global symbols. We reuse indx and dynstr_index for the local symbol
1986 hash since they aren't used by global symbols in this backend. */
1987
1988 static hashval_t
1989 elfNN_aarch64_local_htab_hash (const void *ptr)
1990 {
1991 struct elf_link_hash_entry *h
1992 = (struct elf_link_hash_entry *) ptr;
1993 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1994 }
1995
1996 /* Compare local hash entries. */
1997
1998 static int
1999 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2000 {
2001 struct elf_link_hash_entry *h1
2002 = (struct elf_link_hash_entry *) ptr1;
2003 struct elf_link_hash_entry *h2
2004 = (struct elf_link_hash_entry *) ptr2;
2005
2006 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2007 }
2008
2009 /* Find and/or create a hash entry for a local symbol. */
2010
2011 static struct elf_link_hash_entry *
2012 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2013 bfd *abfd, const Elf_Internal_Rela *rel,
2014 bfd_boolean create)
2015 {
2016 struct elf_aarch64_link_hash_entry e, *ret;
2017 asection *sec = abfd->sections;
2018 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2019 ELFNN_R_SYM (rel->r_info));
2020 void **slot;
2021
2022 e.root.indx = sec->id;
2023 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2024 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2025 create ? INSERT : NO_INSERT);
2026
2027 if (!slot)
2028 return NULL;
2029
2030 if (*slot)
2031 {
2032 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2033 return &ret->root;
2034 }
2035
2036 ret = (struct elf_aarch64_link_hash_entry *)
2037 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2038 sizeof (struct elf_aarch64_link_hash_entry));
2039 if (ret)
2040 {
2041 memset (ret, 0, sizeof (*ret));
2042 ret->root.indx = sec->id;
2043 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2044 ret->root.dynindx = -1;
2045 *slot = ret;
2046 }
2047 return &ret->root;
2048 }
2049
2050 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2051
2052 static void
2053 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2054 struct elf_link_hash_entry *dir,
2055 struct elf_link_hash_entry *ind)
2056 {
2057 struct elf_aarch64_link_hash_entry *edir, *eind;
2058
2059 edir = (struct elf_aarch64_link_hash_entry *) dir;
2060 eind = (struct elf_aarch64_link_hash_entry *) ind;
2061
2062 if (eind->dyn_relocs != NULL)
2063 {
2064 if (edir->dyn_relocs != NULL)
2065 {
2066 struct elf_dyn_relocs **pp;
2067 struct elf_dyn_relocs *p;
2068
2069 /* Add reloc counts against the indirect sym to the direct sym
2070 list. Merge any entries against the same section. */
2071 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2072 {
2073 struct elf_dyn_relocs *q;
2074
2075 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2076 if (q->sec == p->sec)
2077 {
2078 q->pc_count += p->pc_count;
2079 q->count += p->count;
2080 *pp = p->next;
2081 break;
2082 }
2083 if (q == NULL)
2084 pp = &p->next;
2085 }
2086 *pp = edir->dyn_relocs;
2087 }
2088
2089 edir->dyn_relocs = eind->dyn_relocs;
2090 eind->dyn_relocs = NULL;
2091 }
2092
2093 if (ind->root.type == bfd_link_hash_indirect)
2094 {
2095 /* Copy over GOT entry type info. */
2096 if (dir->got.refcount <= 0)
2097 {
2098 edir->got_type = eind->got_type;
2099 eind->got_type = GOT_UNKNOWN;
2100 }
2101 }
2102
2103 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2104 }
2105
2106 /* Destroy an AArch64 elf linker hash table. */
2107
2108 static void
2109 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2110 {
2111 struct elf_aarch64_link_hash_table *ret
2112 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2113
2114 if (ret->loc_hash_table)
2115 htab_delete (ret->loc_hash_table);
2116 if (ret->loc_hash_memory)
2117 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2118
2119 bfd_hash_table_free (&ret->stub_hash_table);
2120 _bfd_elf_link_hash_table_free (obfd);
2121 }
2122
2123 /* Create an AArch64 elf linker hash table. */
2124
2125 static struct bfd_link_hash_table *
2126 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2127 {
2128 struct elf_aarch64_link_hash_table *ret;
2129 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2130
2131 ret = bfd_zmalloc (amt);
2132 if (ret == NULL)
2133 return NULL;
2134
2135 if (!_bfd_elf_link_hash_table_init
2136 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2137 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2138 {
2139 free (ret);
2140 return NULL;
2141 }
2142
2143 ret->plt_header_size = PLT_ENTRY_SIZE;
2144 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2145 ret->obfd = abfd;
2146 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2147
2148 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2149 sizeof (struct elf_aarch64_stub_hash_entry)))
2150 {
2151 _bfd_elf_link_hash_table_free (abfd);
2152 return NULL;
2153 }
2154
2155 ret->loc_hash_table = htab_try_create (1024,
2156 elfNN_aarch64_local_htab_hash,
2157 elfNN_aarch64_local_htab_eq,
2158 NULL);
2159 ret->loc_hash_memory = objalloc_create ();
2160 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2161 {
2162 elfNN_aarch64_link_hash_table_free (abfd);
2163 return NULL;
2164 }
2165 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2166
2167 return &ret->root.root;
2168 }
2169
2170 static bfd_boolean
2171 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2172 bfd_vma offset, bfd_vma value)
2173 {
2174 reloc_howto_type *howto;
2175 bfd_vma place;
2176
2177 howto = elfNN_aarch64_howto_from_type (r_type);
2178 place = (input_section->output_section->vma + input_section->output_offset
2179 + offset);
2180
2181 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2182 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2183 return _bfd_aarch64_elf_put_addend (input_bfd,
2184 input_section->contents + offset, r_type,
2185 howto, value);
2186 }
2187
2188 static enum elf_aarch64_stub_type
2189 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2190 {
2191 if (aarch64_valid_for_adrp_p (value, place))
2192 return aarch64_stub_adrp_branch;
2193 return aarch64_stub_long_branch;
2194 }
2195
2196 /* Determine the type of stub needed, if any, for a call. */
2197
2198 static enum elf_aarch64_stub_type
2199 aarch64_type_of_stub (struct bfd_link_info *info,
2200 asection *input_sec,
2201 const Elf_Internal_Rela *rel,
2202 unsigned char st_type,
2203 struct elf_aarch64_link_hash_entry *hash,
2204 bfd_vma destination)
2205 {
2206 bfd_vma location;
2207 bfd_signed_vma branch_offset;
2208 unsigned int r_type;
2209 struct elf_aarch64_link_hash_table *globals;
2210 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2211 bfd_boolean via_plt_p;
2212
2213 if (st_type != STT_FUNC)
2214 return stub_type;
2215
2216 globals = elf_aarch64_hash_table (info);
2217 via_plt_p = (globals->root.splt != NULL && hash != NULL
2218 && hash->root.plt.offset != (bfd_vma) - 1);
2219
2220 if (via_plt_p)
2221 return stub_type;
2222
2223 /* Determine where the call point is. */
2224 location = (input_sec->output_offset
2225 + input_sec->output_section->vma + rel->r_offset);
2226
2227 branch_offset = (bfd_signed_vma) (destination - location);
2228
2229 r_type = ELFNN_R_TYPE (rel->r_info);
2230
2231 /* We don't want to redirect any old unconditional jump in this way,
2232 only one which is being used for a sibcall, where it is
2233 acceptable for the IP0 and IP1 registers to be clobbered. */
2234 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2235 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2236 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2237 {
2238 stub_type = aarch64_stub_long_branch;
2239 }
2240
2241 return stub_type;
2242 }
2243
2244 /* Build a name for an entry in the stub hash table. */
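/* For illustration only: with the formats used below, a stub for a call
   to the global symbol "printf" with addend 0 from a group whose id is
   0x12 would be named something like "00000012_printf+0", while a stub
   for a local symbol encodes the symbol's section id and index instead,
   e.g. "00000012_1a:7+0".  */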
2245
2246 static char *
2247 elfNN_aarch64_stub_name (const asection *input_section,
2248 const asection *sym_sec,
2249 const struct elf_aarch64_link_hash_entry *hash,
2250 const Elf_Internal_Rela *rel)
2251 {
2252 char *stub_name;
2253 bfd_size_type len;
2254
2255 if (hash)
2256 {
2257 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2258 stub_name = bfd_malloc (len);
2259 if (stub_name != NULL)
2260 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2261 (unsigned int) input_section->id,
2262 hash->root.root.root.string,
2263 rel->r_addend);
2264 }
2265 else
2266 {
2267 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2268 stub_name = bfd_malloc (len);
2269 if (stub_name != NULL)
2270 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2271 (unsigned int) input_section->id,
2272 (unsigned int) sym_sec->id,
2273 (unsigned int) ELFNN_R_SYM (rel->r_info),
2274 rel->r_addend);
2275 }
2276
2277 return stub_name;
2278 }
2279
2280 /* Look up an entry in the stub hash. Stub entries are cached because
2281 creating the stub name takes a bit of time. */
2282
2283 static struct elf_aarch64_stub_hash_entry *
2284 elfNN_aarch64_get_stub_entry (const asection *input_section,
2285 const asection *sym_sec,
2286 struct elf_link_hash_entry *hash,
2287 const Elf_Internal_Rela *rel,
2288 struct elf_aarch64_link_hash_table *htab)
2289 {
2290 struct elf_aarch64_stub_hash_entry *stub_entry;
2291 struct elf_aarch64_link_hash_entry *h =
2292 (struct elf_aarch64_link_hash_entry *) hash;
2293 const asection *id_sec;
2294
2295 if ((input_section->flags & SEC_CODE) == 0)
2296 return NULL;
2297
2298 /* If this input section is part of a group of sections sharing one
2299 stub section, then use the id of the first section in the group.
2300 Stub names need to include a section id, as there may well be
2301 more than one stub used to reach say, printf, and we need to
2302 distinguish between them. */
2303 id_sec = htab->stub_group[input_section->id].link_sec;
2304
2305 if (h != NULL && h->stub_cache != NULL
2306 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2307 {
2308 stub_entry = h->stub_cache;
2309 }
2310 else
2311 {
2312 char *stub_name;
2313
2314 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2315 if (stub_name == NULL)
2316 return NULL;
2317
2318 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2319 stub_name, FALSE, FALSE);
2320 if (h != NULL)
2321 h->stub_cache = stub_entry;
2322
2323 free (stub_name);
2324 }
2325
2326 return stub_entry;
2327 }
2328
2329
2330 /* Find or create a stub section in the stub group for an input
2331 section. */
2332
2333 static asection *
2334 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2335 struct elf_aarch64_link_hash_table *htab)
2336 {
2337 asection *link_sec;
2338 asection *stub_sec;
2339
2340 link_sec = htab->stub_group[section->id].link_sec;
2341 BFD_ASSERT (link_sec != NULL);
2342 stub_sec = htab->stub_group[section->id].stub_sec;
2343
2344 if (stub_sec == NULL)
2345 {
2346 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2347 if (stub_sec == NULL)
2348 {
2349 size_t namelen;
2350 bfd_size_type len;
2351 char *s_name;
2352
2353 namelen = strlen (link_sec->name);
2354 len = namelen + sizeof (STUB_SUFFIX);
2355 s_name = bfd_alloc (htab->stub_bfd, len);
2356 if (s_name == NULL)
2357 return NULL;
2358
2359 memcpy (s_name, link_sec->name, namelen);
2360 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2361 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2362
2363 if (stub_sec == NULL)
2364 return NULL;
2365 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2366 }
2367 htab->stub_group[section->id].stub_sec = stub_sec;
2368 }
2369
2370 return stub_sec;
2371 }
2372
2373
2374 /* Add a new stub entry in the stub group associated with an input
2375 section to the stub hash. Not all fields of the new stub entry are
2376 initialised. */
2377
2378 static struct elf_aarch64_stub_hash_entry *
2379 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2380 asection *section,
2381 struct elf_aarch64_link_hash_table *htab)
2382 {
2383 asection *link_sec;
2384 asection *stub_sec;
2385 struct elf_aarch64_stub_hash_entry *stub_entry;
2386
2387 link_sec = htab->stub_group[section->id].link_sec;
2388 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2389
2390 /* Enter this entry into the linker stub hash table. */
2391 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2392 TRUE, FALSE);
2393 if (stub_entry == NULL)
2394 {
2395 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2396 section->owner, stub_name);
2397 return NULL;
2398 }
2399
2400 stub_entry->stub_sec = stub_sec;
2401 stub_entry->stub_offset = 0;
2402 stub_entry->id_sec = link_sec;
2403
2404 return stub_entry;
2405 }
2406
2407 static bfd_boolean
2408 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2409 void *in_arg ATTRIBUTE_UNUSED)
2410 {
2411 struct elf_aarch64_stub_hash_entry *stub_entry;
2412 asection *stub_sec;
2413 bfd *stub_bfd;
2414 bfd_byte *loc;
2415 bfd_vma sym_value;
2416 bfd_vma veneered_insn_loc;
2417 bfd_vma veneer_entry_loc;
2418 bfd_signed_vma branch_offset = 0;
2419 unsigned int template_size;
2420 const uint32_t *template;
2421 unsigned int i;
2422
2423 /* Massage our args to the form they really have. */
2424 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2425
2426 stub_sec = stub_entry->stub_sec;
2427
2428 /* Make a note of the offset within the stubs for this entry. */
2429 stub_entry->stub_offset = stub_sec->size;
2430 loc = stub_sec->contents + stub_entry->stub_offset;
2431
2432 stub_bfd = stub_sec->owner;
2433
2434 /* This is the address of the stub destination. */
2435 sym_value = (stub_entry->target_value
2436 + stub_entry->target_section->output_offset
2437 + stub_entry->target_section->output_section->vma);
2438
2439 if (stub_entry->stub_type == aarch64_stub_long_branch)
2440 {
2441 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2442 + stub_sec->output_offset);
2443
2444 /* See if we can relax the stub. */
2445 if (aarch64_valid_for_adrp_p (sym_value, place))
2446 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2447 }
2448
2449 switch (stub_entry->stub_type)
2450 {
2451 case aarch64_stub_adrp_branch:
2452 template = aarch64_adrp_branch_stub;
2453 template_size = sizeof (aarch64_adrp_branch_stub);
2454 break;
2455 case aarch64_stub_long_branch:
2456 template = aarch64_long_branch_stub;
2457 template_size = sizeof (aarch64_long_branch_stub);
2458 break;
2459 case aarch64_stub_erratum_835769_veneer:
2460 template = aarch64_erratum_835769_stub;
2461 template_size = sizeof (aarch64_erratum_835769_stub);
2462 break;
2463 default:
2464 abort ();
2465 }
2466
2467 for (i = 0; i < (template_size / sizeof template[0]); i++)
2468 {
2469 bfd_putl32 (template[i], loc);
2470 loc += 4;
2471 }
2472
2473 template_size = (template_size + 7) & ~7;
2474 stub_sec->size += template_size;
2475
2476 switch (stub_entry->stub_type)
2477 {
2478 case aarch64_stub_adrp_branch:
2479 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2480 stub_entry->stub_offset, sym_value))
2481 /* The stub would not have been relaxed if the offset was out
2482 of range. */
2483 BFD_FAIL ();
2484
2485 _bfd_final_link_relocate
2486 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2487 stub_bfd,
2488 stub_sec,
2489 stub_sec->contents,
2490 stub_entry->stub_offset + 4,
2491 sym_value,
2492 0);
2493 break;
2494
2495 case aarch64_stub_long_branch:
2496 /* We want the value relative to the address 12 bytes back from the
2497 literal word itself, i.e. the address of the ADR instruction in the stub. */
2498 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2499 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2500 stub_sec->contents,
2501 stub_entry->stub_offset + 16,
2502 sym_value + 12, 0);
2503 break;
2504
2505 case aarch64_stub_erratum_835769_veneer:
2506 veneered_insn_loc = stub_entry->target_section->output_section->vma
2507 + stub_entry->target_section->output_offset
2508 + stub_entry->target_value;
2509 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2510 + stub_entry->stub_sec->output_offset
2511 + stub_entry->stub_offset;
2512 branch_offset = veneered_insn_loc - veneer_entry_loc;
2513 branch_offset >>= 2;
2514 branch_offset &= 0x3ffffff;
2515 bfd_putl32 (stub_entry->veneered_insn,
2516 stub_sec->contents + stub_entry->stub_offset);
2517 bfd_putl32 (template[1] | branch_offset,
2518 stub_sec->contents + stub_entry->stub_offset + 4);
2519 break;
2520
2521 default:
2522 abort ();
2523 }
2524
2525 return TRUE;
2526 }
2527
2528 /* As above, but don't actually build the stub. Just bump offset so
2529 we know stub section sizes. */
2530
2531 static bfd_boolean
2532 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2533 void *in_arg ATTRIBUTE_UNUSED)
2534 {
2535 struct elf_aarch64_stub_hash_entry *stub_entry;
2536 int size;
2537
2538 /* Massage our args to the form they really have. */
2539 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2540
2541 switch (stub_entry->stub_type)
2542 {
2543 case aarch64_stub_adrp_branch:
2544 size = sizeof (aarch64_adrp_branch_stub);
2545 break;
2546 case aarch64_stub_long_branch:
2547 size = sizeof (aarch64_long_branch_stub);
2548 break;
2549 case aarch64_stub_erratum_835769_veneer:
2550 size = sizeof (aarch64_erratum_835769_stub);
2551 break;
2552 default:
2553 abort ();
2554 }
2555
2556 size = (size + 7) & ~7;
2557 stub_entry->stub_sec->size += size;
2558 return TRUE;
2559 }
2560
2561 /* External entry points for sizing and building linker stubs. */
2562
2563 /* Set up various things so that we can make a list of input sections
2564 for each output section included in the link. Returns -1 on error,
2565 0 when no stubs will be needed, and 1 on success. */
2566
2567 int
2568 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2569 struct bfd_link_info *info)
2570 {
2571 bfd *input_bfd;
2572 unsigned int bfd_count;
2573 int top_id, top_index;
2574 asection *section;
2575 asection **input_list, **list;
2576 bfd_size_type amt;
2577 struct elf_aarch64_link_hash_table *htab =
2578 elf_aarch64_hash_table (info);
2579
2580 if (!is_elf_hash_table (htab))
2581 return 0;
2582
2583 /* Count the number of input BFDs and find the top input section id. */
2584 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2585 input_bfd != NULL; input_bfd = input_bfd->link.next)
2586 {
2587 bfd_count += 1;
2588 for (section = input_bfd->sections;
2589 section != NULL; section = section->next)
2590 {
2591 if (top_id < section->id)
2592 top_id = section->id;
2593 }
2594 }
2595 htab->bfd_count = bfd_count;
2596
2597 amt = sizeof (struct map_stub) * (top_id + 1);
2598 htab->stub_group = bfd_zmalloc (amt);
2599 if (htab->stub_group == NULL)
2600 return -1;
2601
2602 /* We can't use output_bfd->section_count here to find the top output
2603 section index as some sections may have been removed, and
2604 _bfd_strip_section_from_output doesn't renumber the indices. */
2605 for (section = output_bfd->sections, top_index = 0;
2606 section != NULL; section = section->next)
2607 {
2608 if (top_index < section->index)
2609 top_index = section->index;
2610 }
2611
2612 htab->top_index = top_index;
2613 amt = sizeof (asection *) * (top_index + 1);
2614 input_list = bfd_malloc (amt);
2615 htab->input_list = input_list;
2616 if (input_list == NULL)
2617 return -1;
2618
2619 /* For sections we aren't interested in, mark their entries with a
2620 value we can check later. */
2621 list = input_list + top_index;
2622 do
2623 *list = bfd_abs_section_ptr;
2624 while (list-- != input_list);
2625
2626 for (section = output_bfd->sections;
2627 section != NULL; section = section->next)
2628 {
2629 if ((section->flags & SEC_CODE) != 0)
2630 input_list[section->index] = NULL;
2631 }
2632
2633 return 1;
2634 }
2635
2636 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2637 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2638
2639 /* The linker repeatedly calls this function for each input section,
2640 in the order that input sections are linked into output sections.
2641 Build lists of input sections to determine groupings between which
2642 we may insert linker stubs. */
2643
2644 void
2645 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2646 {
2647 struct elf_aarch64_link_hash_table *htab =
2648 elf_aarch64_hash_table (info);
2649
2650 if (isec->output_section->index <= htab->top_index)
2651 {
2652 asection **list = htab->input_list + isec->output_section->index;
2653
2654 if (*list != bfd_abs_section_ptr)
2655 {
2656 /* Steal the link_sec pointer for our list. */
2657 /* This happens to make the list in reverse order,
2658 which is what we want. */
2659 PREV_SEC (isec) = *list;
2660 *list = isec;
2661 }
2662 }
2663 }
2664
2665 /* See whether we can group stub sections together. Grouping stub
2666 sections may result in fewer stubs. More importantly, we need to
2667 put all .init* and .fini* stubs at the beginning of the .init or
2668 .fini output sections respectively, because glibc splits the
2669 _init and _fini functions into multiple parts. Putting a stub in
2670 the middle of a function is not a good idea. */
2671
2672 static void
2673 group_sections (struct elf_aarch64_link_hash_table *htab,
2674 bfd_size_type stub_group_size,
2675 bfd_boolean stubs_always_before_branch)
2676 {
2677 asection **list = htab->input_list + htab->top_index;
2678
2679 do
2680 {
2681 asection *tail = *list;
2682
2683 if (tail == bfd_abs_section_ptr)
2684 continue;
2685
2686 while (tail != NULL)
2687 {
2688 asection *curr;
2689 asection *prev;
2690 bfd_size_type total;
2691
2692 curr = tail;
2693 total = tail->size;
2694 while ((prev = PREV_SEC (curr)) != NULL
2695 && ((total += curr->output_offset - prev->output_offset)
2696 < stub_group_size))
2697 curr = prev;
2698
2699 /* OK, the size from the start of CURR to the end is less
2700 than stub_group_size and thus can be handled by one stub
2701 section. (Or the tail section is itself larger than
2702 stub_group_size, in which case we may be toast.)
2703 We should really be keeping track of the total size of
2704 stubs added here, as stubs contribute to the final output
2705 section size. */
2706 do
2707 {
2708 prev = PREV_SEC (tail);
2709 /* Set up this stub group. */
2710 htab->stub_group[tail->id].link_sec = curr;
2711 }
2712 while (tail != curr && (tail = prev) != NULL);
2713
2714 /* But wait, there's more! Input sections up to stub_group_size
2715 bytes before the stub section can be handled by it too. */
2716 if (!stubs_always_before_branch)
2717 {
2718 total = 0;
2719 while (prev != NULL
2720 && ((total += tail->output_offset - prev->output_offset)
2721 < stub_group_size))
2722 {
2723 tail = prev;
2724 prev = PREV_SEC (tail);
2725 htab->stub_group[tail->id].link_sec = curr;
2726 }
2727 }
2728 tail = prev;
2729 }
2730 }
2731 while (list-- != htab->input_list);
2732
2733 free (htab->input_list);
2734 }
2735
2736 #undef PREV_SEC
2737
2738 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2739
2740 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2741 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2742 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2743 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2744 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2745 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2746
2747 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2748 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2749 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2750 #define AARCH64_ZR 0x1f
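/* A worked example of the field extractors above (illustrative only):
   for the encoding 0x9b020c20, i.e. "madd x0, x1, x2, x3",

   AARCH64_MAC (insn) is true (top byte 0x9b), AARCH64_OP31 (insn) == 0,
   AARCH64_RD (insn) == 0, AARCH64_RN (insn) == 1, AARCH64_RM (insn) == 2
   and AARCH64_RA (insn) == 3.  Since RA is not AARCH64_ZR, aarch64_mlxl_p
   below would treat it as a multiply-accumulate rather than a plain MUL.  */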
2751
2752 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2753 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2754
2755 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2756 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2757 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2758 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2759 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2760 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2761 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2762 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2763 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2764 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2765 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2766 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2767 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2768 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2769 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2770 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2771 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2772 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
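/* As an illustrative check of the encodings above: "ldr x0, [x1]"
   assembles to 0xf9400020, for which AARCH64_LDST, AARCH64_LDST_UIMM and
   AARCH64_LD are all true and AARCH64_RT yields 0, so aarch64_mem_op_p
   below classifies it as a single-register (non-pair) load of x0.  */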
2773
2774 /* Classify INSN as a load/store if it is indeed one.
2775
2776 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2777
2778 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2779 is set equal to RT.
2780
2781 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
2784
2785 static bfd_boolean
2786 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2787 bfd_boolean *pair, bfd_boolean *load)
2788 {
2789 uint32_t opcode;
2790 unsigned int r;
2791 uint32_t opc = 0;
2792 uint32_t v = 0;
2793 uint32_t opc_v = 0;
2794
2795 /* Bail out quickly if INSN doesn't fall into the load-store
2796 encoding space. */
2797 if (!AARCH64_LDST (insn))
2798 return FALSE;
2799
2800 *pair = FALSE;
2801 *load = FALSE;
2802 if (AARCH64_LDST_EX (insn))
2803 {
2804 *rt = AARCH64_RT (insn);
2805 *rt2 = *rt;
2806 if (AARCH64_BIT (insn, 21) == 1)
2807 {
2808 *pair = TRUE;
2809 *rt2 = AARCH64_RT2 (insn);
2810 }
2811 *load = AARCH64_LD (insn);
2812 return TRUE;
2813 }
2814 else if (AARCH64_LDST_NAP (insn)
2815 || AARCH64_LDSTP_PI (insn)
2816 || AARCH64_LDSTP_O (insn)
2817 || AARCH64_LDSTP_PRE (insn))
2818 {
2819 *pair = TRUE;
2820 *rt = AARCH64_RT (insn);
2821 *rt2 = AARCH64_RT2 (insn);
2822 *load = AARCH64_LD (insn);
2823 return TRUE;
2824 }
2825 else if (AARCH64_LDST_PCREL (insn)
2826 || AARCH64_LDST_UI (insn)
2827 || AARCH64_LDST_PIIMM (insn)
2828 || AARCH64_LDST_U (insn)
2829 || AARCH64_LDST_PREIMM (insn)
2830 || AARCH64_LDST_RO (insn)
2831 || AARCH64_LDST_UIMM (insn))
2832 {
2833 *rt = AARCH64_RT (insn);
2834 *rt2 = *rt;
2835 if (AARCH64_LDST_PCREL (insn))
2836 *load = TRUE;
2837 opc = AARCH64_BITS (insn, 22, 2);
2838 v = AARCH64_BIT (insn, 26);
2839 opc_v = opc | (v << 2);
2840 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2841 || opc_v == 5 || opc_v == 7);
2842 return TRUE;
2843 }
2844 else if (AARCH64_LDST_SIMD_M (insn)
2845 || AARCH64_LDST_SIMD_M_PI (insn))
2846 {
2847 *rt = AARCH64_RT (insn);
2848 *load = AARCH64_BIT (insn, 22);
2849 opcode = (insn >> 12) & 0xf;
2850 switch (opcode)
2851 {
2852 case 0:
2853 case 2:
2854 *rt2 = *rt + 3;
2855 break;
2856
2857 case 4:
2858 case 6:
2859 *rt2 = *rt + 2;
2860 break;
2861
2862 case 7:
2863 *rt2 = *rt;
2864 break;
2865
2866 case 8:
2867 case 10:
2868 *rt2 = *rt + 1;
2869 break;
2870
2871 default:
2872 return FALSE;
2873 }
2874 return TRUE;
2875 }
2876 else if (AARCH64_LDST_SIMD_S (insn)
2877 || AARCH64_LDST_SIMD_S_PI (insn))
2878 {
2879 *rt = AARCH64_RT (insn);
2880 r = (insn >> 21) & 1;
2881 *load = AARCH64_BIT (insn, 22);
2882 opcode = (insn >> 13) & 0x7;
2883 switch (opcode)
2884 {
2885 case 0:
2886 case 2:
2887 case 4:
2888 *rt2 = *rt + r;
2889 break;
2890
2891 case 1:
2892 case 3:
2893 case 5:
2894 *rt2 = *rt + (r == 0 ? 2 : 3);
2895 break;
2896
2897 case 6:
2898 *rt2 = *rt + r;
2899 break;
2900
2901 case 7:
2902 *rt2 = *rt + (r == 0 ? 2 : 3);
2903 break;
2904
2905 default:
2906 return FALSE;
2907 }
2908 return TRUE;
2909 }
2910
2911 return FALSE;
2912 }
2913
2914 /* Return TRUE if INSN is multiply-accumulate. */
2915
2916 static bfd_boolean
2917 aarch64_mlxl_p (uint32_t insn)
2918 {
2919 uint32_t op31 = AARCH64_OP31 (insn);
2920
2921 if (AARCH64_MAC (insn)
2922 && (op31 == 0 || op31 == 1 || op31 == 5)
2923 /* Exclude MUL instructions which are encoded as a multiple accumulate
2924 with RA = XZR. */
2925 && AARCH64_RA (insn) != AARCH64_ZR)
2926 return TRUE;
2927
2928 return FALSE;
2929 }
2930
2931 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2932 it is possible for a 64-bit multiply-accumulate instruction to generate an
2933 incorrect result. The details are quite complex and hard to
2934 determine statically, since branches in the code may exist in some
2935 circumstances, but all cases end with a memory (load, store, or
2936 prefetch) instruction followed immediately by the multiply-accumulate
2937 operation. We employ a linker patching technique, by moving the potentially
2938 affected multiply-accumulate instruction into a patch region and replacing
2939 the original instruction with a branch to the patch. This function checks
2940 if INSN_1 is the memory operation followed by a multiply-accumulate
2941 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2942 if INSN_1 and INSN_2 are safe. */
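/* A small illustration of the checks below (not an exhaustive description
   of the erratum): the pair

   ldr x0, [x2]
   madd x4, x5, x6, x7

   is treated as a possible erratum sequence, whereas

   ldr x5, [x2]
   madd x4, x5, x6, x7

   is not, because the load writes x5, which the MADD then reads, giving a
   true (RAW) dependency between the two instructions.  */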
2943
2944 static bfd_boolean
2945 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2946 {
2947 uint32_t rt;
2948 uint32_t rt2;
2949 uint32_t rn;
2950 uint32_t rm;
2951 uint32_t ra;
2952 bfd_boolean pair;
2953 bfd_boolean load;
2954
2955 if (aarch64_mlxl_p (insn_2)
2956 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2957 {
2958 /* Any SIMD memory op is independent of the subsequent MLA
2959 by definition of the erratum. */
2960 if (AARCH64_BIT (insn_1, 26))
2961 return TRUE;
2962
2963 /* If not SIMD, check for integer memory ops and MLA relationship. */
2964 rn = AARCH64_RN (insn_2);
2965 ra = AARCH64_RA (insn_2);
2966 rm = AARCH64_RM (insn_2);
2967
2968 /* If this is a load and there's a true (RAW) dependency, we are safe
2969 and this is not an erratum sequence. */
2970 if (load &&
2971 (rt == rn || rt == rm || rt == ra
2972 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2973 return FALSE;
2974
2975 /* We conservatively put out stubs for all other cases (including
2976 writebacks). */
2977 return TRUE;
2978 }
2979
2980 return FALSE;
2981 }
2982
2983 /* Used to order a list of mapping symbols by address. */
2984
2985 static int
2986 elf_aarch64_compare_mapping (const void *a, const void *b)
2987 {
2988 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2989 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2990
2991 if (amap->vma > bmap->vma)
2992 return 1;
2993 else if (amap->vma < bmap->vma)
2994 return -1;
2995 else if (amap->type > bmap->type)
2996 /* Ensure results do not depend on the host qsort for objects with
2997 multiple mapping symbols at the same address by sorting on type
2998 after vma. */
2999 return 1;
3000 else if (amap->type < bmap->type)
3001 return -1;
3002 else
3003 return 0;
3004 }
3005
3006
3007 /* Scan for the Cortex-A53 erratum 835769 sequence.
3008
3009 Return FALSE on abnormal termination, TRUE otherwise. */
3010
3011 static bfd_boolean
3012 erratum_835769_scan (bfd *input_bfd,
3013 struct bfd_link_info *info,
3014 struct aarch64_erratum_835769_fix **fixes_p,
3015 unsigned int *num_fixes_p,
3016 unsigned int *fix_table_size_p)
3017 {
3018 asection *section;
3019 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3020 struct aarch64_erratum_835769_fix *fixes = *fixes_p;
3021 unsigned int num_fixes = *num_fixes_p;
3022 unsigned int fix_table_size = *fix_table_size_p;
3023
3024 if (htab == NULL)
3025 return TRUE;
3026
3027 for (section = input_bfd->sections;
3028 section != NULL;
3029 section = section->next)
3030 {
3031 bfd_byte *contents = NULL;
3032 struct _aarch64_elf_section_data *sec_data;
3033 unsigned int span;
3034
3035 if (elf_section_type (section) != SHT_PROGBITS
3036 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3037 || (section->flags & SEC_EXCLUDE) != 0
3038 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3039 || (section->output_section == bfd_abs_section_ptr))
3040 continue;
3041
3042 if (elf_section_data (section)->this_hdr.contents != NULL)
3043 contents = elf_section_data (section)->this_hdr.contents;
3044 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3045 return FALSE;
3046
3047 sec_data = elf_aarch64_section_data (section);
3048
3049 qsort (sec_data->map, sec_data->mapcount,
3050 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3051
3052 for (span = 0; span < sec_data->mapcount; span++)
3053 {
3054 unsigned int span_start = sec_data->map[span].vma;
3055 unsigned int span_end = ((span == sec_data->mapcount - 1)
3056 ? sec_data->map[0].vma + section->size
3057 : sec_data->map[span + 1].vma);
3058 unsigned int i;
3059 char span_type = sec_data->map[span].type;
3060
3061 if (span_type == 'd')
3062 continue;
3063
3064 for (i = span_start; i + 4 < span_end; i += 4)
3065 {
3066 uint32_t insn_1 = bfd_getl32 (contents + i);
3067 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3068
3069 if (aarch64_erratum_sequence (insn_1, insn_2))
3070 {
3071 char *stub_name = NULL;
3072 stub_name = (char *) bfd_malloc
3073 (strlen ("__erratum_835769_veneer_") + 16);
3074 if (stub_name != NULL)
3075 sprintf
3076 (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3077 else
3078 return FALSE;
3079
3080 if (num_fixes == fix_table_size)
3081 {
3082 fix_table_size *= 2;
3083 fixes =
3084 (struct aarch64_erratum_835769_fix *)
3085 bfd_realloc (fixes,
3086 sizeof (struct aarch64_erratum_835769_fix)
3087 * fix_table_size);
3088 if (fixes == NULL)
3089 return FALSE;
3090 }
3091
3092 fixes[num_fixes].input_bfd = input_bfd;
3093 fixes[num_fixes].section = section;
3094 fixes[num_fixes].offset = i + 4;
3095 fixes[num_fixes].veneered_insn = insn_2;
3096 fixes[num_fixes].stub_name = stub_name;
3097 fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
3098 num_fixes++;
3099 }
3100 }
3101 }
3102 if (elf_section_data (section)->this_hdr.contents == NULL)
3103 free (contents);
3104 }
3105
3106 *fixes_p = fixes;
3107 *num_fixes_p = num_fixes;
3108 *fix_table_size_p = fix_table_size;
3109 return TRUE;
3110 }
3111
3112 /* Determine and set the size of the stub section for a final link.
3113
3114 The basic idea here is to examine all the relocations looking for
3115 PC-relative calls to a target that is unreachable with a "bl"
3116 instruction. */
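/* For instance (purely illustrative), a "bl" to a function that ends up
   more than 128MiB away from the call site and is not routed through a
   PLT entry gets a veneer allocated in the stub section of its group; the
   veneer is later emitted as an adrp+add+br sequence when the target is
   within ADRP range of the stub, or as the literal-based long branch
   otherwise.  */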
3117
3118 bfd_boolean
3119 elfNN_aarch64_size_stubs (bfd *output_bfd,
3120 bfd *stub_bfd,
3121 struct bfd_link_info *info,
3122 bfd_signed_vma group_size,
3123 asection * (*add_stub_section) (const char *,
3124 asection *),
3125 void (*layout_sections_again) (void))
3126 {
3127 bfd_size_type stub_group_size;
3128 bfd_boolean stubs_always_before_branch;
3129 bfd_boolean stub_changed = 0;
3130 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3131 struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL;
3132 unsigned int num_erratum_835769_fixes = 0;
3133 unsigned int erratum_835769_fix_table_size = 10;
3134 unsigned int i;
3135
3136 if (htab->fix_erratum_835769)
3137 {
3138 erratum_835769_fixes
3139 = (struct aarch64_erratum_835769_fix *)
3140 bfd_zmalloc
3141 (sizeof (struct aarch64_erratum_835769_fix) *
3142 erratum_835769_fix_table_size);
3143 if (erratum_835769_fixes == NULL)
3144 goto error_ret_free_local;
3145 }
3146
3147 /* Propagate mach to stub bfd, because it may not have been
3148 finalized when we created stub_bfd. */
3149 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3150 bfd_get_mach (output_bfd));
3151
3152 /* Stash our params away. */
3153 htab->stub_bfd = stub_bfd;
3154 htab->add_stub_section = add_stub_section;
3155 htab->layout_sections_again = layout_sections_again;
3156 stubs_always_before_branch = group_size < 0;
3157 if (group_size < 0)
3158 stub_group_size = -group_size;
3159 else
3160 stub_group_size = group_size;
3161
3162 if (stub_group_size == 1)
3163 {
3164 /* Default values. */
3165 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3166 stub_group_size = 127 * 1024 * 1024;
3167 }
3168
3169 group_sections (htab, stub_group_size, stubs_always_before_branch);
3170
3171 while (1)
3172 {
3173 bfd *input_bfd;
3174 asection *stub_sec;
3175 unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes;
3176
3177 num_erratum_835769_fixes = 0;
3178 for (input_bfd = info->input_bfds;
3179 input_bfd != NULL; input_bfd = input_bfd->link.next)
3180 {
3181 Elf_Internal_Shdr *symtab_hdr;
3182 asection *section;
3183 Elf_Internal_Sym *local_syms = NULL;
3184
3185 /* We'll need the symbol table in a second. */
3186 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3187 if (symtab_hdr->sh_info == 0)
3188 continue;
3189
3190 /* Walk over each section attached to the input bfd. */
3191 for (section = input_bfd->sections;
3192 section != NULL; section = section->next)
3193 {
3194 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3195
3196 /* If there aren't any relocs, then there's nothing more
3197 to do. */
3198 if ((section->flags & SEC_RELOC) == 0
3199 || section->reloc_count == 0
3200 || (section->flags & SEC_CODE) == 0)
3201 continue;
3202
3203 /* If this section is a link-once section that will be
3204 discarded, then don't create any stubs. */
3205 if (section->output_section == NULL
3206 || section->output_section->owner != output_bfd)
3207 continue;
3208
3209 /* Get the relocs. */
3210 internal_relocs
3211 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3212 NULL, info->keep_memory);
3213 if (internal_relocs == NULL)
3214 goto error_ret_free_local;
3215
3216 /* Now examine each relocation. */
3217 irela = internal_relocs;
3218 irelaend = irela + section->reloc_count;
3219 for (; irela < irelaend; irela++)
3220 {
3221 unsigned int r_type, r_indx;
3222 enum elf_aarch64_stub_type stub_type;
3223 struct elf_aarch64_stub_hash_entry *stub_entry;
3224 asection *sym_sec;
3225 bfd_vma sym_value;
3226 bfd_vma destination;
3227 struct elf_aarch64_link_hash_entry *hash;
3228 const char *sym_name;
3229 char *stub_name;
3230 const asection *id_sec;
3231 unsigned char st_type;
3232 bfd_size_type len;
3233
3234 r_type = ELFNN_R_TYPE (irela->r_info);
3235 r_indx = ELFNN_R_SYM (irela->r_info);
3236
3237 if (r_type >= (unsigned int) R_AARCH64_end)
3238 {
3239 bfd_set_error (bfd_error_bad_value);
3240 error_ret_free_internal:
3241 if (elf_section_data (section)->relocs == NULL)
3242 free (internal_relocs);
3243 goto error_ret_free_local;
3244 }
3245
3246 /* Only look for stubs on unconditional branch and
3247 branch and link instructions. */
3248 if (r_type != (unsigned int) AARCH64_R (CALL26)
3249 && r_type != (unsigned int) AARCH64_R (JUMP26))
3250 continue;
3251
3252 /* Now determine the call target, its name, value,
3253 section. */
3254 sym_sec = NULL;
3255 sym_value = 0;
3256 destination = 0;
3257 hash = NULL;
3258 sym_name = NULL;
3259 if (r_indx < symtab_hdr->sh_info)
3260 {
3261 /* It's a local symbol. */
3262 Elf_Internal_Sym *sym;
3263 Elf_Internal_Shdr *hdr;
3264
3265 if (local_syms == NULL)
3266 {
3267 local_syms
3268 = (Elf_Internal_Sym *) symtab_hdr->contents;
3269 if (local_syms == NULL)
3270 local_syms
3271 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3272 symtab_hdr->sh_info, 0,
3273 NULL, NULL, NULL);
3274 if (local_syms == NULL)
3275 goto error_ret_free_internal;
3276 }
3277
3278 sym = local_syms + r_indx;
3279 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3280 sym_sec = hdr->bfd_section;
3281 if (!sym_sec)
3282 /* This is an undefined symbol. It can never
3283 be resolved. */
3284 continue;
3285
3286 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3287 sym_value = sym->st_value;
3288 destination = (sym_value + irela->r_addend
3289 + sym_sec->output_offset
3290 + sym_sec->output_section->vma);
3291 st_type = ELF_ST_TYPE (sym->st_info);
3292 sym_name
3293 = bfd_elf_string_from_elf_section (input_bfd,
3294 symtab_hdr->sh_link,
3295 sym->st_name);
3296 }
3297 else
3298 {
3299 int e_indx;
3300
3301 e_indx = r_indx - symtab_hdr->sh_info;
3302 hash = ((struct elf_aarch64_link_hash_entry *)
3303 elf_sym_hashes (input_bfd)[e_indx]);
3304
3305 while (hash->root.root.type == bfd_link_hash_indirect
3306 || hash->root.root.type == bfd_link_hash_warning)
3307 hash = ((struct elf_aarch64_link_hash_entry *)
3308 hash->root.root.u.i.link);
3309
3310 if (hash->root.root.type == bfd_link_hash_defined
3311 || hash->root.root.type == bfd_link_hash_defweak)
3312 {
3313 struct elf_aarch64_link_hash_table *globals =
3314 elf_aarch64_hash_table (info);
3315 sym_sec = hash->root.root.u.def.section;
3316 sym_value = hash->root.root.u.def.value;
3317 /* For a destination in a shared library,
3318 use the PLT stub as target address to
3319 decide whether a branch stub is
3320 needed. */
3321 if (globals->root.splt != NULL && hash != NULL
3322 && hash->root.plt.offset != (bfd_vma) - 1)
3323 {
3324 sym_sec = globals->root.splt;
3325 sym_value = hash->root.plt.offset;
3326 if (sym_sec->output_section != NULL)
3327 destination = (sym_value
3328 + sym_sec->output_offset
3329 +
3330 sym_sec->output_section->vma);
3331 }
3332 else if (sym_sec->output_section != NULL)
3333 destination = (sym_value + irela->r_addend
3334 + sym_sec->output_offset
3335 + sym_sec->output_section->vma);
3336 }
3337 else if (hash->root.root.type == bfd_link_hash_undefined
3338 || (hash->root.root.type
3339 == bfd_link_hash_undefweak))
3340 {
3341 /* For a shared library, use the PLT stub as
3342 target address to decide whether a long
3343 branch stub is needed.
3344 For absolute code, they cannot be handled. */
3345 struct elf_aarch64_link_hash_table *globals =
3346 elf_aarch64_hash_table (info);
3347
3348 if (globals->root.splt != NULL && hash != NULL
3349 && hash->root.plt.offset != (bfd_vma) - 1)
3350 {
3351 sym_sec = globals->root.splt;
3352 sym_value = hash->root.plt.offset;
3353 if (sym_sec->output_section != NULL)
3354 destination = (sym_value
3355 + sym_sec->output_offset
3356 +
3357 sym_sec->output_section->vma);
3358 }
3359 else
3360 continue;
3361 }
3362 else
3363 {
3364 bfd_set_error (bfd_error_bad_value);
3365 goto error_ret_free_internal;
3366 }
3367 st_type = ELF_ST_TYPE (hash->root.type);
3368 sym_name = hash->root.root.root.string;
3369 }
3370
3371 /* Determine what (if any) linker stub is needed. */
3372 stub_type = aarch64_type_of_stub
3373 (info, section, irela, st_type, hash, destination);
3374 if (stub_type == aarch64_stub_none)
3375 continue;
3376
3377 /* Support for grouping stub sections. */
3378 id_sec = htab->stub_group[section->id].link_sec;
3379
3380 /* Get the name of this stub. */
3381 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3382 irela);
3383 if (!stub_name)
3384 goto error_ret_free_internal;
3385
3386 stub_entry =
3387 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3388 stub_name, FALSE, FALSE);
3389 if (stub_entry != NULL)
3390 {
3391 /* The proper stub has already been created. */
3392 free (stub_name);
3393 continue;
3394 }
3395
3396 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3397 (stub_name, section, htab);
3398 if (stub_entry == NULL)
3399 {
3400 free (stub_name);
3401 goto error_ret_free_internal;
3402 }
3403
3404 stub_entry->target_value = sym_value;
3405 stub_entry->target_section = sym_sec;
3406 stub_entry->stub_type = stub_type;
3407 stub_entry->h = hash;
3408 stub_entry->st_type = st_type;
3409
3410 if (sym_name == NULL)
3411 sym_name = "unnamed";
3412 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3413 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3414 if (stub_entry->output_name == NULL)
3415 {
3416 free (stub_name);
3417 goto error_ret_free_internal;
3418 }
3419
3420 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3421 sym_name);
3422
3423 stub_changed = TRUE;
3424 }
3425
3426 /* We're done with the internal relocs, free them. */
3427 if (elf_section_data (section)->relocs == NULL)
3428 free (internal_relocs);
3429 }
3430
3431 if (htab->fix_erratum_835769)
3432 {
3433 /* Scan for sequences which might trigger erratum 835769. */
3434 if (!erratum_835769_scan (input_bfd, info, &erratum_835769_fixes,
3435 &num_erratum_835769_fixes,
3436 &erratum_835769_fix_table_size))
3437 goto error_ret_free_local;
3438 }
3439 }
3440
3441 if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes)
3442 stub_changed = TRUE;
3443
3444 if (!stub_changed)
3445 break;
3446
3447 /* OK, we've added some stubs. Find out the new size of the
3448 stub sections. */
3449 for (stub_sec = htab->stub_bfd->sections;
3450 stub_sec != NULL; stub_sec = stub_sec->next)
3451 {
3452 /* Ignore non-stub sections. */
3453 if (!strstr (stub_sec->name, STUB_SUFFIX))
3454 continue;
3455 stub_sec->size = 0;
3456 }
3457
3458 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3459
3460 /* Add erratum 835769 veneers to stub section sizes too. */
3461 if (htab->fix_erratum_835769)
3462 for (i = 0; i < num_erratum_835769_fixes; i++)
3463 {
3464 stub_sec = _bfd_aarch64_create_or_find_stub_sec
3465 (erratum_835769_fixes[i].section, htab);
3466
3467 if (stub_sec == NULL)
3468 goto error_ret_free_local;
3469
3470 stub_sec->size += 8;
3471 }
3472
3473 /* Ask the linker to do its stuff. */
3474 (*htab->layout_sections_again) ();
3475 stub_changed = FALSE;
3476 }
3477
3478 /* Add stubs for erratum 835769 fixes now. */
3479 if (htab->fix_erratum_835769)
3480 {
3481 for (i = 0; i < num_erratum_835769_fixes; i++)
3482 {
3483 struct elf_aarch64_stub_hash_entry *stub_entry;
3484 char *stub_name = erratum_835769_fixes[i].stub_name;
3485 asection *section = erratum_835769_fixes[i].section;
3486 unsigned int section_id = erratum_835769_fixes[i].section->id;
3487 asection *link_sec = htab->stub_group[section_id].link_sec;
3488 asection *stub_sec = htab->stub_group[section_id].stub_sec;
3489
3490 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3491 stub_name, TRUE, FALSE);
3492 if (stub_entry == NULL)
3493 {
3494 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3495 				     section->owner,
3496 				     stub_name);
3497 return FALSE;
3498 }
3499
3500 stub_entry->stub_sec = stub_sec;
3501 stub_entry->stub_offset = 0;
3502 stub_entry->id_sec = link_sec;
3503 stub_entry->stub_type = erratum_835769_fixes[i].stub_type;
3504 stub_entry->target_section = section;
3505 stub_entry->target_value = erratum_835769_fixes[i].offset;
3506 stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn;
3507 stub_entry->output_name = erratum_835769_fixes[i].stub_name;
3508 }
3509 }
3510
3511 return TRUE;
3512
3513 error_ret_free_local:
3514 return FALSE;
3515 }
3516
3517 /* Build all the stubs associated with the current output file. The
3518 stubs are kept in a hash table attached to the main linker hash
3519 table. We also set up the .plt entries for statically linked PIC
3520 functions here. This function is called via aarch64_elf_finish in the
3521 linker. */
3522
3523 bfd_boolean
3524 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3525 {
3526 asection *stub_sec;
3527 struct bfd_hash_table *table;
3528 struct elf_aarch64_link_hash_table *htab;
3529
3530 htab = elf_aarch64_hash_table (info);
3531
3532 for (stub_sec = htab->stub_bfd->sections;
3533 stub_sec != NULL; stub_sec = stub_sec->next)
3534 {
3535 bfd_size_type size;
3536
3537 /* Ignore non-stub sections. */
3538 if (!strstr (stub_sec->name, STUB_SUFFIX))
3539 continue;
3540
3541 /* Allocate memory to hold the linker stubs. */
3542 size = stub_sec->size;
3543 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3544 if (stub_sec->contents == NULL && size != 0)
3545 return FALSE;
3546 stub_sec->size = 0;
3547 }
3548
3549 /* Build the stubs as directed by the stub hash table. */
3550 table = &htab->stub_hash_table;
3551 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3552
3553 return TRUE;
3554 }
3555
3556
3557 /* Add an entry to the code/data map for section SEC. */
3558
3559 static void
3560 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3561 {
3562 struct _aarch64_elf_section_data *sec_data =
3563 elf_aarch64_section_data (sec);
3564 unsigned int newidx;
3565
3566 if (sec_data->map == NULL)
3567 {
3568 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3569 sec_data->mapcount = 0;
3570 sec_data->mapsize = 1;
3571 }
3572
3573 newidx = sec_data->mapcount++;
3574
3575 if (sec_data->mapcount > sec_data->mapsize)
3576 {
3577 sec_data->mapsize *= 2;
3578 sec_data->map = bfd_realloc_or_free
3579 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3580 }
3581
3582 if (sec_data->map)
3583 {
3584 sec_data->map[newidx].vma = vma;
3585 sec_data->map[newidx].type = type;
3586 }
3587 }
3588
3589
3590 /* Initialise maps of insn/data for input BFDs. */
3591 void
3592 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3593 {
3594 Elf_Internal_Sym *isymbuf;
3595 Elf_Internal_Shdr *hdr;
3596 unsigned int i, localsyms;
3597
3598   /* Make sure that we are dealing with an AArch64 ELF binary.  */
3599 if (!is_aarch64_elf (abfd))
3600 return;
3601
3602 if ((abfd->flags & DYNAMIC) != 0)
3603 return;
3604
3605 hdr = &elf_symtab_hdr (abfd);
3606 localsyms = hdr->sh_info;
3607
3608 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3609 should contain the number of local symbols, which should come before any
3610 global symbols. Mapping symbols are always local. */
3611 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3612
3613 /* No internal symbols read? Skip this BFD. */
3614 if (isymbuf == NULL)
3615 return;
3616
3617 for (i = 0; i < localsyms; i++)
3618 {
3619 Elf_Internal_Sym *isym = &isymbuf[i];
3620 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3621 const char *name;
3622
3623 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3624 {
3625 name = bfd_elf_string_from_elf_section (abfd,
3626 hdr->sh_link,
3627 isym->st_name);
3628
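	  /* Mapping symbols are named "$x" (code) or "$d" (data); the
	     character after the '$' is recorded as the map type.  */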
3629 if (bfd_is_aarch64_special_symbol_name
3630 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3631 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3632 }
3633 }
3634 }
3635
3636 /* Set option values needed during linking. */
3637 void
3638 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3639 struct bfd_link_info *link_info,
3640 int no_enum_warn,
3641 int no_wchar_warn, int pic_veneer,
3642 int fix_erratum_835769)
3643 {
3644 struct elf_aarch64_link_hash_table *globals;
3645
3646 globals = elf_aarch64_hash_table (link_info);
3647 globals->pic_veneer = pic_veneer;
3648 globals->fix_erratum_835769 = fix_erratum_835769;
3649
3650 BFD_ASSERT (is_aarch64_elf (output_bfd));
3651 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3652 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3653 }
3654
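/* Return the VMA of the GOT entry for the global symbol H, storing VALUE
   into the entry (guarded by the offset's low bit so it is only written
   once) when the symbol resolves locally at link time.  */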
3655 static bfd_vma
3656 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3657 struct elf_aarch64_link_hash_table
3658 *globals, struct bfd_link_info *info,
3659 bfd_vma value, bfd *output_bfd,
3660 bfd_boolean *unresolved_reloc_p)
3661 {
3662 bfd_vma off = (bfd_vma) - 1;
3663 asection *basegot = globals->root.sgot;
3664 bfd_boolean dyn = globals->root.dynamic_sections_created;
3665
3666 if (h != NULL)
3667 {
3668 BFD_ASSERT (basegot != NULL);
3669 off = h->got.offset;
3670 BFD_ASSERT (off != (bfd_vma) - 1);
3671 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3672 || (info->shared
3673 && SYMBOL_REFERENCES_LOCAL (info, h))
3674 || (ELF_ST_VISIBILITY (h->other)
3675 && h->root.type == bfd_link_hash_undefweak))
3676 {
3677 /* This is actually a static link, or it is a -Bsymbolic link
3678 and the symbol is defined locally. We must initialize this
3679 entry in the global offset table. Since the offset must
3680 always be a multiple of 8 (4 in the case of ILP32), we use
3681 the least significant bit to record whether we have
3682 initialized it already.
3683 When doing a dynamic link, we create a .rel(a).got relocation
3684 entry to initialize the value. This is done in the
3685 finish_dynamic_symbol routine. */
3686 if ((off & 1) != 0)
3687 off &= ~1;
3688 else
3689 {
3690 bfd_put_NN (output_bfd, value, basegot->contents + off);
3691 h->got.offset |= 1;
3692 }
3693 }
3694 else
3695 *unresolved_reloc_p = FALSE;
3696
3697 off = off + basegot->output_section->vma + basegot->output_offset;
3698 }
3699
3700 return off;
3701 }
3702
3703 /* Change R_TYPE to a more efficient access model where possible,
3704 return the new reloc type. */
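/* When the symbol is local (H is NULL) a GD or TLSDESC sequence is
   relaxed to the LE model (TPREL immediates); otherwise it is relaxed
   to the IE model (GOTTPREL accesses).  */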
3705
3706 static bfd_reloc_code_real_type
3707 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3708 struct elf_link_hash_entry *h)
3709 {
3710 bfd_boolean is_local = h == NULL;
3711
3712 switch (r_type)
3713 {
3714 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3715 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3716 return (is_local
3717 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3718 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3719
3720 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3721 return (is_local
3722 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3723 : r_type);
3724
3725 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3726 return (is_local
3727 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3728 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3729
3730 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3731 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3732 return (is_local
3733 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3734 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3735
3736 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3737 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3738
3739 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3740 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3741
3742 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3743 return r_type;
3744
3745 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3746 return (is_local
3747 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3748 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3749
3750 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3751 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3752 /* Instructions with these relocations will become NOPs. */
3753 return BFD_RELOC_AARCH64_NONE;
3754
3755 default:
3756 break;
3757 }
3758
3759 return r_type;
3760 }
3761
3762 static unsigned int
3763 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3764 {
3765 switch (r_type)
3766 {
3767 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3768 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3769 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3770 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3771 return GOT_NORMAL;
3772
3773 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3774 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3775 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3776 return GOT_TLS_GD;
3777
3778 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3779 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3780 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3781 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3782 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3783 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3784 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3785 return GOT_TLSDESC_GD;
3786
3787 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3788 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3789 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3790 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3791 return GOT_TLS_IE;
3792
3793 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3794 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3795 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3796 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3797 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3798 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3799 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3800 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3801 return GOT_UNKNOWN;
3802
3803 default:
3804 break;
3805 }
3806 return GOT_UNKNOWN;
3807 }
3808
3809 static bfd_boolean
3810 aarch64_can_relax_tls (bfd *input_bfd,
3811 struct bfd_link_info *info,
3812 bfd_reloc_code_real_type r_type,
3813 struct elf_link_hash_entry *h,
3814 unsigned long r_symndx)
3815 {
3816 unsigned int symbol_got_type;
3817 unsigned int reloc_got_type;
3818
3819 if (! IS_AARCH64_TLS_RELOC (r_type))
3820 return FALSE;
3821
3822 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3823 reloc_got_type = aarch64_reloc_got_type (r_type);
3824
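  /* Relaxing a GD-style access is always safe when the symbol's GOT
     entry is already IE.  Any other relaxation is only performed for a
     non-shared link and for symbols that are not undefined weak.  */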
3825 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3826 return TRUE;
3827
3828 if (info->shared)
3829 return FALSE;
3830
3831 if (h && h->root.type == bfd_link_hash_undefweak)
3832 return FALSE;
3833
3834 return TRUE;
3835 }
3836
3837 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3838 enumerator. */
3839
3840 static bfd_reloc_code_real_type
3841 aarch64_tls_transition (bfd *input_bfd,
3842 struct bfd_link_info *info,
3843 unsigned int r_type,
3844 struct elf_link_hash_entry *h,
3845 unsigned long r_symndx)
3846 {
3847 bfd_reloc_code_real_type bfd_r_type
3848 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3849
3850 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3851 return bfd_r_type;
3852
3853 return aarch64_tls_transition_without_check (bfd_r_type, h);
3854 }
3855
3856 /* Return the base VMA address which should be subtracted from real addresses
3857 when resolving R_AARCH64_TLS_DTPREL relocation. */
3858
3859 static bfd_vma
3860 dtpoff_base (struct bfd_link_info *info)
3861 {
3862 /* If tls_sec is NULL, we should have signalled an error already. */
3863 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3864 return elf_hash_table (info)->tls_sec->vma;
3865 }
3866
3867 /* Return the base VMA address which should be subtracted from real addresses
3868 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3869
3870 static bfd_vma
3871 tpoff_base (struct bfd_link_info *info)
3872 {
3873 struct elf_link_hash_table *htab = elf_hash_table (info);
3874
3875 /* If tls_sec is NULL, we should have signalled an error already. */
3876 BFD_ASSERT (htab->tls_sec != NULL);
3877
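  /* The thread pointer addresses the TCB, and the TLS block starts at
     TP plus TCB_SIZE rounded up to the TLS section alignment, so an
     address in the TLS section minus tpoff_base () yields its
     TP-relative offset.  */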
3878 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3879 htab->tls_sec->alignment_power);
3880 return htab->tls_sec->vma - base;
3881 }
3882
3883 static bfd_vma *
3884 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3885 unsigned long r_symndx)
3886 {
3887   /* Return a pointer to the recorded GOT offset for the symbol
3888      referred to by H, or for the local symbol R_SYMNDX if H is NULL.  */
3889 if (h != NULL)
3890 return &h->got.offset;
3891 else
3892 {
3893 /* local symbol */
3894 struct elf_aarch64_local_symbol *l;
3895
3896 l = elf_aarch64_locals (input_bfd);
3897 return &l[r_symndx].got_offset;
3898 }
3899 }
3900
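/* The least significant bit of a recorded GOT offset is used to mark the
   entry as already processed; the helpers below set, test and strip that
   bit.  */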
3901 static void
3902 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3903 unsigned long r_symndx)
3904 {
3905 bfd_vma *p;
3906 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3907 *p |= 1;
3908 }
3909
3910 static int
3911 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3912 unsigned long r_symndx)
3913 {
3914 bfd_vma value;
3915 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3916 return value & 1;
3917 }
3918
3919 static bfd_vma
3920 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3921 unsigned long r_symndx)
3922 {
3923 bfd_vma value;
3924 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3925 value &= ~1;
3926 return value;
3927 }
3928
3929 static bfd_vma *
3930 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3931 unsigned long r_symndx)
3932 {
3933   /* Return a pointer to the recorded TLSDESC GOT offset for the symbol
3934      referred to by H, or for the local symbol R_SYMNDX if H is NULL.  */
3935 if (h != NULL)
3936 {
3937 struct elf_aarch64_link_hash_entry *eh;
3938 eh = (struct elf_aarch64_link_hash_entry *) h;
3939 return &eh->tlsdesc_got_jump_table_offset;
3940 }
3941 else
3942 {
3943 /* local symbol */
3944 struct elf_aarch64_local_symbol *l;
3945
3946 l = elf_aarch64_locals (input_bfd);
3947 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3948 }
3949 }
3950
3951 static void
3952 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3953 unsigned long r_symndx)
3954 {
3955 bfd_vma *p;
3956 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3957 *p |= 1;
3958 }
3959
3960 static int
3961 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3962 struct elf_link_hash_entry *h,
3963 unsigned long r_symndx)
3964 {
3965 bfd_vma value;
3966 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3967 return value & 1;
3968 }
3969
3970 static bfd_vma
3971 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3972 unsigned long r_symndx)
3973 {
3974 bfd_vma value;
3975 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3976 value &= ~1;
3977 return value;
3978 }
3979
3980 /* Data for make_branch_to_erratum_835769_stub(). */
3981
3982 struct erratum_835769_branch_to_stub_data
3983 {
3984 asection *output_section;
3985 bfd_byte *contents;
3986 };
3987
3988 /* Helper to insert branches to erratum 835769 stubs in the right
3989 places for a particular section. */
3990
3991 static bfd_boolean
3992 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
3993 void *in_arg)
3994 {
3995 struct elf_aarch64_stub_hash_entry *stub_entry;
3996 struct erratum_835769_branch_to_stub_data *data;
3997 bfd_byte *contents;
3998 unsigned long branch_insn = 0;
3999 bfd_vma veneered_insn_loc, veneer_entry_loc;
4000 bfd_signed_vma branch_offset;
4001 unsigned int target;
4002 bfd *abfd;
4003
4004 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4005 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4006
4007 if (stub_entry->target_section != data->output_section
4008 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4009 return TRUE;
4010
4011 contents = data->contents;
4012 veneered_insn_loc = stub_entry->target_section->output_section->vma
4013 + stub_entry->target_section->output_offset
4014 + stub_entry->target_value;
4015 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4016 + stub_entry->stub_sec->output_offset
4017 + stub_entry->stub_offset;
4018 branch_offset = veneer_entry_loc - veneered_insn_loc;
4019
4020 abfd = stub_entry->target_section->owner;
4021 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4022 (*_bfd_error_handler)
4023 (_("%B: error: Erratum 835769 stub out "
4024 "of range (input file too large)"), abfd);
4025
4026 target = stub_entry->target_value;
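  /* Assemble an unconditional branch (B) to the veneer: opcode
     0x14000000 takes a signed 26-bit word offset, so the byte offset is
     scaled down by 4 and masked into the imm26 field.  */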
4027 branch_insn = 0x14000000;
4028 branch_offset >>= 2;
4029 branch_offset &= 0x3ffffff;
4030 branch_insn |= branch_offset;
4031 bfd_putl32 (branch_insn, &contents[target]);
4032
4033 return TRUE;
4034 }
4035
4036 static bfd_boolean
4037 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4038 struct bfd_link_info *link_info,
4039 asection *sec,
4040 bfd_byte *contents)
4041
4042 {
4043 struct elf_aarch64_link_hash_table *globals =
4044 elf_aarch64_hash_table (link_info);
4045
4046 if (globals == NULL)
4047 return FALSE;
4048
4049 /* Fix code to point to erratum 835769 stubs. */
4050 if (globals->fix_erratum_835769)
4051 {
4052 struct erratum_835769_branch_to_stub_data data;
4053
4054 data.output_section = sec;
4055 data.contents = contents;
4056 bfd_hash_traverse (&globals->stub_hash_table,
4057 make_branch_to_erratum_835769_stub, &data);
4058 }
4059
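  /* Returning FALSE leaves the generic ELF code to write out the
     (possibly patched) section contents as usual.  */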
4060 return FALSE;
4061 }
4062
4063 /* Perform a relocation as part of a final link. */
4064 static bfd_reloc_status_type
4065 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4066 bfd *input_bfd,
4067 bfd *output_bfd,
4068 asection *input_section,
4069 bfd_byte *contents,
4070 Elf_Internal_Rela *rel,
4071 bfd_vma value,
4072 struct bfd_link_info *info,
4073 asection *sym_sec,
4074 struct elf_link_hash_entry *h,
4075 bfd_boolean *unresolved_reloc_p,
4076 bfd_boolean save_addend,
4077 bfd_vma *saved_addend,
4078 Elf_Internal_Sym *sym)
4079 {
4080 Elf_Internal_Shdr *symtab_hdr;
4081 unsigned int r_type = howto->type;
4082 bfd_reloc_code_real_type bfd_r_type
4083 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4084 bfd_reloc_code_real_type new_bfd_r_type;
4085 unsigned long r_symndx;
4086 bfd_byte *hit_data = contents + rel->r_offset;
4087 bfd_vma place;
4088 bfd_signed_vma signed_addend;
4089 struct elf_aarch64_link_hash_table *globals;
4090 bfd_boolean weak_undef_p;
4091
4092 globals = elf_aarch64_hash_table (info);
4093
4094 symtab_hdr = &elf_symtab_hdr (input_bfd);
4095
4096 BFD_ASSERT (is_aarch64_elf (input_bfd));
4097
4098 r_symndx = ELFNN_R_SYM (rel->r_info);
4099
4100 /* It is possible to have linker relaxations on some TLS access
4101 models. Update our information here. */
4102 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4103 if (new_bfd_r_type != bfd_r_type)
4104 {
4105 bfd_r_type = new_bfd_r_type;
4106 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4107 BFD_ASSERT (howto != NULL);
4108 r_type = howto->type;
4109 }
4110
4111 place = input_section->output_section->vma
4112 + input_section->output_offset + rel->r_offset;
4113
4114 /* Get addend, accumulating the addend for consecutive relocs
4115 which refer to the same offset. */
4116 signed_addend = saved_addend ? *saved_addend : 0;
4117 signed_addend += rel->r_addend;
4118
4119 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4120 : bfd_is_und_section (sym_sec));
4121
4122   /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4123      it here if it is defined in a non-shared object.  */
4124 if (h != NULL
4125 && h->type == STT_GNU_IFUNC
4126 && h->def_regular)
4127 {
4128 asection *plt;
4129 const char *name;
4130 asection *base_got;
4131 bfd_vma off;
4132
4133 if ((input_section->flags & SEC_ALLOC) == 0
4134 || h->plt.offset == (bfd_vma) -1)
4135 abort ();
4136
4137 /* STT_GNU_IFUNC symbol must go through PLT. */
4138 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4139 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4140
4141 switch (bfd_r_type)
4142 {
4143 default:
4144 if (h->root.root.string)
4145 name = h->root.root.string;
4146 else
4147 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4148 NULL);
4149 (*_bfd_error_handler)
4150 (_("%B: relocation %s against STT_GNU_IFUNC "
4151 "symbol `%s' isn't handled by %s"), input_bfd,
4152 howto->name, name, __FUNCTION__);
4153 bfd_set_error (bfd_error_bad_value);
4154 return FALSE;
4155
4156 case BFD_RELOC_AARCH64_NN:
4157 if (rel->r_addend != 0)
4158 {
4159 if (h->root.root.string)
4160 name = h->root.root.string;
4161 else
4162 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4163 sym, NULL);
4164 (*_bfd_error_handler)
4165 (_("%B: relocation %s against STT_GNU_IFUNC "
4166 "symbol `%s' has non-zero addend: %d"),
4167 input_bfd, howto->name, name, rel->r_addend);
4168 bfd_set_error (bfd_error_bad_value);
4169 return FALSE;
4170 }
4171
4172 /* Generate dynamic relocation only when there is a
4173 non-GOT reference in a shared object. */
4174 if (info->shared && h->non_got_ref)
4175 {
4176 Elf_Internal_Rela outrel;
4177 asection *sreloc;
4178
4179 /* Need a dynamic relocation to get the real function
4180 address. */
4181 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4182 info,
4183 input_section,
4184 rel->r_offset);
4185 if (outrel.r_offset == (bfd_vma) -1
4186 || outrel.r_offset == (bfd_vma) -2)
4187 abort ();
4188
4189 outrel.r_offset += (input_section->output_section->vma
4190 + input_section->output_offset);
4191
4192 if (h->dynindx == -1
4193 || h->forced_local
4194 || info->executable)
4195 {
4196 /* This symbol is resolved locally. */
4197 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4198 outrel.r_addend = (h->root.u.def.value
4199 + h->root.u.def.section->output_section->vma
4200 + h->root.u.def.section->output_offset);
4201 }
4202 else
4203 {
4204 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4205 outrel.r_addend = 0;
4206 }
4207
4208 sreloc = globals->root.irelifunc;
4209 elf_append_rela (output_bfd, sreloc, &outrel);
4210
4211 /* If this reloc is against an external symbol, we
4212 do not want to fiddle with the addend. Otherwise,
4213 we need to include the symbol value so that it
4214 becomes an addend for the dynamic reloc. For an
4215 		 internal symbol, we have updated the addend.  */
4216 return bfd_reloc_ok;
4217 }
4218 /* FALLTHROUGH */
4219 case BFD_RELOC_AARCH64_JUMP26:
4220 case BFD_RELOC_AARCH64_CALL26:
4221 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4222 signed_addend,
4223 weak_undef_p);
4224 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4225 howto, value);
4226 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4227 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4228 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4229 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4230 base_got = globals->root.sgot;
4231 off = h->got.offset;
4232
4233 if (base_got == NULL)
4234 abort ();
4235
4236 if (off == (bfd_vma) -1)
4237 {
4238 bfd_vma plt_index;
4239
4240 /* We can't use h->got.offset here to save state, or
4241 even just remember the offset, as finish_dynamic_symbol
4242 		 would use that as the offset into .got.  */
4243
4244 if (globals->root.splt != NULL)
4245 {
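		  /* The first three .got.plt slots are reserved (the
		     .got.plt header), so the GOT slot paired with this
		     PLT entry is at index plt_index + 3.  */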
4246 plt_index = ((h->plt.offset - globals->plt_header_size) /
4247 globals->plt_entry_size);
4248 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4249 base_got = globals->root.sgotplt;
4250 }
4251 else
4252 {
4253 plt_index = h->plt.offset / globals->plt_entry_size;
4254 off = plt_index * GOT_ENTRY_SIZE;
4255 base_got = globals->root.igotplt;
4256 }
4257
4258 if (h->dynindx == -1
4259 || h->forced_local
4260 || info->symbolic)
4261 {
4262 /* This references the local definition. We must
4263 initialize this entry in the global offset table.
4264 Since the offset must always be a multiple of 8,
4265 we use the least significant bit to record
4266 whether we have initialized it already.
4267
4268 When doing a dynamic link, we create a .rela.got
4269 relocation entry to initialize the value. This
4270 is done in the finish_dynamic_symbol routine. */
4271 if ((off & 1) != 0)
4272 off &= ~1;
4273 else
4274 {
4275 bfd_put_NN (output_bfd, value,
4276 base_got->contents + off);
4277 /* Note that this is harmless as -1 | 1 still is -1. */
4278 h->got.offset |= 1;
4279 }
4280 }
4281 value = (base_got->output_section->vma
4282 + base_got->output_offset + off);
4283 }
4284 else
4285 value = aarch64_calculate_got_entry_vma (h, globals, info,
4286 value, output_bfd,
4287 unresolved_reloc_p);
4288 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4289 0, weak_undef_p);
4290 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4291 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4292 case BFD_RELOC_AARCH64_ADD_LO12:
4293 break;
4294 }
4295 }
4296
4297 switch (bfd_r_type)
4298 {
4299 case BFD_RELOC_AARCH64_NONE:
4300 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4301 *unresolved_reloc_p = FALSE;
4302 return bfd_reloc_ok;
4303
4304 case BFD_RELOC_AARCH64_NN:
4305
4306 /* When generating a shared object or relocatable executable, these
4307 relocations are copied into the output file to be resolved at
4308 run time. */
4309       if ((info->shared || globals->root.is_relocatable_executable)
4310 && (input_section->flags & SEC_ALLOC)
4311 && (h == NULL
4312 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4313 || h->root.type != bfd_link_hash_undefweak))
4314 {
4315 Elf_Internal_Rela outrel;
4316 bfd_byte *loc;
4317 bfd_boolean skip, relocate;
4318 asection *sreloc;
4319
4320 *unresolved_reloc_p = FALSE;
4321
4322 skip = FALSE;
4323 relocate = FALSE;
4324
4325 outrel.r_addend = signed_addend;
4326 outrel.r_offset =
4327 _bfd_elf_section_offset (output_bfd, info, input_section,
4328 rel->r_offset);
4329 if (outrel.r_offset == (bfd_vma) - 1)
4330 skip = TRUE;
4331 else if (outrel.r_offset == (bfd_vma) - 2)
4332 {
4333 skip = TRUE;
4334 relocate = TRUE;
4335 }
4336
4337 outrel.r_offset += (input_section->output_section->vma
4338 + input_section->output_offset);
4339
4340 if (skip)
4341 memset (&outrel, 0, sizeof outrel);
4342 else if (h != NULL
4343 && h->dynindx != -1
4344 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4345 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4346 else
4347 {
4348 int symbol;
4349
4350 /* On SVR4-ish systems, the dynamic loader cannot
4351 relocate the text and data segments independently,
4352 so the symbol does not matter. */
4353 symbol = 0;
4354 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4355 outrel.r_addend += value;
4356 }
4357
4358 sreloc = elf_section_data (input_section)->sreloc;
4359 if (sreloc == NULL || sreloc->contents == NULL)
4360 return bfd_reloc_notsupported;
4361
4362 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4363 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4364
4365 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4366 {
4367 	      /* Sanity check that we have previously allocated
4368 sufficient space in the relocation section for the
4369 number of relocations we actually want to emit. */
4370 abort ();
4371 }
4372
4373 /* If this reloc is against an external symbol, we do not want to
4374 fiddle with the addend. Otherwise, we need to include the symbol
4375 value so that it becomes an addend for the dynamic reloc. */
4376 if (!relocate)
4377 return bfd_reloc_ok;
4378
4379 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4380 contents, rel->r_offset, value,
4381 signed_addend);
4382 }
4383 else
4384 value += signed_addend;
4385 break;
4386
4387 case BFD_RELOC_AARCH64_JUMP26:
4388 case BFD_RELOC_AARCH64_CALL26:
4389 {
4390 asection *splt = globals->root.splt;
4391 bfd_boolean via_plt_p =
4392 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4393
4394 /* A call to an undefined weak symbol is converted to a jump to
4395 the next instruction unless a PLT entry will be created.
4396 The jump to the next instruction is optimized as a NOP.
4397 Do the same for local undefined symbols. */
4398 if (weak_undef_p && ! via_plt_p)
4399 {
4400 bfd_putl32 (INSN_NOP, hit_data);
4401 return bfd_reloc_ok;
4402 }
4403
4404 /* If the call goes through a PLT entry, make sure to
4405 check distance to the right destination address. */
4406 if (via_plt_p)
4407 {
4408 value = (splt->output_section->vma
4409 + splt->output_offset + h->plt.offset);
4410 *unresolved_reloc_p = FALSE;
4411 }
4412
4413 	 /* If the target symbol is global and marked as a function, the
4414 	    relocation applies to a function call or a tail call.  In this
4415 	    situation we can veneer out-of-range branches.  The veneers
4416 	    use IP0 and IP1, and hence cannot be used for arbitrary
4417 	    out-of-range branches that occur within the body of a function.  */
4418 if (h && h->type == STT_FUNC)
4419 {
4420 /* Check if a stub has to be inserted because the destination
4421 is too far away. */
4422 if (! aarch64_valid_branch_p (value, place))
4423 {
4424 /* The target is out of reach, so redirect the branch to
4425 the local stub for this function. */
4426 struct elf_aarch64_stub_hash_entry *stub_entry;
4427 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4428 sym_sec, h,
4429 rel, globals);
4430 if (stub_entry != NULL)
4431 value = (stub_entry->stub_offset
4432 + stub_entry->stub_sec->output_offset
4433 + stub_entry->stub_sec->output_section->vma);
4434 }
4435 }
4436 }
4437 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4438 signed_addend, weak_undef_p);
4439 break;
4440
4441 case BFD_RELOC_AARCH64_16:
4442 #if ARCH_SIZE == 64
4443 case BFD_RELOC_AARCH64_32:
4444 #endif
4445 case BFD_RELOC_AARCH64_ADD_LO12:
4446 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4447 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4448 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4449 case BFD_RELOC_AARCH64_BRANCH19:
4450 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4451 case BFD_RELOC_AARCH64_LDST8_LO12:
4452 case BFD_RELOC_AARCH64_LDST16_LO12:
4453 case BFD_RELOC_AARCH64_LDST32_LO12:
4454 case BFD_RELOC_AARCH64_LDST64_LO12:
4455 case BFD_RELOC_AARCH64_LDST128_LO12:
4456 case BFD_RELOC_AARCH64_MOVW_G0_S:
4457 case BFD_RELOC_AARCH64_MOVW_G1_S:
4458 case BFD_RELOC_AARCH64_MOVW_G2_S:
4459 case BFD_RELOC_AARCH64_MOVW_G0:
4460 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4461 case BFD_RELOC_AARCH64_MOVW_G1:
4462 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4463 case BFD_RELOC_AARCH64_MOVW_G2:
4464 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4465 case BFD_RELOC_AARCH64_MOVW_G3:
4466 case BFD_RELOC_AARCH64_16_PCREL:
4467 case BFD_RELOC_AARCH64_32_PCREL:
4468 case BFD_RELOC_AARCH64_64_PCREL:
4469 case BFD_RELOC_AARCH64_TSTBR14:
4470 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4471 signed_addend, weak_undef_p);
4472 break;
4473
4474 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4475 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4476 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4477 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4478 if (globals->root.sgot == NULL)
4479 BFD_ASSERT (h != NULL);
4480
4481 if (h != NULL)
4482 {
4483 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4484 output_bfd,
4485 unresolved_reloc_p);
4486 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4487 0, weak_undef_p);
4488 }
4489 break;
4490
4491 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4492 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4493 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4494 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4495 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4496 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4497 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4498 if (globals->root.sgot == NULL)
4499 return bfd_reloc_notsupported;
4500
4501 value = (symbol_got_offset (input_bfd, h, r_symndx)
4502 + globals->root.sgot->output_section->vma
4503 + globals->root.sgot->output_offset);
4504
4505 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4506 0, weak_undef_p);
4507 *unresolved_reloc_p = FALSE;
4508 break;
4509
4510 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4511 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4512 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4513 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4514 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4515 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4516 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4517 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4518 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4519 signed_addend - tpoff_base (info),
4520 weak_undef_p);
4521 *unresolved_reloc_p = FALSE;
4522 break;
4523
4524 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4525 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4526 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4527 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4528 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4529 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4530 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4531 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4532 if (globals->root.sgot == NULL)
4533 return bfd_reloc_notsupported;
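      /* TLSDESC GOT entries are allocated in .got.plt after the PLT
	 jump-table slots, hence the sgotplt base plus
	 sgotplt_jump_table_size.  */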
4534 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4535 + globals->root.sgotplt->output_section->vma
4536 + globals->root.sgotplt->output_offset
4537 + globals->sgotplt_jump_table_size);
4538
4539 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4540 0, weak_undef_p);
4541 *unresolved_reloc_p = FALSE;
4542 break;
4543
4544 default:
4545 return bfd_reloc_notsupported;
4546 }
4547
4548 if (saved_addend)
4549 *saved_addend = value;
4550
4551 /* Only apply the final relocation in a sequence. */
4552 if (save_addend)
4553 return bfd_reloc_continue;
4554
4555 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4556 howto, value);
4557 }
4558
4559 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4560    R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4561 link.
4562
4563 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4564 is to then call final_link_relocate. Return other values in the
4565 case of error. */
4566
4567 static bfd_reloc_status_type
4568 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4569 bfd *input_bfd, bfd_byte *contents,
4570 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4571 {
4572 bfd_boolean is_local = h == NULL;
4573 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4574 unsigned long insn;
4575
4576 BFD_ASSERT (globals && input_bfd && contents && rel);
4577
4578 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4579 {
4580 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4581 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4582 if (is_local)
4583 {
4584 /* GD->LE relaxation:
4585 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4586 or
4587 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4588 */
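	    /* 0xd2a00000 is MOVZ X0, #0, LSL #16; the relocation, now
	       relaxed to a TPREL_G1 move, supplies the immediate.  */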
4589 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4590 return bfd_reloc_continue;
4591 }
4592 else
4593 {
4594 /* GD->IE relaxation:
4595 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4596 or
4597 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4598 */
4599 return bfd_reloc_continue;
4600 }
4601
4602 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4603 BFD_ASSERT (0);
4604 break;
4605
4606 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4607 if (is_local)
4608 {
4609 /* Tiny TLSDESC->LE relaxation:
4610 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4611 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4612 .tlsdesccall var
4613 blr x1 => nop
4614 */
4615 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4616 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4617
4618 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4619 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4620 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4621
4622 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4623 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4624 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4625 return bfd_reloc_continue;
4626 }
4627 else
4628 {
4629 /* Tiny TLSDESC->IE relaxation:
4630 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4631 adr x0, :tlsdesc:var => nop
4632 .tlsdesccall var
4633 blr x1 => nop
4634 */
4635 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4636 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4637
4638 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4639 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4640
4641 bfd_putl32 (0x58000000, contents + rel->r_offset);
4642 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4643 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4644 return bfd_reloc_continue;
4645 }
4646
4647 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4648 if (is_local)
4649 {
4650 /* Tiny GD->LE relaxation:
4651 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4652 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4653 nop => add x0, x0, #:tprel_lo12_nc:x
4654 */
4655
4656 /* First kill the tls_get_addr reloc on the bl instruction. */
4657 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4658
4659 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4660 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4661 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4662
4663 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4664 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4665 rel[1].r_offset = rel->r_offset + 8;
4666
4667 /* Move the current relocation to the second instruction in
4668 the sequence. */
4669 rel->r_offset += 4;
4670 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4671 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4672 return bfd_reloc_continue;
4673 }
4674 else
4675 {
4676 /* Tiny GD->IE relaxation:
4677 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4678 bl __tls_get_addr => mrs x1, tpidr_el0
4679 nop => add x0, x0, x1
4680 */
4681
4682 /* First kill the tls_get_addr reloc on the bl instruction. */
4683 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4684 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4685
4686 bfd_putl32 (0x58000000, contents + rel->r_offset);
4687 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4688 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4689 return bfd_reloc_continue;
4690 }
4691
4692 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4693 return bfd_reloc_continue;
4694
4695 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4696 if (is_local)
4697 {
4698 /* GD->LE relaxation:
4699 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4700 */
4701 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4702 return bfd_reloc_continue;
4703 }
4704 else
4705 {
4706 /* GD->IE relaxation:
4707 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4708 */
4709 insn = bfd_getl32 (contents + rel->r_offset);
4710 insn &= 0xffffffe0;
4711 bfd_putl32 (insn, contents + rel->r_offset);
4712 return bfd_reloc_continue;
4713 }
4714
4715 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4716 if (is_local)
4717 {
4718 /* GD->LE relaxation
4719 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4720 bl __tls_get_addr => mrs x1, tpidr_el0
4721 nop => add x0, x1, x0
4722 */
4723
4724 /* First kill the tls_get_addr reloc on the bl instruction. */
4725 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4726 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4727
4728 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4729 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4730 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4731 return bfd_reloc_continue;
4732 }
4733 else
4734 {
4735 /* GD->IE relaxation
4736 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4737 BL __tls_get_addr => mrs x1, tpidr_el0
4738 R_AARCH64_CALL26
4739 NOP => add x0, x1, x0
4740 */
4741
4742 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4743
4744 /* Remove the relocation on the BL instruction. */
4745 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4746
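	  /* 0xf9400000 is LDR X0, [X0, #0]; the relocation, now relaxed
	     to a GOTTPREL_LO12 load, supplies the scaled offset.  */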
4747 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4748
4749 /* We choose to fixup the BL and NOP instructions using the
4750 offset from the second relocation to allow flexibility in
4751 scheduling instructions between the ADD and BL. */
4752 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4753 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4754 return bfd_reloc_continue;
4755 }
4756
4757 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4758 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4759 /* GD->IE/LE relaxation:
4760 add x0, x0, #:tlsdesc_lo12:var => nop
4761 blr xd => nop
4762 */
4763 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4764 return bfd_reloc_ok;
4765
4766 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4767 /* IE->LE relaxation:
4768 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4769 */
4770 if (is_local)
4771 {
4772 insn = bfd_getl32 (contents + rel->r_offset);
4773 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4774 }
4775 return bfd_reloc_continue;
4776
4777 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4778 /* IE->LE relaxation:
4779 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4780 */
4781 if (is_local)
4782 {
4783 insn = bfd_getl32 (contents + rel->r_offset);
4784 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4785 }
4786 return bfd_reloc_continue;
4787
4788 default:
4789 return bfd_reloc_continue;
4790 }
4791
4792 return bfd_reloc_ok;
4793 }
4794
4795 /* Relocate an AArch64 ELF section. */
4796
4797 static bfd_boolean
4798 elfNN_aarch64_relocate_section (bfd *output_bfd,
4799 struct bfd_link_info *info,
4800 bfd *input_bfd,
4801 asection *input_section,
4802 bfd_byte *contents,
4803 Elf_Internal_Rela *relocs,
4804 Elf_Internal_Sym *local_syms,
4805 asection **local_sections)
4806 {
4807 Elf_Internal_Shdr *symtab_hdr;
4808 struct elf_link_hash_entry **sym_hashes;
4809 Elf_Internal_Rela *rel;
4810 Elf_Internal_Rela *relend;
4811 const char *name;
4812 struct elf_aarch64_link_hash_table *globals;
4813 bfd_boolean save_addend = FALSE;
4814 bfd_vma addend = 0;
4815
4816 globals = elf_aarch64_hash_table (info);
4817
4818 symtab_hdr = &elf_symtab_hdr (input_bfd);
4819 sym_hashes = elf_sym_hashes (input_bfd);
4820
4821 rel = relocs;
4822 relend = relocs + input_section->reloc_count;
4823 for (; rel < relend; rel++)
4824 {
4825 unsigned int r_type;
4826 bfd_reloc_code_real_type bfd_r_type;
4827 bfd_reloc_code_real_type relaxed_bfd_r_type;
4828 reloc_howto_type *howto;
4829 unsigned long r_symndx;
4830 Elf_Internal_Sym *sym;
4831 asection *sec;
4832 struct elf_link_hash_entry *h;
4833 bfd_vma relocation;
4834 bfd_reloc_status_type r;
4835 arelent bfd_reloc;
4836 char sym_type;
4837 bfd_boolean unresolved_reloc = FALSE;
4838 char *error_message = NULL;
4839
4840 r_symndx = ELFNN_R_SYM (rel->r_info);
4841 r_type = ELFNN_R_TYPE (rel->r_info);
4842
4843 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4844 howto = bfd_reloc.howto;
4845
4846 if (howto == NULL)
4847 {
4848 (*_bfd_error_handler)
4849 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4850 input_bfd, input_section, r_type);
4851 return FALSE;
4852 }
4853 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4854
4855 h = NULL;
4856 sym = NULL;
4857 sec = NULL;
4858
4859 if (r_symndx < symtab_hdr->sh_info)
4860 {
4861 sym = local_syms + r_symndx;
4862 sym_type = ELFNN_ST_TYPE (sym->st_info);
4863 sec = local_sections[r_symndx];
4864
4865 /* An object file might have a reference to a local
4866 undefined symbol. This is a daft object file, but we
4867 should at least do something about it. */
4868 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4869 && bfd_is_und_section (sec)
4870 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4871 {
4872 if (!info->callbacks->undefined_symbol
4873 (info, bfd_elf_string_from_elf_section
4874 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4875 input_bfd, input_section, rel->r_offset, TRUE))
4876 return FALSE;
4877 }
4878
4879 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4880
4881 /* Relocate against local STT_GNU_IFUNC symbol. */
4882 if (!info->relocatable
4883 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4884 {
4885 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4886 rel, FALSE);
4887 if (h == NULL)
4888 abort ();
4889
4890 /* Set STT_GNU_IFUNC symbol value. */
4891 h->root.u.def.value = sym->st_value;
4892 h->root.u.def.section = sec;
4893 }
4894 }
4895 else
4896 {
4897 bfd_boolean warned, ignored;
4898
4899 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4900 r_symndx, symtab_hdr, sym_hashes,
4901 h, sec, relocation,
4902 unresolved_reloc, warned, ignored);
4903
4904 sym_type = h->type;
4905 }
4906
4907 if (sec != NULL && discarded_section (sec))
4908 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4909 rel, 1, relend, howto, 0, contents);
4910
4911 if (info->relocatable)
4912 continue;
4913
4914 if (h != NULL)
4915 name = h->root.root.string;
4916 else
4917 {
4918 name = (bfd_elf_string_from_elf_section
4919 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4920 if (name == NULL || *name == '\0')
4921 name = bfd_section_name (input_bfd, sec);
4922 }
4923
4924 if (r_symndx != 0
4925 && r_type != R_AARCH64_NONE
4926 && r_type != R_AARCH64_NULL
4927 && (h == NULL
4928 || h->root.type == bfd_link_hash_defined
4929 || h->root.type == bfd_link_hash_defweak)
4930 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4931 {
4932 (*_bfd_error_handler)
4933 ((sym_type == STT_TLS
4934 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4935 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4936 input_bfd,
4937 input_section, (long) rel->r_offset, howto->name, name);
4938 }
4939
4940 /* We relax only if we can see that there can be a valid transition
4941 from a reloc type to another.
4942 We call elfNN_aarch64_final_link_relocate unless we're completely
4943 done, i.e., the relaxation produced the final output we want. */
4944
4945 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4946 h, r_symndx);
4947 if (relaxed_bfd_r_type != bfd_r_type)
4948 {
4949 bfd_r_type = relaxed_bfd_r_type;
4950 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4951 BFD_ASSERT (howto != NULL);
4952 r_type = howto->type;
4953 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4954 unresolved_reloc = 0;
4955 }
4956 else
4957 r = bfd_reloc_continue;
4958
4959 /* There may be multiple consecutive relocations for the
4960 same offset. In that case we are supposed to treat the
4961 output of each relocation as the addend for the next. */
4962 if (rel + 1 < relend
4963 && rel->r_offset == rel[1].r_offset
4964 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4965 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4966 save_addend = TRUE;
4967 else
4968 save_addend = FALSE;
4969
4970 if (r == bfd_reloc_continue)
4971 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4972 input_section, contents, rel,
4973 relocation, info, sec,
4974 h, &unresolved_reloc,
4975 save_addend, &addend, sym);
4976
4977 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4978 {
4979 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4980 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4981 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4982 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4983 {
4984 bfd_boolean need_relocs = FALSE;
4985 bfd_byte *loc;
4986 int indx;
4987 bfd_vma off;
4988
4989 off = symbol_got_offset (input_bfd, h, r_symndx);
4990 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4991
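	    /* A dynamic relocation is needed when producing a shared
	       object or when the symbol binds dynamically (INDX != 0),
	       unless it is an undefined weak symbol with non-default
	       visibility.  */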
4992 need_relocs =
4993 (info->shared || indx != 0) &&
4994 (h == NULL
4995 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4996 || h->root.type != bfd_link_hash_undefweak);
4997
4998 BFD_ASSERT (globals->root.srelgot != NULL);
4999
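	    /* A GD access uses two consecutive GOT slots: the DTPMOD
	       entry at OFF and the DTPREL entry at OFF + GOT_ENTRY_SIZE.  */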
5000 if (need_relocs)
5001 {
5002 Elf_Internal_Rela rela;
5003 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5004 rela.r_addend = 0;
5005 rela.r_offset = globals->root.sgot->output_section->vma +
5006 globals->root.sgot->output_offset + off;
5007
5008
5009 loc = globals->root.srelgot->contents;
5010 loc += globals->root.srelgot->reloc_count++
5011 		      * RELOC_SIZE (globals);
5012 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5013
5014 if (indx == 0)
5015 {
5016 bfd_put_NN (output_bfd,
5017 relocation - dtpoff_base (info),
5018 globals->root.sgot->contents + off
5019 + GOT_ENTRY_SIZE);
5020 }
5021 else
5022 {
5023 /* This TLS symbol is global. We emit a
5024 		     relocation to fix up the TLS offset at load
5025 time. */
5026 rela.r_info =
5027 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5028 rela.r_addend = 0;
5029 rela.r_offset =
5030 (globals->root.sgot->output_section->vma
5031 + globals->root.sgot->output_offset + off
5032 + GOT_ENTRY_SIZE);
5033
5034 loc = globals->root.srelgot->contents;
5035 loc += globals->root.srelgot->reloc_count++
5036 * RELOC_SIZE (globals);
5037 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5038 bfd_put_NN (output_bfd, (bfd_vma) 0,
5039 globals->root.sgot->contents + off
5040 + GOT_ENTRY_SIZE);
5041 }
5042 }
5043 else
5044 {
5045 bfd_put_NN (output_bfd, (bfd_vma) 1,
5046 globals->root.sgot->contents + off);
5047 bfd_put_NN (output_bfd,
5048 relocation - dtpoff_base (info),
5049 globals->root.sgot->contents + off
5050 + GOT_ENTRY_SIZE);
5051 }
5052
5053 symbol_got_offset_mark (input_bfd, h, r_symndx);
5054 }
5055 break;
5056
5057 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5058 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5059 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5060 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5061 {
5062 bfd_boolean need_relocs = FALSE;
5063 bfd_byte *loc;
5064 int indx;
5065 bfd_vma off;
5066
5067 off = symbol_got_offset (input_bfd, h, r_symndx);
5068
5069 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5070
5071 need_relocs =
5072 (info->shared || indx != 0) &&
5073 (h == NULL
5074 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5075 || h->root.type != bfd_link_hash_undefweak);
5076
5077 BFD_ASSERT (globals->root.srelgot != NULL);
5078
5079 if (need_relocs)
5080 {
5081 Elf_Internal_Rela rela;
5082
5083 if (indx == 0)
5084 rela.r_addend = relocation - dtpoff_base (info);
5085 else
5086 rela.r_addend = 0;
5087
5088 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5089 rela.r_offset = globals->root.sgot->output_section->vma +
5090 globals->root.sgot->output_offset + off;
5091
5092 loc = globals->root.srelgot->contents;
5093 loc += globals->root.srelgot->reloc_count++
5094 		     * RELOC_SIZE (globals);
5095
5096 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5097
5098 bfd_put_NN (output_bfd, rela.r_addend,
5099 globals->root.sgot->contents + off);
5100 }
5101 else
5102 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5103 globals->root.sgot->contents + off);
5104
5105 symbol_got_offset_mark (input_bfd, h, r_symndx);
5106 }
5107 break;
5108
5109 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5110 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5111 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5112 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5113 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5114 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5115 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5116 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5117 break;
5118
5119 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5120 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5121 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5122 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5123 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5124 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5125 {
5126 bfd_boolean need_relocs = FALSE;
5127 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5128 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5129
5130 need_relocs = (h == NULL
5131 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5132 || h->root.type != bfd_link_hash_undefweak);
5133
5134 BFD_ASSERT (globals->root.srelgot != NULL);
5135 BFD_ASSERT (globals->root.sgot != NULL);
5136
5137 if (need_relocs)
5138 {
5139 bfd_byte *loc;
5140 Elf_Internal_Rela rela;
5141 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5142
5143 rela.r_addend = 0;
5144 rela.r_offset = (globals->root.sgotplt->output_section->vma
5145 + globals->root.sgotplt->output_offset
5146 + off + globals->sgotplt_jump_table_size);
5147
5148 if (indx == 0)
5149 rela.r_addend = relocation - dtpoff_base (info);
5150
5151 		  /* Allocate the next available slot in the PLT reloc
5152 		     section to hold our R_AARCH64_TLSDESC; the next
5153 		     available slot is determined from reloc_count,
5154 		     which we then step.  Note that reloc_count was
5155 		     artificially moved down while allocating slots for
5156 		     the real PLT relocs, so that all of the PLT relocs
5157 		     fit above the initial reloc_count and the extra
5158 		     entries fit below.  */
5159 loc = globals->root.srelplt->contents;
5160 loc += globals->root.srelplt->reloc_count++
5161 * RELOC_SIZE (globals);
5162
5163 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5164
5165 bfd_put_NN (output_bfd, (bfd_vma) 0,
5166 globals->root.sgotplt->contents + off +
5167 globals->sgotplt_jump_table_size);
5168 bfd_put_NN (output_bfd, (bfd_vma) 0,
5169 globals->root.sgotplt->contents + off +
5170 globals->sgotplt_jump_table_size +
5171 GOT_ENTRY_SIZE);
5172 }
5173
5174 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5175 }
5176 break;
5177 default:
5178 break;
5179 }
5180
5181 if (!save_addend)
5182 addend = 0;
5183
5184
5185 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5186 because such sections are not SEC_ALLOC and thus ld.so will
5187 not process them. */
5188 if (unresolved_reloc
5189 && !((input_section->flags & SEC_DEBUGGING) != 0
5190 && h->def_dynamic)
5191 && _bfd_elf_section_offset (output_bfd, info, input_section,
5192 				     rel->r_offset) != (bfd_vma) - 1)
5193 {
5194 (*_bfd_error_handler)
5195 (_
5196 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5197 input_bfd, input_section, (long) rel->r_offset, howto->name,
5198 h->root.root.string);
5199 return FALSE;
5200 }
5201
5202 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5203 {
5204 switch (r)
5205 {
5206 case bfd_reloc_overflow:
5207 /* If the overflowing reloc was to an undefined symbol,
5208 we have already printed one error message and there
5209 is no point complaining again. */
5210 if ((!h ||
5211 h->root.type != bfd_link_hash_undefined)
5212 && (!((*info->callbacks->reloc_overflow)
5213 (info, (h ? &h->root : NULL), name, howto->name,
5214 (bfd_vma) 0, input_bfd, input_section,
5215 rel->r_offset))))
5216 return FALSE;
5217 break;
5218
5219 case bfd_reloc_undefined:
5220 if (!((*info->callbacks->undefined_symbol)
5221 (info, name, input_bfd, input_section,
5222 rel->r_offset, TRUE)))
5223 return FALSE;
5224 break;
5225
5226 case bfd_reloc_outofrange:
5227 error_message = _("out of range");
5228 goto common_error;
5229
5230 case bfd_reloc_notsupported:
5231 error_message = _("unsupported relocation");
5232 goto common_error;
5233
5234 case bfd_reloc_dangerous:
5235 /* error_message should already be set. */
5236 goto common_error;
5237
5238 default:
5239 error_message = _("unknown error");
5240 /* Fall through. */
5241
5242 common_error:
5243 BFD_ASSERT (error_message != NULL);
5244 if (!((*info->callbacks->reloc_dangerous)
5245 (info, error_message, input_bfd, input_section,
5246 rel->r_offset)))
5247 return FALSE;
5248 break;
5249 }
5250 }
5251 }
5252
5253 return TRUE;
5254 }
5255
5256 /* Set the right machine number. */
5257
5258 static bfd_boolean
5259 elfNN_aarch64_object_p (bfd *abfd)
5260 {
5261 #if ARCH_SIZE == 32
5262 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5263 #else
5264 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5265 #endif
5266 return TRUE;
5267 }
5268
5269 /* Function to keep AArch64 specific flags in the ELF header. */
5270
5271 static bfd_boolean
5272 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5273 {
5274 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5275 {
5276 }
5277 else
5278 {
5279 elf_elfheader (abfd)->e_flags = flags;
5280 elf_flags_init (abfd) = TRUE;
5281 }
5282
5283 return TRUE;
5284 }
5285
5286 /* Merge backend specific data from an object file to the output
5287 object file when linking. */
5288
5289 static bfd_boolean
5290 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5291 {
5292 flagword out_flags;
5293 flagword in_flags;
5294 bfd_boolean flags_compatible = TRUE;
5295 asection *sec;
5296
5297   /* Check if we have the same endianness.  */
5298 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5299 return FALSE;
5300
5301 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5302 return TRUE;
5303
5304 /* The input BFD must have had its flags initialised. */
5305 /* The following seems bogus to me -- The flags are initialized in
5306 the assembler but I don't think an elf_flags_init field is
5307 written into the object. */
5308 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5309
5310 in_flags = elf_elfheader (ibfd)->e_flags;
5311 out_flags = elf_elfheader (obfd)->e_flags;
5312
5313 if (!elf_flags_init (obfd))
5314 {
5315 /* If the input is the default architecture and had the default
5316 flags then do not bother setting the flags for the output
5317 architecture, instead allow future merges to do this. If no
5318 future merges ever set these flags then they will retain their
5319 uninitialised values which, surprise surprise, correspond
5320 to the default values. */
5321 if (bfd_get_arch_info (ibfd)->the_default
5322 && elf_elfheader (ibfd)->e_flags == 0)
5323 return TRUE;
5324
5325 elf_flags_init (obfd) = TRUE;
5326 elf_elfheader (obfd)->e_flags = in_flags;
5327
5328 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5329 && bfd_get_arch_info (obfd)->the_default)
5330 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5331 bfd_get_mach (ibfd));
5332
5333 return TRUE;
5334 }
5335
5336 /* Identical flags must be compatible. */
5337 if (in_flags == out_flags)
5338 return TRUE;
5339
5340 /* Check to see if the input BFD actually contains any sections. If
5341 not, its flags may not have been initialised either, but it
5342 cannot actually cause any incompatibility. Do not short-circuit
5343 dynamic objects; their section list may be emptied by
5344 elf_link_add_object_symbols.
5345
5346 Also check to see if there are no code sections in the input.
5347 In this case there is no need to check for code specific flags.
5348 XXX - do we need to worry about floating-point format compatibility
5349 in data sections? */
5350 if (!(ibfd->flags & DYNAMIC))
5351 {
5352 bfd_boolean null_input_bfd = TRUE;
5353 bfd_boolean only_data_sections = TRUE;
5354
5355 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5356 {
5357 if ((bfd_get_section_flags (ibfd, sec)
5358 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5359 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5360 only_data_sections = FALSE;
5361
5362 null_input_bfd = FALSE;
5363 break;
5364 }
5365
5366 if (null_input_bfd || only_data_sections)
5367 return TRUE;
5368 }
5369
5370 return flags_compatible;
5371 }
5372
5373 /* Display the flags field. */
5374
5375 static bfd_boolean
5376 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5377 {
5378 FILE *file = (FILE *) ptr;
5379 unsigned long flags;
5380
5381 BFD_ASSERT (abfd != NULL && ptr != NULL);
5382
5383 /* Print normal ELF private data. */
5384 _bfd_elf_print_private_bfd_data (abfd, ptr);
5385
5386 flags = elf_elfheader (abfd)->e_flags;
5387 /* Ignore init flag - it may not be set, despite the flags field
5388 containing valid data. */
5389
5390 /* xgettext:c-format */
5391 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5392
5393 if (flags)
5394 fprintf (file, _("<Unrecognised flag bits set>"));
5395
5396 fputc ('\n', file);
5397
5398 return TRUE;
5399 }
5400
5401 /* Update the got entry reference counts for the section being removed. */
5402
5403 static bfd_boolean
5404 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5405 struct bfd_link_info *info,
5406 asection *sec,
5407 const Elf_Internal_Rela * relocs)
5408 {
5409 struct elf_aarch64_link_hash_table *htab;
5410 Elf_Internal_Shdr *symtab_hdr;
5411 struct elf_link_hash_entry **sym_hashes;
5412 struct elf_aarch64_local_symbol *locals;
5413 const Elf_Internal_Rela *rel, *relend;
5414
5415 if (info->relocatable)
5416 return TRUE;
5417
5418 htab = elf_aarch64_hash_table (info);
5419
5420 if (htab == NULL)
5421 return FALSE;
5422
5423 elf_section_data (sec)->local_dynrel = NULL;
5424
5425 symtab_hdr = &elf_symtab_hdr (abfd);
5426 sym_hashes = elf_sym_hashes (abfd);
5427
5428 locals = elf_aarch64_locals (abfd);
5429
5430 relend = relocs + sec->reloc_count;
5431 for (rel = relocs; rel < relend; rel++)
5432 {
5433 unsigned long r_symndx;
5434 unsigned int r_type;
5435 struct elf_link_hash_entry *h = NULL;
5436
5437 r_symndx = ELFNN_R_SYM (rel->r_info);
5438
5439 if (r_symndx >= symtab_hdr->sh_info)
5440 {
5441
5442 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5443 while (h->root.type == bfd_link_hash_indirect
5444 || h->root.type == bfd_link_hash_warning)
5445 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5446 }
5447 else
5448 {
5449 Elf_Internal_Sym *isym;
5450
5451 /* A local symbol. */
5452 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5453 abfd, r_symndx);
5454
5455 /* Check relocation against local STT_GNU_IFUNC symbol. */
5456 if (isym != NULL
5457 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5458 {
5459 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5460 if (h == NULL)
5461 abort ();
5462 }
5463 }
5464
5465 if (h)
5466 {
5467 struct elf_aarch64_link_hash_entry *eh;
5468 struct elf_dyn_relocs **pp;
5469 struct elf_dyn_relocs *p;
5470
5471 eh = (struct elf_aarch64_link_hash_entry *) h;
5472
5473 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5474 if (p->sec == sec)
5475 {
5476 /* Everything must go for SEC. */
5477 *pp = p->next;
5478 break;
5479 }
5480 }
5481
5482 r_type = ELFNN_R_TYPE (rel->r_info);
5483 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5484 {
5485 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5486 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5487 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5488 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5489 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5490 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5491 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5492 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5493 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5494 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5495 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5496 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5497 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5498 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5499 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5500 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5501 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5502 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5503 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5504 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5505 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5506 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5507 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5508 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5509 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5510 if (h != NULL)
5511 {
5512 if (h->got.refcount > 0)
5513 h->got.refcount -= 1;
5514
5515 if (h->type == STT_GNU_IFUNC)
5516 {
5517 if (h->plt.refcount > 0)
5518 h->plt.refcount -= 1;
5519 }
5520 }
5521 else if (locals != NULL)
5522 {
5523 if (locals[r_symndx].got_refcount > 0)
5524 locals[r_symndx].got_refcount -= 1;
5525 }
5526 break;
5527
5528 case BFD_RELOC_AARCH64_CALL26:
5529 case BFD_RELOC_AARCH64_JUMP26:
5530 /* If this is a local symbol then we resolve it
5531 directly without creating a PLT entry. */
5532 if (h == NULL)
5533 continue;
5534
5535 if (h->plt.refcount > 0)
5536 h->plt.refcount -= 1;
5537 break;
5538
5539 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5540 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5541 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5542 case BFD_RELOC_AARCH64_MOVW_G3:
5543 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5544 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5545 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5546 case BFD_RELOC_AARCH64_NN:
5547 if (h != NULL && info->executable)
5548 {
5549 if (h->plt.refcount > 0)
5550 h->plt.refcount -= 1;
5551 }
5552 break;
5553
5554 default:
5555 break;
5556 }
5557 }
5558
5559 return TRUE;
5560 }
5561
5562 /* Adjust a symbol defined by a dynamic object and referenced by a
5563 regular object. The current definition is in some section of the
5564 dynamic object, but we're not including those sections. We have to
5565 change the definition to something the rest of the link can
5566 understand. */
5567
5568 static bfd_boolean
5569 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5570 struct elf_link_hash_entry *h)
5571 {
5572 struct elf_aarch64_link_hash_table *htab;
5573 asection *s;
5574
5575 /* If this is a function, put it in the procedure linkage table. We
5576 will fill in the contents of the procedure linkage table later,
5577 when we know the address of the .got section. */
5578 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5579 {
5580 if (h->plt.refcount <= 0
5581 || (h->type != STT_GNU_IFUNC
5582 && (SYMBOL_CALLS_LOCAL (info, h)
5583 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5584 && h->root.type == bfd_link_hash_undefweak))))
5585 {
5586 /* This case can occur if we saw a CALL26 reloc in
5587 an input file, but the symbol wasn't referred to
5588 by a dynamic object, or all references were
5589 garbage collected. In that case the call can be
5590 resolved directly, so no PLT entry is needed. */
5591 h->plt.offset = (bfd_vma) - 1;
5592 h->needs_plt = 0;
5593 }
5594
5595 return TRUE;
5596 }
5597 else
5598 /* It's possible that we incorrectly decided a .plt reloc was
5599 needed for a PC-relative reloc to a non-function sym in
5600 check_relocs. We can't decide accurately between function and
5601 non-function syms in check_relocs; objects loaded later in
5602 the link may change h->type. So fix it now. */
5603 h->plt.offset = (bfd_vma) - 1;
5604
5605
5606 /* If this is a weak symbol, and there is a real definition, the
5607 processor independent code will have arranged for us to see the
5608 real definition first, and we can just use the same value. */
5609 if (h->u.weakdef != NULL)
5610 {
5611 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5612 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5613 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5614 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5615 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5616 h->non_got_ref = h->u.weakdef->non_got_ref;
5617 return TRUE;
5618 }
5619
5620 /* If we are creating a shared library, we must presume that the
5621 only references to the symbol are via the global offset table.
5622 For such cases we need not do anything here; the relocations will
5623 be handled correctly by relocate_section. */
5624 if (info->shared)
5625 return TRUE;
5626
5627 /* If there are no references to this symbol that do not use the
5628 GOT, we don't need to generate a copy reloc. */
5629 if (!h->non_got_ref)
5630 return TRUE;
5631
5632 /* If -z nocopyreloc was given, we won't generate them either. */
5633 if (info->nocopyreloc)
5634 {
5635 h->non_got_ref = 0;
5636 return TRUE;
5637 }
5638
5639 /* We must allocate the symbol in our .dynbss section, which will
5640 become part of the .bss section of the executable. There will be
5641 an entry for this symbol in the .dynsym section. The dynamic
5642 object will contain position independent code, so all references
5643 from the dynamic object to this symbol will go through the global
5644 offset table. The dynamic linker will use the .dynsym entry to
5645 determine the address it must put in the global offset table, so
5646 both the dynamic object and the regular object will refer to the
5647 same memory location for the variable. */
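/* Illustrative sketch (editorial, not from the original sources): for a
   data symbol `foo' defined in a shared library and referenced directly
   (non-GOT) from the executable, the space reserved here in .dynbss
   together with the R_AARCH64_COPY reloc accounted for below amounts to
   roughly:

       .dynbss:   <sizeof (foo) bytes reserved for the copy of foo>
       .rela.bss: R_AARCH64_COPY  <address of the .dynbss slot>  foo

   At startup the dynamic linker copies foo's initial value from the
   shared library into that slot, and both the executable and the shared
   library then resolve foo to the copy in the executable. */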
5648
5649 htab = elf_aarch64_hash_table (info);
5650
5651 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5652 to copy the initial value out of the dynamic object and into the
5653 runtime process image. */
5654 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5655 {
5656 htab->srelbss->size += RELOC_SIZE (htab);
5657 h->needs_copy = 1;
5658 }
5659
5660 s = htab->sdynbss;
5661
5662 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5663
5664 }
5665
5666 static bfd_boolean
5667 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5668 {
5669 struct elf_aarch64_local_symbol *locals;
5670 locals = elf_aarch64_locals (abfd);
5671 if (locals == NULL)
5672 {
5673 locals = (struct elf_aarch64_local_symbol *)
5674 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5675 if (locals == NULL)
5676 return FALSE;
5677 elf_aarch64_locals (abfd) = locals;
5678 }
5679 return TRUE;
5680 }
5681
5682 /* Create the .got section to hold the global offset table. */
5683
5684 static bfd_boolean
5685 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5686 {
5687 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5688 flagword flags;
5689 asection *s;
5690 struct elf_link_hash_entry *h;
5691 struct elf_link_hash_table *htab = elf_hash_table (info);
5692
5693 /* This function may be called more than once. */
5694 s = bfd_get_linker_section (abfd, ".got");
5695 if (s != NULL)
5696 return TRUE;
5697
5698 flags = bed->dynamic_sec_flags;
5699
5700 s = bfd_make_section_anyway_with_flags (abfd,
5701 (bed->rela_plts_and_copies_p
5702 ? ".rela.got" : ".rel.got"),
5703 (bed->dynamic_sec_flags
5704 | SEC_READONLY));
5705 if (s == NULL
5706 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5707 return FALSE;
5708 htab->srelgot = s;
5709
5710 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5711 if (s == NULL
5712 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5713 return FALSE;
5714 htab->sgot = s;
5715 htab->sgot->size += GOT_ENTRY_SIZE;
5716
5717 if (bed->want_got_sym)
5718 {
5719 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5720 (or .got.plt) section. We don't do this in the linker script
5721 because we don't want to define the symbol if we are not creating
5722 a global offset table. */
5723 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5724 "_GLOBAL_OFFSET_TABLE_");
5725 elf_hash_table (info)->hgot = h;
5726 if (h == NULL)
5727 return FALSE;
5728 }
5729
5730 if (bed->want_got_plt)
5731 {
5732 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5733 if (s == NULL
5734 || !bfd_set_section_alignment (abfd, s,
5735 bed->s->log_file_align))
5736 return FALSE;
5737 htab->sgotplt = s;
5738 }
5739
5740 /* The first bit of the global offset table is the header. */
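/* Editorial note (hedged, not taken from the original sources): for this
   backend the header is conventionally three GOT_ENTRY_SIZE slots.
   Slot [0] is expected to hold the address of _DYNAMIC, while slots [1]
   and [2] are reserved for the dynamic linker (link map and resolver
   entry); compare the "special GOT slots [0] [1] and [2]" note in
   elfNN_aarch64_allocate_dynrelocs. The authoritative size is whatever
   bed->got_header_size says, not this comment. */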
5741 s->size += bed->got_header_size;
5742
5743 return TRUE;
5744 }
5745
5746 /* Look through the relocs for a section during the first phase. */
5747
5748 static bfd_boolean
5749 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5750 asection *sec, const Elf_Internal_Rela *relocs)
5751 {
5752 Elf_Internal_Shdr *symtab_hdr;
5753 struct elf_link_hash_entry **sym_hashes;
5754 const Elf_Internal_Rela *rel;
5755 const Elf_Internal_Rela *rel_end;
5756 asection *sreloc;
5757
5758 struct elf_aarch64_link_hash_table *htab;
5759
5760 if (info->relocatable)
5761 return TRUE;
5762
5763 BFD_ASSERT (is_aarch64_elf (abfd));
5764
5765 htab = elf_aarch64_hash_table (info);
5766 sreloc = NULL;
5767
5768 symtab_hdr = &elf_symtab_hdr (abfd);
5769 sym_hashes = elf_sym_hashes (abfd);
5770
5771 rel_end = relocs + sec->reloc_count;
5772 for (rel = relocs; rel < rel_end; rel++)
5773 {
5774 struct elf_link_hash_entry *h;
5775 unsigned long r_symndx;
5776 unsigned int r_type;
5777 bfd_reloc_code_real_type bfd_r_type;
5778 Elf_Internal_Sym *isym;
5779
5780 r_symndx = ELFNN_R_SYM (rel->r_info);
5781 r_type = ELFNN_R_TYPE (rel->r_info);
5782
5783 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5784 {
5785 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5786 r_symndx);
5787 return FALSE;
5788 }
5789
5790 if (r_symndx < symtab_hdr->sh_info)
5791 {
5792 /* A local symbol. */
5793 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5794 abfd, r_symndx);
5795 if (isym == NULL)
5796 return FALSE;
5797
5798 /* Check relocation against local STT_GNU_IFUNC symbol. */
5799 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5800 {
5801 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5802 TRUE);
5803 if (h == NULL)
5804 return FALSE;
5805
5806 /* Fake a STT_GNU_IFUNC symbol. */
5807 h->type = STT_GNU_IFUNC;
5808 h->def_regular = 1;
5809 h->ref_regular = 1;
5810 h->forced_local = 1;
5811 h->root.type = bfd_link_hash_defined;
5812 }
5813 else
5814 h = NULL;
5815 }
5816 else
5817 {
5818 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5819 while (h->root.type == bfd_link_hash_indirect
5820 || h->root.type == bfd_link_hash_warning)
5821 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5822
5823 /* PR15323, ref flags aren't set for references in the same
5824 object. */
5825 h->root.non_ir_ref = 1;
5826 }
5827
5828 /* Could be done earlier, if h were already available. */
5829 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5830
5831 if (h != NULL)
5832 {
5833 /* Create the ifunc sections for static executables. If we
5834 never see an indirect function symbol nor we are building
5835 a static executable, those sections will be empty and
5836 won't appear in output. */
5837 switch (bfd_r_type)
5838 {
5839 default:
5840 break;
5841
5842 case BFD_RELOC_AARCH64_NN:
5843 case BFD_RELOC_AARCH64_CALL26:
5844 case BFD_RELOC_AARCH64_JUMP26:
5845 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5846 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5847 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5848 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5849 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5850 case BFD_RELOC_AARCH64_ADD_LO12:
5851 if (htab->root.dynobj == NULL)
5852 htab->root.dynobj = abfd;
5853 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5854 return FALSE;
5855 break;
5856 }
5857
5858 /* It is referenced by a non-shared object. */
5859 h->ref_regular = 1;
5860 h->root.non_ir_ref = 1;
5861 }
5862
5863 switch (bfd_r_type)
5864 {
5865 case BFD_RELOC_AARCH64_NN:
5866
5867 /* We don't need to handle relocs into sections not going into
5868 the "real" output. */
5869 if ((sec->flags & SEC_ALLOC) == 0)
5870 break;
5871
5872 if (h != NULL)
5873 {
5874 if (!info->shared)
5875 h->non_got_ref = 1;
5876
5877 h->plt.refcount += 1;
5878 h->pointer_equality_needed = 1;
5879 }
5880
5881 /* No need to do anything if we're not creating a shared
5882 object. */
5883 if (! info->shared)
5884 break;
5885
5886 {
5887 struct elf_dyn_relocs *p;
5888 struct elf_dyn_relocs **head;
5889
5890 /* We must copy these reloc types into the output file.
5891 Create a reloc section in dynobj and make room for
5892 this reloc. */
5893 if (sreloc == NULL)
5894 {
5895 if (htab->root.dynobj == NULL)
5896 htab->root.dynobj = abfd;
5897
5898 sreloc = _bfd_elf_make_dynamic_reloc_section
5899 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5900
5901 if (sreloc == NULL)
5902 return FALSE;
5903 }
5904
5905 /* If this is a global symbol, we count the number of
5906 relocations we need for this symbol. */
5907 if (h != NULL)
5908 {
5909 struct elf_aarch64_link_hash_entry *eh;
5910 eh = (struct elf_aarch64_link_hash_entry *) h;
5911 head = &eh->dyn_relocs;
5912 }
5913 else
5914 {
5915 /* Track dynamic relocs needed for local syms too.
5916 We really need local syms available to do this
5917 easily. Oh well. */
5918
5919 asection *s;
5920 void **vpp;
5921
5922 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5923 abfd, r_symndx);
5924 if (isym == NULL)
5925 return FALSE;
5926
5927 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5928 if (s == NULL)
5929 s = sec;
5930
5931 /* Beware of type punned pointers vs strict aliasing
5932 rules. */
5933 vpp = &(elf_section_data (s)->local_dynrel);
5934 head = (struct elf_dyn_relocs **) vpp;
5935 }
5936
5937 p = *head;
5938 if (p == NULL || p->sec != sec)
5939 {
5940 bfd_size_type amt = sizeof *p;
5941 p = ((struct elf_dyn_relocs *)
5942 bfd_zalloc (htab->root.dynobj, amt));
5943 if (p == NULL)
5944 return FALSE;
5945 p->next = *head;
5946 *head = p;
5947 p->sec = sec;
5948 }
5949
5950 p->count += 1;
5951
5952 }
5953 break;
5954
5955 /* RR: We probably want to keep a consistency check that
5956 there are no dangling GOT_PAGE relocs. */
5957 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5958 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5959 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5960 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5961 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5962 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5963 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5964 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5965 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5966 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5967 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5968 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5969 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5970 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5971 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5972 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5973 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5974 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5975 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5976 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5977 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5978 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5979 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5980 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5981 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5982 {
5983 unsigned got_type;
5984 unsigned old_got_type;
5985
5986 got_type = aarch64_reloc_got_type (bfd_r_type);
5987
5988 if (h)
5989 {
5990 h->got.refcount += 1;
5991 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5992 }
5993 else
5994 {
5995 struct elf_aarch64_local_symbol *locals;
5996
5997 if (!elfNN_aarch64_allocate_local_symbols
5998 (abfd, symtab_hdr->sh_info))
5999 return FALSE;
6000
6001 locals = elf_aarch64_locals (abfd);
6002 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6003 locals[r_symndx].got_refcount += 1;
6004 old_got_type = locals[r_symndx].got_type;
6005 }
6006
6007 /* If a variable is accessed with both general dynamic TLS
6008 methods, two slots may be created. */
6009 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6010 got_type |= old_got_type;
6011
6012 /* We will already have issued an error message if there
6013 is a TLS/non-TLS mismatch, based on the symbol type.
6014 So just combine any TLS types needed. */
6015 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6016 && got_type != GOT_NORMAL)
6017 got_type |= old_got_type;
6018
6019 /* If the symbol is accessed by both IE and GD methods, we
6020 are able to relax. Turn off the GD flag, without
6021 messing up with any other kind of TLS types that may be
6022 involved. */
6023 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6024 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
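/* Worked example (editorial): if `foo' was first seen through a TLSDESC
   reloc (got_type == GOT_TLSDESC_GD) and is now seen through an IE
   reloc (GOT_TLS_IE), the merges above give GOT_TLSDESC_GD | GOT_TLS_IE
   and the relaxation here strips the GD bits, so only a single IE GOT
   slot ends up being allocated for it. */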
6025
6026 if (old_got_type != got_type)
6027 {
6028 if (h != NULL)
6029 elf_aarch64_hash_entry (h)->got_type = got_type;
6030 else
6031 {
6032 struct elf_aarch64_local_symbol *locals;
6033 locals = elf_aarch64_locals (abfd);
6034 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6035 locals[r_symndx].got_type = got_type;
6036 }
6037 }
6038
6039 if (htab->root.dynobj == NULL)
6040 htab->root.dynobj = abfd;
6041 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6042 return FALSE;
6043 break;
6044 }
6045
6046 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6047 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6048 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6049 case BFD_RELOC_AARCH64_MOVW_G3:
6050 if (info->shared)
6051 {
6052 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6053 (*_bfd_error_handler)
6054 (_("%B: relocation %s against `%s' can not be used when making "
6055 "a shared object; recompile with -fPIC"),
6056 abfd, elfNN_aarch64_howto_table[howto_index].name,
6057 (h) ? h->root.root.string : "a local symbol");
6058 bfd_set_error (bfd_error_bad_value);
6059 return FALSE;
6060 }
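/* Fall through: in a non-shared link these MOVW relocs are handled the
   same way as the PC-relative ADR relocs below. */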
6061
6062 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6063 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6064 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6065 if (h != NULL && info->executable)
6066 {
6067 /* If this reloc is in a read-only section, we might
6068 need a copy reloc. We can't check reliably at this
6069 stage whether the section is read-only, as input
6070 sections have not yet been mapped to output sections.
6071 Tentatively set the flag for now, and correct in
6072 adjust_dynamic_symbol. */
6073 h->non_got_ref = 1;
6074 h->plt.refcount += 1;
6075 h->pointer_equality_needed = 1;
6076 }
6077 /* FIXME: RR: we need to handle these in shared libraries
6078 and essentially bomb out, as these are non-PIC
6079 relocations in shared libraries. */
6080 break;
6081
6082 case BFD_RELOC_AARCH64_CALL26:
6083 case BFD_RELOC_AARCH64_JUMP26:
6084 /* If this is a local symbol then we resolve it
6085 directly without creating a PLT entry. */
6086 if (h == NULL)
6087 continue;
6088
6089 h->needs_plt = 1;
6090 if (h->plt.refcount <= 0)
6091 h->plt.refcount = 1;
6092 else
6093 h->plt.refcount += 1;
6094 break;
6095
6096 default:
6097 break;
6098 }
6099 }
6100
6101 return TRUE;
6102 }
6103
6104 /* Treat mapping symbols as special target symbols. */
6105
6106 static bfd_boolean
6107 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6108 asymbol *sym)
6109 {
6110 return bfd_is_aarch64_special_symbol_name (sym->name,
6111 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6112 }
6113
6114 /* This is a copy of elf_find_function () from elf.c except that
6115 AArch64 mapping symbols are ignored when looking for function names. */
6116
6117 static bfd_boolean
6118 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6119 asymbol **symbols,
6120 asection *section,
6121 bfd_vma offset,
6122 const char **filename_ptr,
6123 const char **functionname_ptr)
6124 {
6125 const char *filename = NULL;
6126 asymbol *func = NULL;
6127 bfd_vma low_func = 0;
6128 asymbol **p;
6129
6130 for (p = symbols; *p != NULL; p++)
6131 {
6132 elf_symbol_type *q;
6133
6134 q = (elf_symbol_type *) * p;
6135
6136 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6137 {
6138 default:
6139 break;
6140 case STT_FILE:
6141 filename = bfd_asymbol_name (&q->symbol);
6142 break;
6143 case STT_FUNC:
6144 case STT_NOTYPE:
6145 /* Skip mapping symbols. */
6146 if ((q->symbol.flags & BSF_LOCAL)
6147 && (bfd_is_aarch64_special_symbol_name
6148 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6149 continue;
6150 /* Fall through. */
6151 if (bfd_get_section (&q->symbol) == section
6152 && q->symbol.value >= low_func && q->symbol.value <= offset)
6153 {
6154 func = (asymbol *) q;
6155 low_func = q->symbol.value;
6156 }
6157 break;
6158 }
6159 }
6160
6161 if (func == NULL)
6162 return FALSE;
6163
6164 if (filename_ptr)
6165 *filename_ptr = filename;
6166 if (functionname_ptr)
6167 *functionname_ptr = bfd_asymbol_name (func);
6168
6169 return TRUE;
6170 }
6171
6172
6173 /* Find the nearest line to a particular section and offset, for error
6174 reporting. This code is a duplicate of the code in elf.c, except
6175 that it uses aarch64_elf_find_function. */
6176
6177 static bfd_boolean
6178 elfNN_aarch64_find_nearest_line (bfd *abfd,
6179 asymbol **symbols,
6180 asection *section,
6181 bfd_vma offset,
6182 const char **filename_ptr,
6183 const char **functionname_ptr,
6184 unsigned int *line_ptr,
6185 unsigned int *discriminator_ptr)
6186 {
6187 bfd_boolean found = FALSE;
6188
6189 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6190 filename_ptr, functionname_ptr,
6191 line_ptr, discriminator_ptr,
6192 dwarf_debug_sections, 0,
6193 &elf_tdata (abfd)->dwarf2_find_line_info))
6194 {
6195 if (!*functionname_ptr)
6196 aarch64_elf_find_function (abfd, symbols, section, offset,
6197 *filename_ptr ? NULL : filename_ptr,
6198 functionname_ptr);
6199
6200 return TRUE;
6201 }
6202
6203 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6204 toolchain uses DWARF1. */
6205
6206 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6207 &found, filename_ptr,
6208 functionname_ptr, line_ptr,
6209 &elf_tdata (abfd)->line_info))
6210 return FALSE;
6211
6212 if (found && (*functionname_ptr || *line_ptr))
6213 return TRUE;
6214
6215 if (symbols == NULL)
6216 return FALSE;
6217
6218 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6219 filename_ptr, functionname_ptr))
6220 return FALSE;
6221
6222 *line_ptr = 0;
6223 return TRUE;
6224 }
6225
6226 static bfd_boolean
6227 elfNN_aarch64_find_inliner_info (bfd *abfd,
6228 const char **filename_ptr,
6229 const char **functionname_ptr,
6230 unsigned int *line_ptr)
6231 {
6232 bfd_boolean found;
6233 found = _bfd_dwarf2_find_inliner_info
6234 (abfd, filename_ptr,
6235 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6236 return found;
6237 }
6238
6239
6240 static void
6241 elfNN_aarch64_post_process_headers (bfd *abfd,
6242 struct bfd_link_info *link_info)
6243 {
6244 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6245
6246 i_ehdrp = elf_elfheader (abfd);
6247 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6248
6249 _bfd_elf_post_process_headers (abfd, link_info);
6250 }
6251
6252 static enum elf_reloc_type_class
6253 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6254 const asection *rel_sec ATTRIBUTE_UNUSED,
6255 const Elf_Internal_Rela *rela)
6256 {
6257 switch ((int) ELFNN_R_TYPE (rela->r_info))
6258 {
6259 case AARCH64_R (RELATIVE):
6260 return reloc_class_relative;
6261 case AARCH64_R (JUMP_SLOT):
6262 return reloc_class_plt;
6263 case AARCH64_R (COPY):
6264 return reloc_class_copy;
6265 default:
6266 return reloc_class_normal;
6267 }
6268 }
6269
6270 /* Handle an AArch64 specific section when reading an object file. This is
6271 called when bfd_section_from_shdr finds a section with an unknown
6272 type. */
6273
6274 static bfd_boolean
6275 elfNN_aarch64_section_from_shdr (bfd *abfd,
6276 Elf_Internal_Shdr *hdr,
6277 const char *name, int shindex)
6278 {
6279 /* There ought to be a place to keep ELF backend specific flags, but
6280 at the moment there isn't one. We just keep track of the
6281 sections by their name, instead. Fortunately, the ABI gives
6282 names for all the AArch64 specific sections, so we will probably get
6283 away with this. */
6284 switch (hdr->sh_type)
6285 {
6286 case SHT_AARCH64_ATTRIBUTES:
6287 break;
6288
6289 default:
6290 return FALSE;
6291 }
6292
6293 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6294 return FALSE;
6295
6296 return TRUE;
6297 }
6298
6299 /* A structure used to record a list of sections, independently
6300 of the next and prev fields in the asection structure. */
6301 typedef struct section_list
6302 {
6303 asection *sec;
6304 struct section_list *next;
6305 struct section_list *prev;
6306 }
6307 section_list;
6308
6309 /* Unfortunately we need to keep a list of sections for which
6310 an _aarch64_elf_section_data structure has been allocated. This
6311 is because it is possible for functions like elfNN_aarch64_write_section
6312 to be called on a section which has had an elf_data_structure
6313 allocated for it (and so the used_by_bfd field is valid) but
6314 for which the AArch64 extended version of this structure - the
6315 _aarch64_elf_section_data structure - has not been allocated. */
6316 static section_list *sections_with_aarch64_elf_section_data = NULL;
6317
6318 static void
6319 record_section_with_aarch64_elf_section_data (asection *sec)
6320 {
6321 struct section_list *entry;
6322
6323 entry = bfd_malloc (sizeof (*entry));
6324 if (entry == NULL)
6325 return;
6326 entry->sec = sec;
6327 entry->next = sections_with_aarch64_elf_section_data;
6328 entry->prev = NULL;
6329 if (entry->next != NULL)
6330 entry->next->prev = entry;
6331 sections_with_aarch64_elf_section_data = entry;
6332 }
6333
6334 static struct section_list *
6335 find_aarch64_elf_section_entry (asection *sec)
6336 {
6337 struct section_list *entry;
6338 static struct section_list *last_entry = NULL;
6339
6340 /* This is a short cut for the typical case where the sections are added
6341 to the sections_with_aarch64_elf_section_data list in forward order and
6342 then looked up here in backwards order. This makes a real difference
6343 to the ld-srec/sec64k.exp linker test. */
6344 entry = sections_with_aarch64_elf_section_data;
6345 if (last_entry != NULL)
6346 {
6347 if (last_entry->sec == sec)
6348 entry = last_entry;
6349 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6350 entry = last_entry->next;
6351 }
6352
6353 for (; entry; entry = entry->next)
6354 if (entry->sec == sec)
6355 break;
6356
6357 if (entry)
6358 /* Record the entry prior to this one - it is the entry we are
6359 most likely to want to locate next time. Also this way if we
6360 have been called from
6361 unrecord_section_with_aarch64_elf_section_data () we will not
6362 be caching a pointer that is about to be freed. */
6363 last_entry = entry->prev;
6364
6365 return entry;
6366 }
6367
6368 static void
6369 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6370 {
6371 struct section_list *entry;
6372
6373 entry = find_aarch64_elf_section_entry (sec);
6374
6375 if (entry)
6376 {
6377 if (entry->prev != NULL)
6378 entry->prev->next = entry->next;
6379 if (entry->next != NULL)
6380 entry->next->prev = entry->prev;
6381 if (entry == sections_with_aarch64_elf_section_data)
6382 sections_with_aarch64_elf_section_data = entry->next;
6383 free (entry);
6384 }
6385 }
6386
6387
6388 typedef struct
6389 {
6390 void *finfo;
6391 struct bfd_link_info *info;
6392 asection *sec;
6393 int sec_shndx;
6394 int (*func) (void *, const char *, Elf_Internal_Sym *,
6395 asection *, struct elf_link_hash_entry *);
6396 } output_arch_syminfo;
6397
6398 enum map_symbol_type
6399 {
6400 AARCH64_MAP_INSN,
6401 AARCH64_MAP_DATA
6402 };
6403
6404
6405 /* Output a single mapping symbol. */
6406
6407 static bfd_boolean
6408 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6409 enum map_symbol_type type, bfd_vma offset)
6410 {
6411 static const char *names[2] = { "$x", "$d" };
6412 Elf_Internal_Sym sym;
6413
6414 sym.st_value = (osi->sec->output_section->vma
6415 + osi->sec->output_offset + offset);
6416 sym.st_size = 0;
6417 sym.st_other = 0;
6418 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6419 sym.st_shndx = osi->sec_shndx;
6420 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6421 }
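/* Editorial note: "$x" and "$d" are the standard AArch64 ELF mapping
   symbols, marking the start of a run of A64 instructions and the start
   of data respectively. A hedged usage sketch: a caller that has set up
   OSI for a linker-generated section would emit

       elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, 0);

   to mark the first byte of that section as code, much as the stub and
   PLT walkers below do for their own offsets. */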
6422
6423
6424
6425 /* Output mapping symbols for PLT entries associated with H. */
6426
6427 static bfd_boolean
6428 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6429 {
6430 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6431 bfd_vma addr;
6432
6433 if (h->root.type == bfd_link_hash_indirect)
6434 return TRUE;
6435
6436 if (h->root.type == bfd_link_hash_warning)
6437 /* When warning symbols are created, they **replace** the "real"
6438 entry in the hash table, thus we never get to see the real
6439 symbol in a hash traversal. So look at it now. */
6440 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6441
6442 if (h->plt.offset == (bfd_vma) - 1)
6443 return TRUE;
6444
6445 addr = h->plt.offset;
6446 if (addr == 32)
6447 {
6448 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6449 return FALSE;
6450 }
6451 return TRUE;
6452 }
6453
6454
6455 /* Output a single local symbol for a generated stub. */
6456
6457 static bfd_boolean
6458 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6459 bfd_vma offset, bfd_vma size)
6460 {
6461 Elf_Internal_Sym sym;
6462
6463 sym.st_value = (osi->sec->output_section->vma
6464 + osi->sec->output_offset + offset);
6465 sym.st_size = size;
6466 sym.st_other = 0;
6467 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6468 sym.st_shndx = osi->sec_shndx;
6469 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6470 }
6471
6472 static bfd_boolean
6473 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6474 {
6475 struct elf_aarch64_stub_hash_entry *stub_entry;
6476 asection *stub_sec;
6477 bfd_vma addr;
6478 char *stub_name;
6479 output_arch_syminfo *osi;
6480
6481 /* Massage our args to the form they really have. */
6482 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6483 osi = (output_arch_syminfo *) in_arg;
6484
6485 stub_sec = stub_entry->stub_sec;
6486
6487 /* Ensure this stub is attached to the current section being
6488 processed. */
6489 if (stub_sec != osi->sec)
6490 return TRUE;
6491
6492 addr = (bfd_vma) stub_entry->stub_offset;
6493
6494 stub_name = stub_entry->output_name;
6495
6496 switch (stub_entry->stub_type)
6497 {
6498 case aarch64_stub_adrp_branch:
6499 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6500 sizeof (aarch64_adrp_branch_stub)))
6501 return FALSE;
6502 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6503 return FALSE;
6504 break;
6505 case aarch64_stub_long_branch:
6506 if (!elfNN_aarch64_output_stub_sym
6507 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6508 return FALSE;
6509 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6510 return FALSE;
6511 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6512 return FALSE;
6513 break;
6514 case aarch64_stub_erratum_835769_veneer:
6515 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6516 sizeof (aarch64_erratum_835769_stub)))
6517 return FALSE;
6518 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6519 return FALSE;
6520 break;
6521 default:
6522 abort ();
6523 }
6524
6525 return TRUE;
6526 }
6527
6528 /* Output mapping symbols for linker generated sections. */
6529
6530 static bfd_boolean
6531 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6532 struct bfd_link_info *info,
6533 void *finfo,
6534 int (*func) (void *, const char *,
6535 Elf_Internal_Sym *,
6536 asection *,
6537 struct elf_link_hash_entry
6538 *))
6539 {
6540 output_arch_syminfo osi;
6541 struct elf_aarch64_link_hash_table *htab;
6542
6543 htab = elf_aarch64_hash_table (info);
6544
6545 osi.finfo = finfo;
6546 osi.info = info;
6547 osi.func = func;
6548
6549 /* Long calls stubs. */
6550 if (htab->stub_bfd && htab->stub_bfd->sections)
6551 {
6552 asection *stub_sec;
6553
6554 for (stub_sec = htab->stub_bfd->sections;
6555 stub_sec != NULL; stub_sec = stub_sec->next)
6556 {
6557 /* Ignore non-stub sections. */
6558 if (!strstr (stub_sec->name, STUB_SUFFIX))
6559 continue;
6560
6561 osi.sec = stub_sec;
6562
6563 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6564 (output_bfd, osi.sec->output_section);
6565
6566 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6567 &osi);
6568 }
6569 }
6570
6571 /* Finally, output mapping symbols for the PLT. */
6572 if (!htab->root.splt || htab->root.splt->size == 0)
6573 return TRUE;
6574
6575 /* For now live without mapping symbols for the plt. */
6576 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6577 (output_bfd, htab->root.splt->output_section);
6578 osi.sec = htab->root.splt;
6579
6580 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6581 (void *) &osi);
6582
6583 return TRUE;
6584
6585 }
6586
6587 /* Allocate target specific section data. */
6588
6589 static bfd_boolean
6590 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6591 {
6592 if (!sec->used_by_bfd)
6593 {
6594 _aarch64_elf_section_data *sdata;
6595 bfd_size_type amt = sizeof (*sdata);
6596
6597 sdata = bfd_zalloc (abfd, amt);
6598 if (sdata == NULL)
6599 return FALSE;
6600 sec->used_by_bfd = sdata;
6601 }
6602
6603 record_section_with_aarch64_elf_section_data (sec);
6604
6605 return _bfd_elf_new_section_hook (abfd, sec);
6606 }
6607
6608
6609 static void
6610 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6611 asection *sec,
6612 void *ignore ATTRIBUTE_UNUSED)
6613 {
6614 unrecord_section_with_aarch64_elf_section_data (sec);
6615 }
6616
6617 static bfd_boolean
6618 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6619 {
6620 if (abfd->sections)
6621 bfd_map_over_sections (abfd,
6622 unrecord_section_via_map_over_sections, NULL);
6623
6624 return _bfd_elf_close_and_cleanup (abfd);
6625 }
6626
6627 static bfd_boolean
6628 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6629 {
6630 if (abfd->sections)
6631 bfd_map_over_sections (abfd,
6632 unrecord_section_via_map_over_sections, NULL);
6633
6634 return _bfd_free_cached_info (abfd);
6635 }
6636
6637 /* Create dynamic sections. This is different from the ARM backend in that
6638 the got, plt, gotplt and their relocation sections are all created in the
6639 standard part of the bfd elf backend. */
6640
6641 static bfd_boolean
6642 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6643 struct bfd_link_info *info)
6644 {
6645 struct elf_aarch64_link_hash_table *htab;
6646
6647 /* We need to create .got section. */
6648 if (!aarch64_elf_create_got_section (dynobj, info))
6649 return FALSE;
6650
6651 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6652 return FALSE;
6653
6654 htab = elf_aarch64_hash_table (info);
6655 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6656 if (!info->shared)
6657 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6658
6659 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6660 abort ();
6661
6662 return TRUE;
6663 }
6664
6665
6666 /* Allocate space in .plt, .got and associated reloc sections for
6667 dynamic relocs. */
6668
6669 static bfd_boolean
6670 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6671 {
6672 struct bfd_link_info *info;
6673 struct elf_aarch64_link_hash_table *htab;
6674 struct elf_aarch64_link_hash_entry *eh;
6675 struct elf_dyn_relocs *p;
6676
6677 /* An example of a bfd_link_hash_indirect symbol is a versioned
6678 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6679 -> __gxx_personality_v0(bfd_link_hash_defined)
6680
6681 There is no need to process bfd_link_hash_indirect symbols here
6682 because we will also be presented with the concrete instance of
6683 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6684 called to copy all relevant data from the generic to the concrete
6685 symbol instance.
6686 */
6687 if (h->root.type == bfd_link_hash_indirect)
6688 return TRUE;
6689
6690 if (h->root.type == bfd_link_hash_warning)
6691 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6692
6693 info = (struct bfd_link_info *) inf;
6694 htab = elf_aarch64_hash_table (info);
6695
6696 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
6697 it here if it is defined and referenced in a non-shared object. */
6698 if (h->type == STT_GNU_IFUNC
6699 && h->def_regular)
6700 return TRUE;
6701 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6702 {
6703 /* Make sure this symbol is output as a dynamic symbol.
6704 Undefined weak syms won't yet be marked as dynamic. */
6705 if (h->dynindx == -1 && !h->forced_local)
6706 {
6707 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6708 return FALSE;
6709 }
6710
6711 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6712 {
6713 asection *s = htab->root.splt;
6714
6715 /* If this is the first .plt entry, make room for the special
6716 first entry. */
6717 if (s->size == 0)
6718 s->size += htab->plt_header_size;
6719
6720 h->plt.offset = s->size;
6721
6722 /* If this symbol is not defined in a regular file, and we are
6723 not generating a shared library, then set the symbol to this
6724 location in the .plt. This is required to make function
6725 pointers compare as equal between the normal executable and
6726 the shared library. */
6727 if (!info->shared && !h->def_regular)
6728 {
6729 h->root.u.def.section = s;
6730 h->root.u.def.value = h->plt.offset;
6731 }
6732
6733 /* Make room for this entry. For now we only create the
6734 small model PLT entries. We later need to find a way
6735 of relaxing into these from the large model PLT entries. */
6736 s->size += PLT_SMALL_ENTRY_SIZE;
6737
6738 /* We also need to make an entry in the .got.plt section, which
6739 will be placed in the .got section by the linker script. */
6740 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6741
6742 /* We also need to make an entry in the .rela.plt section. */
6743 htab->root.srelplt->size += RELOC_SIZE (htab);
6744
6745 /* We need to ensure that all GOT entries that serve the PLT
6746 are consecutive with the special GOT slots [0] [1] and
6747 [2]. Any additional relocations, such as
6748 R_AARCH64_TLSDESC, must be placed after the PLT related
6749 entries. We abuse the reloc_count such that during
6750 sizing we adjust reloc_count to indicate the number of
6751 PLT related reserved entries. In subsequent phases when
6752 filling in the contents of the reloc entries, PLT related
6753 entries are placed by computing their PLT index (0
6754 .. reloc_count), while other non-PLT relocs are placed
6755 at the slot indicated by reloc_count, and reloc_count is
6756 updated. */
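/* Editorial sketch of the resulting .rela.plt layout (hedged, derived
   from the comment above rather than from any specification):

       index 0 .. reloc_count-1 : R_AARCH64_JUMP_SLOT relocs, one per
                                  PLT entry, placed by PLT index;
       index reloc_count ..     : R_AARCH64_TLSDESC (and other non-PLT)
                                  relocs, appended later while
                                  reloc_count is bumped. */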
6757
6758 htab->root.srelplt->reloc_count++;
6759 }
6760 else
6761 {
6762 h->plt.offset = (bfd_vma) - 1;
6763 h->needs_plt = 0;
6764 }
6765 }
6766 else
6767 {
6768 h->plt.offset = (bfd_vma) - 1;
6769 h->needs_plt = 0;
6770 }
6771
6772 eh = (struct elf_aarch64_link_hash_entry *) h;
6773 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6774
6775 if (h->got.refcount > 0)
6776 {
6777 bfd_boolean dyn;
6778 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6779
6780 h->got.offset = (bfd_vma) - 1;
6781
6782 dyn = htab->root.dynamic_sections_created;
6783
6784 /* Make sure this symbol is output as a dynamic symbol.
6785 Undefined weak syms won't yet be marked as dynamic. */
6786 if (dyn && h->dynindx == -1 && !h->forced_local)
6787 {
6788 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6789 return FALSE;
6790 }
6791
6792 if (got_type == GOT_UNKNOWN)
6793 {
6794 }
6795 else if (got_type == GOT_NORMAL)
6796 {
6797 h->got.offset = htab->root.sgot->size;
6798 htab->root.sgot->size += GOT_ENTRY_SIZE;
6799 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6800 || h->root.type != bfd_link_hash_undefweak)
6801 && (info->shared
6802 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6803 {
6804 htab->root.srelgot->size += RELOC_SIZE (htab);
6805 }
6806 }
6807 else
6808 {
6809 int indx;
6810 if (got_type & GOT_TLSDESC_GD)
6811 {
6812 eh->tlsdesc_got_jump_table_offset =
6813 (htab->root.sgotplt->size
6814 - aarch64_compute_jump_table_size (htab));
6815 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6816 h->got.offset = (bfd_vma) - 2;
6817 }
6818
6819 if (got_type & GOT_TLS_GD)
6820 {
6821 h->got.offset = htab->root.sgot->size;
6822 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6823 }
6824
6825 if (got_type & GOT_TLS_IE)
6826 {
6827 h->got.offset = htab->root.sgot->size;
6828 htab->root.sgot->size += GOT_ENTRY_SIZE;
6829 }
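/* Summary of the space just reserved (editorial reading aid):
   GOT_TLSDESC_GD takes two slots in .got.plt, GOT_TLS_GD two slots in
   .got, and GOT_TLS_IE one slot in .got. The bits are not mutually
   exclusive, so a symbol carrying several of them gets the sum of these
   reservations. */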
6830
6831 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6832 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6833 || h->root.type != bfd_link_hash_undefweak)
6834 && (info->shared
6835 || indx != 0
6836 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6837 {
6838 if (got_type & GOT_TLSDESC_GD)
6839 {
6840 htab->root.srelplt->size += RELOC_SIZE (htab);
6841 /* Note reloc_count not incremented here! We have
6842 already adjusted reloc_count for this relocation
6843 type. */
6844
6845 /* TLSDESC PLT is now needed, but not yet determined. */
6846 htab->tlsdesc_plt = (bfd_vma) - 1;
6847 }
6848
6849 if (got_type & GOT_TLS_GD)
6850 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6851
6852 if (got_type & GOT_TLS_IE)
6853 htab->root.srelgot->size += RELOC_SIZE (htab);
6854 }
6855 }
6856 }
6857 else
6858 {
6859 h->got.offset = (bfd_vma) - 1;
6860 }
6861
6862 if (eh->dyn_relocs == NULL)
6863 return TRUE;
6864
6865 /* In the shared -Bsymbolic case, discard space allocated for
6866 dynamic pc-relative relocs against symbols which turn out to be
6867 defined in regular objects. For the normal shared case, discard
6868 space for pc-relative relocs that have become local due to symbol
6869 visibility changes. */
6870
6871 if (info->shared)
6872 {
6873 /* Relocs that use pc_count are those that appear on a call
6874 insn, or certain REL relocs that can be generated via assembly.
6875 We want calls to protected symbols to resolve directly to the
6876 function rather than going via the plt. If people want
6877 function pointer comparisons to work as expected then they
6878 should avoid writing weird assembly. */
6879 if (SYMBOL_CALLS_LOCAL (info, h))
6880 {
6881 struct elf_dyn_relocs **pp;
6882
6883 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6884 {
6885 p->count -= p->pc_count;
6886 p->pc_count = 0;
6887 if (p->count == 0)
6888 *pp = p->next;
6889 else
6890 pp = &p->next;
6891 }
6892 }
6893
6894 /* Also discard relocs on undefined weak syms with non-default
6895 visibility. */
6896 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6897 {
6898 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6899 eh->dyn_relocs = NULL;
6900
6901 /* Make sure undefined weak symbols are output as a dynamic
6902 symbol in PIEs. */
6903 else if (h->dynindx == -1
6904 && !h->forced_local
6905 && !bfd_elf_link_record_dynamic_symbol (info, h))
6906 return FALSE;
6907 }
6908
6909 }
6910 else if (ELIMINATE_COPY_RELOCS)
6911 {
6912 /* For the non-shared case, discard space for relocs against
6913 symbols which turn out to need copy relocs or are not
6914 dynamic. */
6915
6916 if (!h->non_got_ref
6917 && ((h->def_dynamic
6918 && !h->def_regular)
6919 || (htab->root.dynamic_sections_created
6920 && (h->root.type == bfd_link_hash_undefweak
6921 || h->root.type == bfd_link_hash_undefined))))
6922 {
6923 /* Make sure this symbol is output as a dynamic symbol.
6924 Undefined weak syms won't yet be marked as dynamic. */
6925 if (h->dynindx == -1
6926 && !h->forced_local
6927 && !bfd_elf_link_record_dynamic_symbol (info, h))
6928 return FALSE;
6929
6930 /* If that succeeded, we know we'll be keeping all the
6931 relocs. */
6932 if (h->dynindx != -1)
6933 goto keep;
6934 }
6935
6936 eh->dyn_relocs = NULL;
6937
6938 keep:;
6939 }
6940
6941 /* Finally, allocate space. */
6942 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6943 {
6944 asection *sreloc;
6945
6946 sreloc = elf_section_data (p->sec)->sreloc;
6947
6948 BFD_ASSERT (sreloc != NULL);
6949
6950 sreloc->size += p->count * RELOC_SIZE (htab);
6951 }
6952
6953 return TRUE;
6954 }
6955
6956 /* Allocate space in .plt, .got and associated reloc sections for
6957 ifunc dynamic relocs. */
6958
6959 static bfd_boolean
6960 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6961 void *inf)
6962 {
6963 struct bfd_link_info *info;
6964 struct elf_aarch64_link_hash_table *htab;
6965 struct elf_aarch64_link_hash_entry *eh;
6966
6967 /* An example of a bfd_link_hash_indirect symbol is a versioned
6968 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6969 -> __gxx_personality_v0(bfd_link_hash_defined)
6970
6971 There is no need to process bfd_link_hash_indirect symbols here
6972 because we will also be presented with the concrete instance of
6973 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6974 called to copy all relevant data from the generic to the concrete
6975 symbol instance.
6976 */
6977 if (h->root.type == bfd_link_hash_indirect)
6978 return TRUE;
6979
6980 if (h->root.type == bfd_link_hash_warning)
6981 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6982
6983 info = (struct bfd_link_info *) inf;
6984 htab = elf_aarch64_hash_table (info);
6985
6986 eh = (struct elf_aarch64_link_hash_entry *) h;
6987
6988 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
6989 it here if it is defined and referenced in a non-shared object. */
6990 if (h->type == STT_GNU_IFUNC
6991 && h->def_regular)
6992 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
6993 &eh->dyn_relocs,
6994 htab->plt_entry_size,
6995 htab->plt_header_size,
6996 GOT_ENTRY_SIZE);
6997 return TRUE;
6998 }
6999
7000 /* Allocate space in .plt, .got and associated reloc sections for
7001 local dynamic relocs. */
7002
7003 static bfd_boolean
7004 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7005 {
7006 struct elf_link_hash_entry *h
7007 = (struct elf_link_hash_entry *) *slot;
7008
7009 if (h->type != STT_GNU_IFUNC
7010 || !h->def_regular
7011 || !h->ref_regular
7012 || !h->forced_local
7013 || h->root.type != bfd_link_hash_defined)
7014 abort ();
7015
7016 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7017 }
7018
7019 /* Allocate space in .plt, .got and associated reloc sections for
7020 local ifunc dynamic relocs. */
7021
7022 static bfd_boolean
7023 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7024 {
7025 struct elf_link_hash_entry *h
7026 = (struct elf_link_hash_entry *) *slot;
7027
7028 if (h->type != STT_GNU_IFUNC
7029 || !h->def_regular
7030 || !h->ref_regular
7031 || !h->forced_local
7032 || h->root.type != bfd_link_hash_defined)
7033 abort ();
7034
7035 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7036 }
7037
7038 /* This is the most important function of all. Innocuously named
7039 though! */
7040 static bfd_boolean
7041 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7042 struct bfd_link_info *info)
7043 {
7044 struct elf_aarch64_link_hash_table *htab;
7045 bfd *dynobj;
7046 asection *s;
7047 bfd_boolean relocs;
7048 bfd *ibfd;
7049
7050 htab = elf_aarch64_hash_table (info);
7051 dynobj = htab->root.dynobj;
7052
7053 BFD_ASSERT (dynobj != NULL);
7054
7055 if (htab->root.dynamic_sections_created)
7056 {
7057 if (info->executable)
7058 {
7059 s = bfd_get_linker_section (dynobj, ".interp");
7060 if (s == NULL)
7061 abort ();
7062 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7063 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7064 }
7065 }
7066
7067 /* Set up .got offsets for local syms, and space for local dynamic
7068 relocs. */
7069 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7070 {
7071 struct elf_aarch64_local_symbol *locals = NULL;
7072 Elf_Internal_Shdr *symtab_hdr;
7073 asection *srel;
7074 unsigned int i;
7075
7076 if (!is_aarch64_elf (ibfd))
7077 continue;
7078
7079 for (s = ibfd->sections; s != NULL; s = s->next)
7080 {
7081 struct elf_dyn_relocs *p;
7082
7083 for (p = (struct elf_dyn_relocs *)
7084 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7085 {
7086 if (!bfd_is_abs_section (p->sec)
7087 && bfd_is_abs_section (p->sec->output_section))
7088 {
7089 /* Input section has been discarded, either because
7090 it is a copy of a linkonce section or due to
7091 linker script /DISCARD/, so we'll be discarding
7092 the relocs too. */
7093 }
7094 else if (p->count != 0)
7095 {
7096 srel = elf_section_data (p->sec)->sreloc;
7097 srel->size += p->count * RELOC_SIZE (htab);
7098 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7099 info->flags |= DF_TEXTREL;
7100 }
7101 }
7102 }
7103
7104 locals = elf_aarch64_locals (ibfd);
7105 if (!locals)
7106 continue;
7107
7108 symtab_hdr = &elf_symtab_hdr (ibfd);
7109 srel = htab->root.srelgot;
7110 for (i = 0; i < symtab_hdr->sh_info; i++)
7111 {
7112 locals[i].got_offset = (bfd_vma) - 1;
7113 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7114 if (locals[i].got_refcount > 0)
7115 {
7116 unsigned got_type = locals[i].got_type;
7117 if (got_type & GOT_TLSDESC_GD)
7118 {
7119 locals[i].tlsdesc_got_jump_table_offset =
7120 (htab->root.sgotplt->size
7121 - aarch64_compute_jump_table_size (htab));
7122 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7123 locals[i].got_offset = (bfd_vma) - 2;
7124 }
7125
7126 if (got_type & GOT_TLS_GD)
7127 {
7128 locals[i].got_offset = htab->root.sgot->size;
7129 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7130 }
7131
7132 if (got_type & GOT_TLS_IE)
7133 {
7134 locals[i].got_offset = htab->root.sgot->size;
7135 htab->root.sgot->size += GOT_ENTRY_SIZE;
7136 }
7137
7138 if (got_type == GOT_UNKNOWN)
7139 {
7140 }
7141
7142 if (got_type == GOT_NORMAL)
7143 {
7144 }
7145
7146 if (info->shared)
7147 {
7148 if (got_type & GOT_TLSDESC_GD)
7149 {
7150 htab->root.srelplt->size += RELOC_SIZE (htab);
7151 /* Note RELOC_COUNT not incremented here! */
7152 htab->tlsdesc_plt = (bfd_vma) - 1;
7153 }
7154
7155 if (got_type & GOT_TLS_GD)
7156 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7157
7158 if (got_type & GOT_TLS_IE)
7159 htab->root.srelgot->size += RELOC_SIZE (htab);
7160 }
7161 }
7162 else
7163 {
7164 locals[i].got_refcount = (bfd_vma) - 1;
7165 }
7166 }
7167 }
7168
7169
7170 /* Allocate global sym .plt and .got entries, and space for global
7171 sym dynamic relocs. */
7172 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7173 info);
7174
7175 /* Allocate global ifunc sym .plt and .got entries, and space for global
7176 ifunc sym dynamic relocs. */
7177 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7178 info);
7179
7180 /* Allocate .plt and .got entries, and space for local symbols. */
7181 htab_traverse (htab->loc_hash_table,
7182 elfNN_aarch64_allocate_local_dynrelocs,
7183 info);
7184
7185 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7186 htab_traverse (htab->loc_hash_table,
7187 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7188 info);
7189
7190 /* For every jump slot reserved in the sgotplt, reloc_count is
7191 incremented. However, when we reserve space for TLS descriptors,
7192 it's not incremented, so in order to compute the space reserved
7193 for them, it suffices to multiply the reloc count by the jump
7194 slot size. */
7195
7196 if (htab->root.srelplt)
7197 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
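  /* As a rough sketch of what the call above computes (see the preceding
     comment): if N jump slots have been recorded in srelplt->reloc_count,
     the jump table is taken to span N jump-slot-sized entries of .got.plt
     beyond the reserved header; TLS descriptor slots never bump reloc_count
     and so fall outside this figure.  */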
7198
7199 if (htab->tlsdesc_plt)
7200 {
7201 if (htab->root.splt->size == 0)
7202 htab->root.splt->size += PLT_ENTRY_SIZE;
7203
7204 htab->tlsdesc_plt = htab->root.splt->size;
7205 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7206
7207 /* If we're not using lazy TLS relocations, don't generate the
7208 GOT entry required. */
7209 if (!(info->flags & DF_BIND_NOW))
7210 {
7211 htab->dt_tlsdesc_got = htab->root.sgot->size;
7212 htab->root.sgot->size += GOT_ENTRY_SIZE;
7213 }
7214 }
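  /* At this point the lazy TLSDESC resolver stub, if required, has been
     placed right after the regular PLT entries sized so far, and (unless
     DF_BIND_NOW is set) one extra .got entry has been reserved for it; the
     DT_TLSDESC_PLT and DT_TLSDESC_GOT dynamic tags emitted further down
     describe both locations to the dynamic linker.  */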
7215
7216 /* Initialize the mapping symbol information, used later to distinguish
7217 code from data while scanning for erratum 835769. */
7218 if (htab->fix_erratum_835769)
7219 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7220 {
7221 if (!is_aarch64_elf (ibfd))
7222 continue;
7223 bfd_elfNN_aarch64_init_maps (ibfd);
7224 }
7225
7226 /* We now have determined the sizes of the various dynamic sections.
7227 Allocate memory for them. */
7228 relocs = FALSE;
7229 for (s = dynobj->sections; s != NULL; s = s->next)
7230 {
7231 if ((s->flags & SEC_LINKER_CREATED) == 0)
7232 continue;
7233
7234 if (s == htab->root.splt
7235 || s == htab->root.sgot
7236 || s == htab->root.sgotplt
7237 || s == htab->root.iplt
7238 || s == htab->root.igotplt || s == htab->sdynbss)
7239 {
7240 /* Strip this section if we don't need it; see the
7241 comment below. */
7242 }
7243 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7244 {
7245 if (s->size != 0 && s != htab->root.srelplt)
7246 relocs = TRUE;
7247
7248 /* We use the reloc_count field as a counter if we need
7249 to copy relocs into the output file. */
7250 if (s != htab->root.srelplt)
7251 s->reloc_count = 0;
7252 }
7253 else
7254 {
7255 /* It's not one of our sections, so don't allocate space. */
7256 continue;
7257 }
7258
7259 if (s->size == 0)
7260 {
7261 /* If we don't need this section, strip it from the
7262 output file. This is mostly to handle .rela.bss and
7263 .rela.plt. We must create both sections in
7264 create_dynamic_sections, because they must be created
7265 before the linker maps input sections to output
7266 sections. The linker does that before
7267 adjust_dynamic_symbol is called, and it is that
7268 function which decides whether anything needs to go
7269 into these sections. */
7270
7271 s->flags |= SEC_EXCLUDE;
7272 continue;
7273 }
7274
7275 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7276 continue;
7277
7278 /* Allocate memory for the section contents. We use bfd_zalloc
7279 here in case unused entries are not reclaimed before the
7280 section's contents are written out. This should not happen,
7281 but this way if it does, we get a R_AARCH64_NONE reloc instead
7282 of garbage. */
7283 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7284 if (s->contents == NULL)
7285 return FALSE;
7286 }
7287
7288 if (htab->root.dynamic_sections_created)
7289 {
7290 /* Add some entries to the .dynamic section. We fill in the
7291 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7292 must add the entries now so that we get the correct size for
7293 the .dynamic section. The DT_DEBUG entry is filled in by the
7294 dynamic linker and used by the debugger. */
7295 #define add_dynamic_entry(TAG, VAL) \
7296 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7297
7298 if (info->executable)
7299 {
7300 if (!add_dynamic_entry (DT_DEBUG, 0))
7301 return FALSE;
7302 }
7303
7304 if (htab->root.splt->size != 0)
7305 {
7306 if (!add_dynamic_entry (DT_PLTGOT, 0)
7307 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7308 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7309 || !add_dynamic_entry (DT_JMPREL, 0))
7310 return FALSE;
7311
7312 if (htab->tlsdesc_plt
7313 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7314 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7315 return FALSE;
7316 }
7317
7318 if (relocs)
7319 {
7320 if (!add_dynamic_entry (DT_RELA, 0)
7321 || !add_dynamic_entry (DT_RELASZ, 0)
7322 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7323 return FALSE;
7324
7325 /* If any dynamic relocs apply to a read-only section,
7326 then we need a DT_TEXTREL entry. */
7327 if ((info->flags & DF_TEXTREL) != 0)
7328 {
7329 if (!add_dynamic_entry (DT_TEXTREL, 0))
7330 return FALSE;
7331 }
7332 }
7333 }
7334 #undef add_dynamic_entry
7335
7336 return TRUE;
7337 }
7338
7339 static inline void
7340 elf_aarch64_update_plt_entry (bfd *output_bfd,
7341 bfd_reloc_code_real_type r_type,
7342 bfd_byte *plt_entry, bfd_vma value)
7343 {
7344 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7345
7346 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7347 }
7348
7349 static void
7350 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7351 struct elf_aarch64_link_hash_table
7352 *htab, bfd *output_bfd,
7353 struct bfd_link_info *info)
7354 {
7355 bfd_byte *plt_entry;
7356 bfd_vma plt_index;
7357 bfd_vma got_offset;
7358 bfd_vma gotplt_entry_address;
7359 bfd_vma plt_entry_address;
7360 Elf_Internal_Rela rela;
7361 bfd_byte *loc;
7362 asection *plt, *gotplt, *relplt;
7363
7364 /* When building a static executable, use .iplt, .igot.plt and
7365 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7366 if (htab->root.splt != NULL)
7367 {
7368 plt = htab->root.splt;
7369 gotplt = htab->root.sgotplt;
7370 relplt = htab->root.srelplt;
7371 }
7372 else
7373 {
7374 plt = htab->root.iplt;
7375 gotplt = htab->root.igotplt;
7376 relplt = htab->root.irelplt;
7377 }
7378
7379 /* Get the index in the procedure linkage table which
7380 corresponds to this symbol. This is the index of this symbol
7381 in all the symbols for which we are making plt entries. The
7382 first entry in the procedure linkage table is reserved.
7383
7384 Get the offset into the .got table of the entry that
7385 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7386 bytes. The first three are reserved for the dynamic linker.
7387
7388 For static executables, we don't reserve anything. */
7389
7390 if (plt == htab->root.splt)
7391 {
7392 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7393 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7394 }
7395 else
7396 {
7397 plt_index = h->plt.offset / htab->plt_entry_size;
7398 got_offset = plt_index * GOT_ENTRY_SIZE;
7399 }
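  /* A worked example, assuming the usual ELF64 layout of a 32-byte PLT
     header, 16-byte small PLT entries and 8-byte GOT entries (those sizes
     are defined elsewhere in this file): the first PLTn entry has
     h->plt.offset == 32, giving plt_index == 0 and
     got_offset == (0 + 3) * 8 == 24, i.e. the first .got.plt slot after
     the three reserved ones.  */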
7400
7401 plt_entry = plt->contents + h->plt.offset;
7402 plt_entry_address = plt->output_section->vma
7403 + plt->output_offset + h->plt.offset;
7404 gotplt_entry_address = gotplt->output_section->vma +
7405 gotplt->output_offset + got_offset;
7406
7407 /* Copy in the boiler-plate for the PLTn entry. */
7408 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7409
7410 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7411 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7412 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7413 plt_entry,
7414 PG (gotplt_entry_address) -
7415 PG (plt_entry_address));
7416
7417 /* Fill in the lo12 bits for the load from the pltgot. */
7418 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7419 plt_entry + 4,
7420 PG_OFFSET (gotplt_entry_address));
7421
7422 /* Fill in the lo12 bits for the add from the pltgot entry. */
7423 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7424 plt_entry + 8,
7425 PG_OFFSET (gotplt_entry_address));
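  /* With the three fixups above in place, the PLTn entry is the standard
     small PLT sequence, loading the target address from its .got.plt slot:

	adrp x16, PLT_GOT + n * GOT_ENTRY_SIZE
	ldr  x17, [x16, #:lo12:PLT_GOT + n * GOT_ENTRY_SIZE]
	add  x16, x16, #:lo12:PLT_GOT + n * GOT_ENTRY_SIZE
	br   x17  */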
7426
7427 /* All the GOT.PLT entries are initialized to the address of PLT0. */
7428 bfd_put_NN (output_bfd,
7429 plt->output_section->vma + plt->output_offset,
7430 gotplt->contents + got_offset);
7431
7432 rela.r_offset = gotplt_entry_address;
7433
7434 if (h->dynindx == -1
7435 || ((info->executable
7436 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7437 && h->def_regular
7438 && h->type == STT_GNU_IFUNC))
7439 {
7440 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7441 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7442 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7443 rela.r_addend = (h->root.u.def.value
7444 + h->root.u.def.section->output_section->vma
7445 + h->root.u.def.section->output_offset);
7446 }
7447 else
7448 {
7449 /* Fill in the entry in the .rela.plt section. */
7450 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7451 rela.r_addend = 0;
7452 }
7453
7454 /* Compute the relocation entry to use based on the PLT index and do
7455 not adjust reloc_count; it has already been adjusted to account
7456 for this entry. */
7457 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7458 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7459 }
7460
7461 /* Size sections even though they're not dynamic. We use this hook to
7462 set up _TLS_MODULE_BASE_, if needed. */
7463
7464 static bfd_boolean
7465 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7466 struct bfd_link_info *info)
7467 {
7468 asection *tls_sec;
7469
7470 if (info->relocatable)
7471 return TRUE;
7472
7473 tls_sec = elf_hash_table (info)->tls_sec;
7474
7475 if (tls_sec)
7476 {
7477 struct elf_link_hash_entry *tlsbase;
7478
7479 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7480 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7481
7482 if (tlsbase)
7483 {
7484 struct bfd_link_hash_entry *h = NULL;
7485 const struct elf_backend_data *bed =
7486 get_elf_backend_data (output_bfd);
7487
7488 if (!(_bfd_generic_link_add_one_symbol
7489 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7490 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7491 return FALSE;
7492
7493 tlsbase->type = STT_TLS;
7494 tlsbase = (struct elf_link_hash_entry *) h;
7495 tlsbase->def_regular = 1;
7496 tlsbase->other = STV_HIDDEN;
7497 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7498 }
7499 }
7500
7501 return TRUE;
7502 }
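/* Note on the function above: _TLS_MODULE_BASE_ is created as a local,
   hidden STT_TLS symbol at offset 0 of the TLS output section, which is
   presumably what later relocation processing uses as the base address of
   the module's TLS block.  */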
7503
7504 /* Finish up dynamic symbol handling. We set the contents of various
7505 dynamic sections here. */
7506 static bfd_boolean
7507 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7508 struct bfd_link_info *info,
7509 struct elf_link_hash_entry *h,
7510 Elf_Internal_Sym *sym)
7511 {
7512 struct elf_aarch64_link_hash_table *htab;
7513 htab = elf_aarch64_hash_table (info);
7514
7515 if (h->plt.offset != (bfd_vma) - 1)
7516 {
7517 asection *plt, *gotplt, *relplt;
7518
7519 /* This symbol has an entry in the procedure linkage table. Set
7520 it up. */
7521
7522 /* When building a static executable, use .iplt, .igot.plt and
7523 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7524 if (htab->root.splt != NULL)
7525 {
7526 plt = htab->root.splt;
7527 gotplt = htab->root.sgotplt;
7528 relplt = htab->root.srelplt;
7529 }
7530 else
7531 {
7532 plt = htab->root.iplt;
7533 gotplt = htab->root.igotplt;
7534 relplt = htab->root.irelplt;
7535 }
7536
7537 /* This symbol has an entry in the procedure linkage table. Set
7538 it up. */
7539 if ((h->dynindx == -1
7540 && !((h->forced_local || info->executable)
7541 && h->def_regular
7542 && h->type == STT_GNU_IFUNC))
7543 || plt == NULL
7544 || gotplt == NULL
7545 || relplt == NULL)
7546 abort ();
7547
7548 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7549 if (!h->def_regular)
7550 {
7551 /* Mark the symbol as undefined, rather than as defined in
7552 the .plt section. */
7553 sym->st_shndx = SHN_UNDEF;
7554 /* If the symbol is weak we need to clear the value.
7555 Otherwise, the PLT entry would provide a definition for
7556 the symbol even if the symbol wasn't defined anywhere,
7557 and so the symbol would never be NULL. Leave the value if
7558 there were any relocations where pointer equality matters
7559 (this is a clue for the dynamic linker, to make function
7560 pointer comparisons work between an application and shared
7561 library). */
7562 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7563 sym->st_value = 0;
7564 }
7565 }
7566
7567 if (h->got.offset != (bfd_vma) - 1
7568 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7569 {
7570 Elf_Internal_Rela rela;
7571 bfd_byte *loc;
7572
7573 /* This symbol has an entry in the global offset table. Set it
7574 up. */
7575 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7576 abort ();
7577
7578 rela.r_offset = (htab->root.sgot->output_section->vma
7579 + htab->root.sgot->output_offset
7580 + (h->got.offset & ~(bfd_vma) 1));
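      /* The low bit of h->got.offset is used by the relocation code as an
	 "entry already initialized" marker, so it is masked off above (and
	 checked by the BFD_ASSERTs below) to recover the real GOT offset.  */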
7581
7582 if (h->def_regular
7583 && h->type == STT_GNU_IFUNC)
7584 {
7585 if (info->shared)
7586 {
7587 /* Generate R_AARCH64_GLOB_DAT. */
7588 goto do_glob_dat;
7589 }
7590 else
7591 {
7592 asection *plt;
7593
7594 if (!h->pointer_equality_needed)
7595 abort ();
7596
7597 /* For non-shared object, we can't use .got.plt, which
7598 contains the real function address if we need pointer
7599 equality. We load the GOT entry with the PLT entry. */
7600 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7601 bfd_put_NN (output_bfd, (plt->output_section->vma
7602 + plt->output_offset
7603 + h->plt.offset),
7604 htab->root.sgot->contents
7605 + (h->got.offset & ~(bfd_vma) 1));
7606 return TRUE;
7607 }
7608 }
7609 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7610 {
7611 if (!h->def_regular)
7612 return FALSE;
7613
7614 BFD_ASSERT ((h->got.offset & 1) != 0);
7615 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7616 rela.r_addend = (h->root.u.def.value
7617 + h->root.u.def.section->output_section->vma
7618 + h->root.u.def.section->output_offset);
7619 }
7620 else
7621 {
7622 do_glob_dat:
7623 BFD_ASSERT ((h->got.offset & 1) == 0);
7624 bfd_put_NN (output_bfd, (bfd_vma) 0,
7625 htab->root.sgot->contents + h->got.offset);
7626 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7627 rela.r_addend = 0;
7628 }
7629
7630 loc = htab->root.srelgot->contents;
7631 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7632 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7633 }
7634
7635 if (h->needs_copy)
7636 {
7637 Elf_Internal_Rela rela;
7638 bfd_byte *loc;
7639
7640 /* This symbol needs a copy reloc. Set it up. */
7641
7642 if (h->dynindx == -1
7643 || (h->root.type != bfd_link_hash_defined
7644 && h->root.type != bfd_link_hash_defweak)
7645 || htab->srelbss == NULL)
7646 abort ();
7647
7648 rela.r_offset = (h->root.u.def.value
7649 + h->root.u.def.section->output_section->vma
7650 + h->root.u.def.section->output_offset);
7651 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7652 rela.r_addend = 0;
7653 loc = htab->srelbss->contents;
7654 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7655 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7656 }
7657
7658 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7659 be NULL for local symbols. */
7660 if (sym != NULL
7661 && (h == elf_hash_table (info)->hdynamic
7662 || h == elf_hash_table (info)->hgot))
7663 sym->st_shndx = SHN_ABS;
7664
7665 return TRUE;
7666 }
7667
7668 /* Finish up local dynamic symbol handling. We set the contents of
7669 various dynamic sections here. */
7670
7671 static bfd_boolean
7672 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7673 {
7674 struct elf_link_hash_entry *h
7675 = (struct elf_link_hash_entry *) *slot;
7676 struct bfd_link_info *info
7677 = (struct bfd_link_info *) inf;
7678
7679 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7680 info, h, NULL);
7681 }
7682
7683 static void
7684 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7685 struct elf_aarch64_link_hash_table
7686 *htab)
7687 {
7688 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
7689 small and large PLTs and at the moment just generates
7690 the small PLT. */
7691
7692 /* PLT0 of the small PLT looks like this in ELF64 -
7693 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7694 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7695 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7696 // symbol resolver
7697 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7698 // GOTPLT entry for this.
7699 br x17
7700 PLT0 will be slightly different in ELF32 due to the different GOT
7701 entry size.
7702 */
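  /* Only the adrp, ldr and add instructions above need address fixups,
     which is why the patches below target offsets 4, 8 and 12 within PLT0;
     the initial stp and the final br need none.  */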
7703 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7704 bfd_vma plt_base;
7705
7706
7707 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7708 PLT_ENTRY_SIZE);
7709 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7710 PLT_ENTRY_SIZE;
7711
7712 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7713 + htab->root.sgotplt->output_offset
7714 + GOT_ENTRY_SIZE * 2);
7715
7716 plt_base = htab->root.splt->output_section->vma +
7717 htab->root.splt->output_offset;
7718
7719 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7720 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7721 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7722 htab->root.splt->contents + 4,
7723 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7724
7725 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7726 htab->root.splt->contents + 8,
7727 PG_OFFSET (plt_got_2nd_ent));
7728
7729 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7730 htab->root.splt->contents + 12,
7731 PG_OFFSET (plt_got_2nd_ent));
7732 }
7733
7734 static bfd_boolean
7735 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7736 struct bfd_link_info *info)
7737 {
7738 struct elf_aarch64_link_hash_table *htab;
7739 bfd *dynobj;
7740 asection *sdyn;
7741
7742 htab = elf_aarch64_hash_table (info);
7743 dynobj = htab->root.dynobj;
7744 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7745
7746 if (htab->root.dynamic_sections_created)
7747 {
7748 ElfNN_External_Dyn *dyncon, *dynconend;
7749
7750 if (sdyn == NULL || htab->root.sgot == NULL)
7751 abort ();
7752
7753 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7754 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7755 for (; dyncon < dynconend; dyncon++)
7756 {
7757 Elf_Internal_Dyn dyn;
7758 asection *s;
7759
7760 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7761
7762 switch (dyn.d_tag)
7763 {
7764 default:
7765 continue;
7766
7767 case DT_PLTGOT:
7768 s = htab->root.sgotplt;
7769 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7770 break;
7771
7772 case DT_JMPREL:
7773 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7774 break;
7775
7776 case DT_PLTRELSZ:
7777 s = htab->root.srelplt;
7778 dyn.d_un.d_val = s->size;
7779 break;
7780
7781 case DT_RELASZ:
7782 /* The procedure linkage table relocs (DT_JMPREL) should
7783 not be included in the overall relocs (DT_RELA).
7784 Therefore, we override the DT_RELASZ entry here to
7785 make it not include the JMPREL relocs. Since the
7786 linker script arranges for .rela.plt to follow all
7787 other relocation sections, we don't have to worry
7788 about changing the DT_RELA entry. */
7789 if (htab->root.srelplt != NULL)
7790 {
7791 s = htab->root.srelplt;
7792 dyn.d_un.d_val -= s->size;
7793 }
7794 break;
7795
7796 case DT_TLSDESC_PLT:
7797 s = htab->root.splt;
7798 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7799 + htab->tlsdesc_plt;
7800 break;
7801
7802 case DT_TLSDESC_GOT:
7803 s = htab->root.sgot;
7804 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7805 + htab->dt_tlsdesc_got;
7806 break;
7807 }
7808
7809 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7810 }
7811
7812 }
7813
7814 /* Fill in the special first entry in the procedure linkage table. */
7815 if (htab->root.splt && htab->root.splt->size > 0)
7816 {
7817 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7818
7819 elf_section_data (htab->root.splt->output_section)->
7820 this_hdr.sh_entsize = htab->plt_entry_size;
7821
7822
7823 if (htab->tlsdesc_plt)
7824 {
7825 bfd_put_NN (output_bfd, (bfd_vma) 0,
7826 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7827
7828 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7829 elfNN_aarch64_tlsdesc_small_plt_entry,
7830 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
7831
7832 {
7833 bfd_vma adrp1_addr =
7834 htab->root.splt->output_section->vma
7835 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7836
7837 bfd_vma adrp2_addr = adrp1_addr + 4;
7838
7839 bfd_vma got_addr =
7840 htab->root.sgot->output_section->vma
7841 + htab->root.sgot->output_offset;
7842
7843 bfd_vma pltgot_addr =
7844 htab->root.sgotplt->output_section->vma
7845 + htab->root.sgotplt->output_offset;
7846
7847 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7848
7849 bfd_byte *plt_entry =
7850 htab->root.splt->contents + htab->tlsdesc_plt;
7851
7852 /* adrp x2, DT_TLSDESC_GOT */
7853 elf_aarch64_update_plt_entry (output_bfd,
7854 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7855 plt_entry + 4,
7856 (PG (dt_tlsdesc_got)
7857 - PG (adrp1_addr)));
7858
7859 /* adrp x3, 0 */
7860 elf_aarch64_update_plt_entry (output_bfd,
7861 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7862 plt_entry + 8,
7863 (PG (pltgot_addr)
7864 - PG (adrp2_addr)));
7865
7866 /* ldr x2, [x2, #0] */
7867 elf_aarch64_update_plt_entry (output_bfd,
7868 BFD_RELOC_AARCH64_LDSTNN_LO12,
7869 plt_entry + 12,
7870 PG_OFFSET (dt_tlsdesc_got));
7871
7872 /* add x3, x3, 0 */
7873 elf_aarch64_update_plt_entry (output_bfd,
7874 BFD_RELOC_AARCH64_ADD_LO12,
7875 plt_entry + 16,
7876 PG_OFFSET (pltgot_addr));
7877 }
7878 }
7879 }
7880
7881 if (htab->root.sgotplt)
7882 {
7883 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7884 {
7885 (*_bfd_error_handler)
7886 (_("discarded output section: `%A'"), htab->root.sgotplt);
7887 return FALSE;
7888 }
7889
7890 /* Fill in the first three (reserved) entries in the .got.plt. */
7891 if (htab->root.sgotplt->size > 0)
7892 {
7893 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7894
7895 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7896 bfd_put_NN (output_bfd,
7897 (bfd_vma) 0,
7898 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7899 bfd_put_NN (output_bfd,
7900 (bfd_vma) 0,
7901 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7902 }
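      /* The three reserved .got.plt entries written above stay zero in the
	 output file; at run time the dynamic linker conventionally fills the
	 second and third with its link map pointer and the address of the
	 lazy resolver.  */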
7903
7904 if (htab->root.sgot)
7905 {
7906 if (htab->root.sgot->size > 0)
7907 {
7908 bfd_vma addr =
7909 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7910 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7911 }
7912 }
7913
7914 elf_section_data (htab->root.sgotplt->output_section)->
7915 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7916 }
7917
7918 if (htab->root.sgot && htab->root.sgot->size > 0)
7919 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7920 = GOT_ENTRY_SIZE;
7921
7922 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7923 htab_traverse (htab->loc_hash_table,
7924 elfNN_aarch64_finish_local_dynamic_symbol,
7925 info);
7926
7927 return TRUE;
7928 }
7929
7930 /* Return address for Ith PLT stub in section PLT, for relocation REL
7931 or (bfd_vma) -1 if it should not be included. */
7932
7933 static bfd_vma
7934 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7935 const arelent *rel ATTRIBUTE_UNUSED)
7936 {
7937 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7938 }
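/* A quick illustration of the mapping above, assuming the usual 32-byte PLT
   header and 16-byte small PLT entries used by this backend: stub 0 is
   reported at plt->vma + 32, stub 1 at plt->vma + 48, and so on.  */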
7939
7940
7941 /* We use this so we can override certain functions
7942 (though currently we don't). */
7943
7944 const struct elf_size_info elfNN_aarch64_size_info =
7945 {
7946 sizeof (ElfNN_External_Ehdr),
7947 sizeof (ElfNN_External_Phdr),
7948 sizeof (ElfNN_External_Shdr),
7949 sizeof (ElfNN_External_Rel),
7950 sizeof (ElfNN_External_Rela),
7951 sizeof (ElfNN_External_Sym),
7952 sizeof (ElfNN_External_Dyn),
7953 sizeof (Elf_External_Note),
7954 4, /* Hash table entry size. */
7955 1, /* Internal relocs per external relocs. */
7956 ARCH_SIZE, /* Arch size. */
7957 LOG_FILE_ALIGN, /* Log_file_align. */
7958 ELFCLASSNN, EV_CURRENT,
7959 bfd_elfNN_write_out_phdrs,
7960 bfd_elfNN_write_shdrs_and_ehdr,
7961 bfd_elfNN_checksum_contents,
7962 bfd_elfNN_write_relocs,
7963 bfd_elfNN_swap_symbol_in,
7964 bfd_elfNN_swap_symbol_out,
7965 bfd_elfNN_slurp_reloc_table,
7966 bfd_elfNN_slurp_symbol_table,
7967 bfd_elfNN_swap_dyn_in,
7968 bfd_elfNN_swap_dyn_out,
7969 bfd_elfNN_swap_reloc_in,
7970 bfd_elfNN_swap_reloc_out,
7971 bfd_elfNN_swap_reloca_in,
7972 bfd_elfNN_swap_reloca_out
7973 };
7974
7975 #define ELF_ARCH bfd_arch_aarch64
7976 #define ELF_MACHINE_CODE EM_AARCH64
7977 #define ELF_MAXPAGESIZE 0x10000
7978 #define ELF_MINPAGESIZE 0x1000
7979 #define ELF_COMMONPAGESIZE 0x1000
7980
7981 #define bfd_elfNN_close_and_cleanup \
7982 elfNN_aarch64_close_and_cleanup
7983
7984 #define bfd_elfNN_bfd_free_cached_info \
7985 elfNN_aarch64_bfd_free_cached_info
7986
7987 #define bfd_elfNN_bfd_is_target_special_symbol \
7988 elfNN_aarch64_is_target_special_symbol
7989
7990 #define bfd_elfNN_bfd_link_hash_table_create \
7991 elfNN_aarch64_link_hash_table_create
7992
7993 #define bfd_elfNN_bfd_merge_private_bfd_data \
7994 elfNN_aarch64_merge_private_bfd_data
7995
7996 #define bfd_elfNN_bfd_print_private_bfd_data \
7997 elfNN_aarch64_print_private_bfd_data
7998
7999 #define bfd_elfNN_bfd_reloc_type_lookup \
8000 elfNN_aarch64_reloc_type_lookup
8001
8002 #define bfd_elfNN_bfd_reloc_name_lookup \
8003 elfNN_aarch64_reloc_name_lookup
8004
8005 #define bfd_elfNN_bfd_set_private_flags \
8006 elfNN_aarch64_set_private_flags
8007
8008 #define bfd_elfNN_find_inliner_info \
8009 elfNN_aarch64_find_inliner_info
8010
8011 #define bfd_elfNN_find_nearest_line \
8012 elfNN_aarch64_find_nearest_line
8013
8014 #define bfd_elfNN_mkobject \
8015 elfNN_aarch64_mkobject
8016
8017 #define bfd_elfNN_new_section_hook \
8018 elfNN_aarch64_new_section_hook
8019
8020 #define elf_backend_adjust_dynamic_symbol \
8021 elfNN_aarch64_adjust_dynamic_symbol
8022
8023 #define elf_backend_always_size_sections \
8024 elfNN_aarch64_always_size_sections
8025
8026 #define elf_backend_check_relocs \
8027 elfNN_aarch64_check_relocs
8028
8029 #define elf_backend_copy_indirect_symbol \
8030 elfNN_aarch64_copy_indirect_symbol
8031
8032 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8033 to them in our hash. */
8034 #define elf_backend_create_dynamic_sections \
8035 elfNN_aarch64_create_dynamic_sections
8036
8037 #define elf_backend_init_index_section \
8038 _bfd_elf_init_2_index_sections
8039
8040 #define elf_backend_finish_dynamic_sections \
8041 elfNN_aarch64_finish_dynamic_sections
8042
8043 #define elf_backend_finish_dynamic_symbol \
8044 elfNN_aarch64_finish_dynamic_symbol
8045
8046 #define elf_backend_gc_sweep_hook \
8047 elfNN_aarch64_gc_sweep_hook
8048
8049 #define elf_backend_object_p \
8050 elfNN_aarch64_object_p
8051
8052 #define elf_backend_output_arch_local_syms \
8053 elfNN_aarch64_output_arch_local_syms
8054
8055 #define elf_backend_plt_sym_val \
8056 elfNN_aarch64_plt_sym_val
8057
8058 #define elf_backend_post_process_headers \
8059 elfNN_aarch64_post_process_headers
8060
8061 #define elf_backend_relocate_section \
8062 elfNN_aarch64_relocate_section
8063
8064 #define elf_backend_reloc_type_class \
8065 elfNN_aarch64_reloc_type_class
8066
8067 #define elf_backend_section_from_shdr \
8068 elfNN_aarch64_section_from_shdr
8069
8070 #define elf_backend_size_dynamic_sections \
8071 elfNN_aarch64_size_dynamic_sections
8072
8073 #define elf_backend_size_info \
8074 elfNN_aarch64_size_info
8075
8076 #define elf_backend_write_section \
8077 elfNN_aarch64_write_section
8078
8079 #define elf_backend_can_refcount 1
8080 #define elf_backend_can_gc_sections 1
8081 #define elf_backend_plt_readonly 1
8082 #define elf_backend_want_got_plt 1
8083 #define elf_backend_want_plt_sym 0
8084 #define elf_backend_may_use_rel_p 0
8085 #define elf_backend_may_use_rela_p 1
8086 #define elf_backend_default_use_rela_p 1
8087 #define elf_backend_rela_normal 1
8088 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8089 #define elf_backend_default_execstack 0
8090
8091 #undef elf_backend_obj_attrs_section
8092 #define elf_backend_obj_attrs_section ".ARM.attributes"
8093
8094 #include "elfNN-target.h"