1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35          add  x0, x0, #:tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols.  Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted.  The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot.  For a traditional TLS symbol we allocate space for
111 two relocation entries against the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation against the slot.  Record the GOT
113 offset for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation.  Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
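/* Illustration of the GOT-offset flag convention described above.  This is
   only a sketch of the convention, not a helper used by this file, and the
   name 'got_offset' below is a placeholder for whichever field records the
   offset.  The LSB of a recorded GOT offset flags that the GOT relocations
   for the symbol have already been emitted, so consumers mask it out before
   using the offset:

     bfd_boolean relocs_emitted_p = (got_offset & 1) != 0;
     bfd_vma usable_offset = got_offset & ~(bfd_vma) 1;  */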
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
189
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
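/* Illustrative use of the two predicates above (a sketch, not a call site
   copied from this file).  Both macros classify bfd internal reloc codes
   (BFD_RELOC_AARCH64_*), not raw ELF r_type numbers:

     if (IS_AARCH64_TLS_RELOC (bfd_r_type)
         && !IS_AARCH64_TLSDESC_RELOC (bfd_r_type))
       handle_traditional_tls_reloc ();

   where 'handle_traditional_tls_reloc' is a hypothetical name used only
   for this example.  */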
203
204 #define ELIMINATE_COPY_RELOCS 0
205
206 /* Return the size of a relocation entry.  HTAB is the bfd's
207 elf_aarch64_link_hash_table.  */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
209
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
215
216 /* Encoding of the nop instruction.  */
217 #define INSN_NOP 0xd503201f
218
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
223 /* If the distance between the PLTGOT and the PLT is < 4GB, these PLT
224 entries are used and the first entry in the procedure linkage table
225 looks like this.  Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 {
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 #if ARCH_SIZE == 64
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
235 #else
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
238 #endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 };
244
245 /* If the distance between the PLTGOT and the PLT is < 4GB, these PLT
246 entries are used and each per-function entry in the procedure
247 linkage table looks like this.  */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
249 {
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
251 #if ARCH_SIZE == 64
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 #else
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
257 #endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
259 };
260
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
263 {
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
267 #if ARCH_SIZE == 64
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
270 #else
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
273 #endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 };
278
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
281
282 #define AARCH64_ELF_ABI_VERSION 0
283
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
286
287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
289 in reloc.c. */
290
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
292 {
293 EMPTY_HOWTO (0),
294
295 /* Basic data relocations. */
296
297 #if ARCH_SIZE == 64
298 HOWTO (R_AARCH64_NULL, /* type */
299 0, /* rightshift */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
301 0, /* bitsize */
302 FALSE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
308 0, /* src_mask */
309 0, /* dst_mask */
310 FALSE), /* pcrel_offset */
311 #else
312 HOWTO (R_AARCH64_NONE, /* type */
313 0, /* rightshift */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
315 0, /* bitsize */
316 FALSE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
322 0, /* src_mask */
323 0, /* dst_mask */
324 FALSE), /* pcrel_offset */
325 #endif
326
327 /* .xword: (S+A) */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
329 0, /* rightshift */
330 4, /* size (4 = long long) */
331 64, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 /* .word: (S+A) */
343 HOWTO (AARCH64_R (ABS32), /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 /* .half: (S+A) */
358 HOWTO (AARCH64_R (ABS16), /* type */
359 0, /* rightshift */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
361 16, /* bitsize */
362 FALSE, /* pc_relative */
363 0, /* bitpos */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
371
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 TRUE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
386
387 /* .word: (S+A-P) */
388 HOWTO (AARCH64_R (PREL32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 TRUE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
401
402 /* .half: (S+A-P) */
403 HOWTO (AARCH64_R (PREL16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 TRUE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
416
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
419
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
422 0, /* rightshift */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 FALSE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
434
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
437 0, /* rightshift */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
439 16, /* bitsize */
440 FALSE, /* pc_relative */
441 0, /* bitpos */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
449
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
452 16, /* rightshift */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
454 16, /* bitsize */
455 FALSE, /* pc_relative */
456 0, /* bitpos */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
464
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
467 16, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
482 32, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
497 32, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
512 48, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
528
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
531 0, /* rightshift */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
533 16, /* bitsize */
534 FALSE, /* pc_relative */
535 0, /* bitpos */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
543
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
546 16, /* rightshift */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
548 16, /* bitsize */
549 FALSE, /* pc_relative */
550 0, /* bitpos */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
558
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
561 32, /* rightshift */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
563 16, /* bitsize */
564 FALSE, /* pc_relative */
565 0, /* bitpos */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
573
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
576
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
579 2, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 19, /* bitsize */
582 TRUE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
591
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
594 0, /* rightshift */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
596 21, /* bitsize */
597 TRUE, /* pc_relative */
598 0, /* bitpos */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
606
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
609 12, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 21, /* bitsize */
612 TRUE, /* pc_relative */
613 0, /* bitpos */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
621
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
624 12, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 21, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 12, /* bitsize */
642 FALSE, /* pc_relative */
643 10, /* bitpos */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 12, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 /* Relocations for control-flow instructions. */
668
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
671 2, /* rightshift */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
673 14, /* bitsize */
674 TRUE, /* pc_relative */
675 0, /* bitpos */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
683
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
686 2, /* rightshift */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
688 19, /* bitsize */
689 TRUE, /* pc_relative */
690 0, /* bitpos */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
698
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
701 2, /* rightshift */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
703 26, /* bitsize */
704 TRUE, /* pc_relative */
705 0, /* bitpos */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
713
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 26, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
731 1, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 12, /* bitsize */
734 FALSE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
743
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 12, /* bitsize */
749 FALSE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
758
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
761 3, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 12, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
776 4, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
792 2, /* rightshift */
793 2, /* size (0 = byte,1 = short,2 = long) */
794 19, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
808 12, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 21, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
823 3, /* rightshift */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
825 12, /* bitsize */
826 FALSE, /* pc_relative */
827 0, /* bitpos */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
835
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
838 2, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 12, /* bitsize */
841 FALSE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
850
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
854 12, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 21, /* bitsize */
857 TRUE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
866
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
868 0, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 21, /* bitsize */
871 TRUE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
880
881 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
883 0, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
897 16, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 16, /* bitsize */
900 FALSE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
909
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
925 12, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 21, /* bitsize */
928 FALSE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
937
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
939 3, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 12, /* bitsize */
942 FALSE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
951
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
953 2, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 12, /* bitsize */
956 FALSE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
965
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 19, /* bitsize */
970 FALSE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
979
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
981 32, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 16, /* bitsize */
984 FALSE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
993
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
995 16, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 16, /* bitsize */
998 FALSE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1007
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 16, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1093 2, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 19, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 21, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1125 21, /* bitsize */
1126 TRUE, /* pc_relative */
1127 0, /* bitpos */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1135
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1138 3, /* rightshift */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1140 12, /* bitsize */
1141 FALSE, /* pc_relative */
1142 0, /* bitpos */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1150
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1153 2, /* rightshift */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1155 12, /* bitsize */
1156 FALSE, /* pc_relative */
1157 0, /* bitpos */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1165
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1168 0, /* rightshift */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1170 12, /* bitsize */
1171 FALSE, /* pc_relative */
1172 0, /* bitpos */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1180
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 12, /* bitsize */
1185 FALSE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1194
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 12, /* bitsize */
1199 FALSE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1208
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1210 0, /* rightshift */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1212 12, /* bitsize */
1213 FALSE, /* pc_relative */
1214 0, /* bitpos */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1219 0x0, /* src_mask */
1220 0x0, /* dst_mask */
1221 FALSE), /* pcrel_offset */
1222
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1224 0, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 12, /* bitsize */
1227 FALSE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1233 0x0, /* src_mask */
1234 0x0, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1238 0, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 0, /* bitsize */
1241 FALSE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1247 0x0, /* src_mask */
1248 0x0, /* dst_mask */
1249 FALSE), /* pcrel_offset */
1250
1251 HOWTO (AARCH64_R (COPY), /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 64, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1266 0, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 64, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 64, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 64, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 64, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 #if ARCH_SIZE == 64
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1317 #else
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1319 #endif
1320 FALSE, /* partial_inplace */
1321 0, /* src_mask */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1324
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1326 0, /* rightshift */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1328 64, /* bitsize */
1329 FALSE, /* pc_relative */
1330 0, /* bitpos */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1333 #if ARCH_SIZE == 64
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1335 #else
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1337 #endif
1338 FALSE, /* partial_inplace */
1339 0, /* src_mask */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 64, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 #if ARCH_SIZE == 64
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1353 #else
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1355 #endif
1356 FALSE, /* partial_inplace */
1357 0, /* src_mask */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1360
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1362 0, /* rightshift */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 64, /* bitsize */
1365 FALSE, /* pc_relative */
1366 0, /* bitpos */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1371 0, /* src_mask */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1374
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1376 0, /* rightshift */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 64, /* bitsize */
1379 FALSE, /* pc_relative */
1380 0, /* bitpos */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1385 0, /* src_mask */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1388
1389 EMPTY_HOWTO (0),
1390 };
1391
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1394 0, /* rightshift */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1396 0, /* bitsize */
1397 FALSE, /* pc_relative */
1398 0, /* bitpos */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1403 0, /* src_mask */
1404 0, /* dst_mask */
1405 FALSE); /* pcrel_offset */
1406
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1408
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1411 {
1412 const int size
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1416
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1419
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1422
1423 return BFD_RELOC_AARCH64_RELOC_START;
1424 }
1425
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1427
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1430 {
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1434
1435 if (initialized_p == FALSE)
1436 {
1437 unsigned int i;
1438
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1442
1443 initialized_p = TRUE;
1444 }
1445
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1448
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1451 {
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1455 }
1456
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1458 }
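/* For example (a sketch, not a call site copied from this file), the bfd
   internal enumerator for the relocation described by an Elf_Internal_Rela
   REL is obtained with:

     bfd_reloc_code_real_type bfd_r_type
       = elfNN_aarch64_bfd_reloc_from_type (ELFNN_R_TYPE (rel->r_info));  */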
1459
1460 struct elf_aarch64_reloc_map
1461 {
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1464 };
1465
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1468 {
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1470
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1479 };
1480
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1483
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1486 {
1487 unsigned int i;
1488
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1494 {
1495 code = elf_aarch64_reloc_map[i].to;
1496 break;
1497 }
1498
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1503
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1506
1507 return NULL;
1508 }
1509
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1512 {
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1515
1516 #if ARCH_SIZE == 32
1517 if (r_type > 256)
1518 {
1519 bfd_set_error (bfd_error_bad_value);
1520 return NULL;
1521 }
1522 #endif
1523
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1526
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1529
1530 if (howto != NULL)
1531 return howto;
1532
1533 bfd_set_error (bfd_error_bad_value);
1534 return NULL;
1535 }
1536
1537 static void
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1540 {
1541 unsigned int r_type;
1542
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1545 }
1546
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1550 {
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1552
1553 if (howto != NULL)
1554 return howto;
1555
1556 bfd_set_error (bfd_error_bad_value);
1557 return NULL;
1558 }
1559
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1562 const char *r_name)
1563 {
1564 unsigned int i;
1565
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1570
1571 return NULL;
1572 }
1573
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1578
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name and its type, the stub can be found.  The
1582 name can be changed.  The only requirement is that the %s be present.  */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
1584
1585 /* The name of the dynamic interpreter. This is put in the .interp
1586 section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1588
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1592 (-((1 << 25) << 2))
1593
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
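/* Worked out from the definitions above: B/BL encode a signed 26-bit word
   offset, so a branch reaches at most ((1 << 25) - 1) << 2 = 0x7fffffc bytes
   forward (just under 128MiB) and -((1 << 25) << 2) = -0x8000000 bytes
   (128MiB) backward.  ADRP encodes a signed 21-bit page offset, spanning
   -(1 << 20) to (1 << 20) - 1 pages of 4KiB, i.e. roughly +/- 4GiB.  */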
1596
1597 static int
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1599 {
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1602 }
1603
1604 static int
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1606 {
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1610 }
1611
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1613 {
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1619 };
1620
1621 static const uint32_t aarch64_long_branch_stub[] =
1622 {
1623 #if ARCH_SIZE == 64
1624 0x58000090, /* ldr ip0, 1f */
1625 #else
1626 0x18000090, /* ldr wip0, 1f */
1627 #endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1633 */
1634 0x00000000,
1635 };
1636
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1638 {
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1641 };
1642
1643 /* Section name for stubs is the associated section name plus this
1644 string. */
1645 #define STUB_SUFFIX ".stub"
1646
1647 enum elf_aarch64_stub_type
1648 {
1649 aarch64_stub_none,
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1653 };
1654
1655 struct elf_aarch64_stub_hash_entry
1656 {
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1659
1660 /* The stub section. */
1661 asection *stub_sec;
1662
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1665
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1670
1671 enum elf_aarch64_stub_type stub_type;
1672
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1675
1676 /* Destination symbol type */
1677 unsigned char st_type;
1678
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1681 asection *id_sec;
1682
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1686 char *output_name;
1687
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1691 };
1692
1693 /* Used to build a map of a section. This is required for mixed-endian
1694 code/data. */
1695
1696 typedef struct elf_elf_section_map
1697 {
1698 bfd_vma vma;
1699 char type;
1700 }
1701 elf_aarch64_section_map;
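
/* Illustrative example: the map records AArch64 mapping symbols, where
   "$x" marks the start of a run of instructions and "$d" the start of
   literal data.  For a section laid out (offsets made up) as

     0x00 $x  code ...
     0x40 $d  .quad constants ...
     0x60 $x  more code ...

   the map would hold { {0x00, 'x'}, {0x40, 'd'}, {0x60, 'x'} }, which
   is what lets erratum_835769_scan below skip the 'd' spans.  */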
1702
1703
1704 typedef struct _aarch64_elf_section_data
1705 {
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1710 }
1711 _aarch64_elf_section_data;
1712
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1715
1716 /* A fix-descriptor for erratum 835769. */
1717 struct aarch64_erratum_835769_fix
1718 {
1719 bfd *input_bfd;
1720 asection *section;
1721 bfd_vma offset;
1722 uint32_t veneered_insn;
1723 char *stub_name;
1724 enum elf_aarch64_stub_type stub_type;
1725 };
1726
1727 /* The size of the thread control block, which is defined to be two pointers. */
1728 #define TCB_SIZE (ARCH_SIZE/8)*2
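
/* For example, with ARCH_SIZE == 64 this is (64/8)*2 == 16 bytes, and
   with ARCH_SIZE == 32 it is 8 bytes.  AArch64 uses the "variant I"
   TLS layout, where the thread pointer addresses this control block
   and the static TLS data follows it.  */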
1729
1730 struct elf_aarch64_local_symbol
1731 {
1732 unsigned int got_type;
1733 bfd_signed_vma got_refcount;
1734 bfd_vma got_offset;
1735
1736 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1737 offset is from the end of the jump table and reserved entries
1738 within the PLTGOT.
1739
1740 The magic value (bfd_vma) -1 indicates that an offset has not been
1741 allocated. */
1742 bfd_vma tlsdesc_got_jump_table_offset;
1743 };
1744
1745 struct elf_aarch64_obj_tdata
1746 {
1747 struct elf_obj_tdata root;
1748
1749 /* local symbol descriptors */
1750 struct elf_aarch64_local_symbol *locals;
1751
1752 /* Zero to warn when linking objects with incompatible enum sizes. */
1753 int no_enum_size_warning;
1754
1755 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1756 int no_wchar_size_warning;
1757 };
1758
1759 #define elf_aarch64_tdata(bfd) \
1760 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1761
1762 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1763
1764 #define is_aarch64_elf(bfd) \
1765 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1766 && elf_tdata (bfd) != NULL \
1767 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1768
1769 static bfd_boolean
1770 elfNN_aarch64_mkobject (bfd *abfd)
1771 {
1772 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1773 AARCH64_ELF_DATA);
1774 }
1775
1776 #define elf_aarch64_hash_entry(ent) \
1777 ((struct elf_aarch64_link_hash_entry *)(ent))
1778
1779 #define GOT_UNKNOWN 0
1780 #define GOT_NORMAL 1
1781 #define GOT_TLS_GD 2
1782 #define GOT_TLS_IE 4
1783 #define GOT_TLSDESC_GD 8
1784
1785 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
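
/* got_type is a bit mask, so, for example,
   GOT_TLS_GD_ANY_P (GOT_TLSDESC_GD | GOT_TLS_IE) and
   GOT_TLS_GD_ANY_P (GOT_TLS_GD) are both non-zero, whereas
   GOT_TLS_GD_ANY_P (GOT_NORMAL) is zero.  */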
1786
1787 /* AArch64 ELF linker hash entry. */
1788 struct elf_aarch64_link_hash_entry
1789 {
1790 struct elf_link_hash_entry root;
1791
1792 /* Track dynamic relocs copied for this symbol. */
1793 struct elf_dyn_relocs *dyn_relocs;
1794
1795 /* Since PLT entries have variable size, we need to record the
1796 index into .got.plt instead of recomputing it from the PLT
1797 offset. */
1798 bfd_signed_vma plt_got_offset;
1799
1800 /* Bit mask representing the type of GOT entries (if any) required by
1801 this symbol. */
1802 unsigned int got_type;
1803
1804 /* A pointer to the most recently used stub hash entry against this
1805 symbol. */
1806 struct elf_aarch64_stub_hash_entry *stub_cache;
1807
1808 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1809 is from the end of the jump table and reserved entries within the PLTGOT.
1810
1811 The magic value (bfd_vma) -1 indicates that an offset has not
1812 been allocated. */
1813 bfd_vma tlsdesc_got_jump_table_offset;
1814 };
1815
1816 static unsigned int
1817 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1818 bfd *abfd,
1819 unsigned long r_symndx)
1820 {
1821 if (h)
1822 return elf_aarch64_hash_entry (h)->got_type;
1823
1824 if (! elf_aarch64_locals (abfd))
1825 return GOT_UNKNOWN;
1826
1827 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1828 }
1829
1830 /* Get the AArch64 elf linker hash table from a link_info structure. */
1831 #define elf_aarch64_hash_table(info) \
1832 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1833
1834 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1835 ((struct elf_aarch64_stub_hash_entry *) \
1836 bfd_hash_lookup ((table), (string), (create), (copy)))
1837
1838 /* AArch64 ELF linker hash table. */
1839 struct elf_aarch64_link_hash_table
1840 {
1841 /* The main hash table. */
1842 struct elf_link_hash_table root;
1843
1844 /* Nonzero to force PIC branch veneers. */
1845 int pic_veneer;
1846
1847 /* Fix erratum 835769. */
1848 int fix_erratum_835769;
1849
1850 /* The number of bytes in the initial entry in the PLT. */
1851 bfd_size_type plt_header_size;
1852
1853 /* The number of bytes in the subsequent PLT entries. */
1854 bfd_size_type plt_entry_size;
1855
1856 /* Short-cuts to get to dynamic linker sections. */
1857 asection *sdynbss;
1858 asection *srelbss;
1859
1860 /* Small local sym cache. */
1861 struct sym_cache sym_cache;
1862
1863 /* For convenience in allocate_dynrelocs. */
1864 bfd *obfd;
1865
1866 /* The amount of space used by the reserved portion of the sgotplt
1867 section, plus whatever space is used by the jump slots. */
1868 bfd_vma sgotplt_jump_table_size;
1869
1870 /* The stub hash table. */
1871 struct bfd_hash_table stub_hash_table;
1872
1873 /* Linker stub bfd. */
1874 bfd *stub_bfd;
1875
1876 /* Linker call-backs. */
1877 asection *(*add_stub_section) (const char *, asection *);
1878 void (*layout_sections_again) (void);
1879
1880 /* Array to keep track of which stub sections have been created, and
1881 information on stub grouping. */
1882 struct map_stub
1883 {
1884 /* This is the section to which stubs in the group will be
1885 attached. */
1886 asection *link_sec;
1887 /* The stub section. */
1888 asection *stub_sec;
1889 } *stub_group;
1890
1891 /* Assorted information used by elfNN_aarch64_size_stubs. */
1892 unsigned int bfd_count;
1893 int top_index;
1894 asection **input_list;
1895
1896 /* The offset into splt of the PLT entry for the TLS descriptor
1897 resolver. Special values are 0, if not necessary (or not found
1898 to be necessary yet), and -1 if needed but not determined
1899 yet. */
1900 bfd_vma tlsdesc_plt;
1901
1902 /* The GOT offset for the lazy trampoline. Communicated to the
1903 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1904 indicates an offset is not allocated. */
1905 bfd_vma dt_tlsdesc_got;
1906
1907 /* Used by local STT_GNU_IFUNC symbols. */
1908 htab_t loc_hash_table;
1909 void * loc_hash_memory;
1910 };
1911
1912 /* Create an entry in an AArch64 ELF linker hash table. */
1913
1914 static struct bfd_hash_entry *
1915 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1916 struct bfd_hash_table *table,
1917 const char *string)
1918 {
1919 struct elf_aarch64_link_hash_entry *ret =
1920 (struct elf_aarch64_link_hash_entry *) entry;
1921
1922 /* Allocate the structure if it has not already been allocated by a
1923 subclass. */
1924 if (ret == NULL)
1925 ret = bfd_hash_allocate (table,
1926 sizeof (struct elf_aarch64_link_hash_entry));
1927 if (ret == NULL)
1928 return (struct bfd_hash_entry *) ret;
1929
1930 /* Call the allocation method of the superclass. */
1931 ret = ((struct elf_aarch64_link_hash_entry *)
1932 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1933 table, string));
1934 if (ret != NULL)
1935 {
1936 ret->dyn_relocs = NULL;
1937 ret->got_type = GOT_UNKNOWN;
1938 ret->plt_got_offset = (bfd_vma) - 1;
1939 ret->stub_cache = NULL;
1940 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1941 }
1942
1943 return (struct bfd_hash_entry *) ret;
1944 }
1945
1946 /* Initialize an entry in the stub hash table. */
1947
1948 static struct bfd_hash_entry *
1949 stub_hash_newfunc (struct bfd_hash_entry *entry,
1950 struct bfd_hash_table *table, const char *string)
1951 {
1952 /* Allocate the structure if it has not already been allocated by a
1953 subclass. */
1954 if (entry == NULL)
1955 {
1956 entry = bfd_hash_allocate (table,
1957 sizeof (struct
1958 elf_aarch64_stub_hash_entry));
1959 if (entry == NULL)
1960 return entry;
1961 }
1962
1963 /* Call the allocation method of the superclass. */
1964 entry = bfd_hash_newfunc (entry, table, string);
1965 if (entry != NULL)
1966 {
1967 struct elf_aarch64_stub_hash_entry *eh;
1968
1969 /* Initialize the local fields. */
1970 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1971 eh->stub_sec = NULL;
1972 eh->stub_offset = 0;
1973 eh->target_value = 0;
1974 eh->target_section = NULL;
1975 eh->stub_type = aarch64_stub_none;
1976 eh->h = NULL;
1977 eh->id_sec = NULL;
1978 }
1979
1980 return entry;
1981 }
1982
1983 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1984 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1985 as global symbols. We reuse indx and dynstr_index for the local symbol
1986 hash since they aren't used by global symbols in this backend. */
1987
1988 static hashval_t
1989 elfNN_aarch64_local_htab_hash (const void *ptr)
1990 {
1991 struct elf_link_hash_entry *h
1992 = (struct elf_link_hash_entry *) ptr;
1993 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1994 }
1995
1996 /* Compare local hash entries. */
1997
1998 static int
1999 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2000 {
2001 struct elf_link_hash_entry *h1
2002 = (struct elf_link_hash_entry *) ptr1;
2003 struct elf_link_hash_entry *h2
2004 = (struct elf_link_hash_entry *) ptr2;
2005
2006 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2007 }
2008
2009 /* Find and/or create a hash entry for a local symbol. */
2010
2011 static struct elf_link_hash_entry *
2012 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2013 bfd *abfd, const Elf_Internal_Rela *rel,
2014 bfd_boolean create)
2015 {
2016 struct elf_aarch64_link_hash_entry e, *ret;
2017 asection *sec = abfd->sections;
2018 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2019 ELFNN_R_SYM (rel->r_info));
2020 void **slot;
2021
2022 e.root.indx = sec->id;
2023 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2024 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2025 create ? INSERT : NO_INSERT);
2026
2027 if (!slot)
2028 return NULL;
2029
2030 if (*slot)
2031 {
2032 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2033 return &ret->root;
2034 }
2035
2036 ret = (struct elf_aarch64_link_hash_entry *)
2037 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2038 sizeof (struct elf_aarch64_link_hash_entry));
2039 if (ret)
2040 {
2041 memset (ret, 0, sizeof (*ret));
2042 ret->root.indx = sec->id;
2043 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2044 ret->root.dynindx = -1;
2045 *slot = ret;
2046 }
2047 return &ret->root;
2048 }
2049
2050 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2051
2052 static void
2053 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2054 struct elf_link_hash_entry *dir,
2055 struct elf_link_hash_entry *ind)
2056 {
2057 struct elf_aarch64_link_hash_entry *edir, *eind;
2058
2059 edir = (struct elf_aarch64_link_hash_entry *) dir;
2060 eind = (struct elf_aarch64_link_hash_entry *) ind;
2061
2062 if (eind->dyn_relocs != NULL)
2063 {
2064 if (edir->dyn_relocs != NULL)
2065 {
2066 struct elf_dyn_relocs **pp;
2067 struct elf_dyn_relocs *p;
2068
2069 /* Add reloc counts against the indirect sym to the direct sym
2070 list. Merge any entries against the same section. */
2071 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2072 {
2073 struct elf_dyn_relocs *q;
2074
2075 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2076 if (q->sec == p->sec)
2077 {
2078 q->pc_count += p->pc_count;
2079 q->count += p->count;
2080 *pp = p->next;
2081 break;
2082 }
2083 if (q == NULL)
2084 pp = &p->next;
2085 }
2086 *pp = edir->dyn_relocs;
2087 }
2088
2089 edir->dyn_relocs = eind->dyn_relocs;
2090 eind->dyn_relocs = NULL;
2091 }
2092
2093 if (ind->root.type == bfd_link_hash_indirect)
2094 {
2095 /* Copy over PLT info. */
2096 if (dir->got.refcount <= 0)
2097 {
2098 edir->got_type = eind->got_type;
2099 eind->got_type = GOT_UNKNOWN;
2100 }
2101 }
2102
2103 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2104 }
2105
2106 /* Destroy an AArch64 elf linker hash table. */
2107
2108 static void
2109 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2110 {
2111 struct elf_aarch64_link_hash_table *ret
2112 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2113
2114 if (ret->loc_hash_table)
2115 htab_delete (ret->loc_hash_table);
2116 if (ret->loc_hash_memory)
2117 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2118
2119 bfd_hash_table_free (&ret->stub_hash_table);
2120 _bfd_elf_link_hash_table_free (obfd);
2121 }
2122
2123 /* Create an AArch64 elf linker hash table. */
2124
2125 static struct bfd_link_hash_table *
2126 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2127 {
2128 struct elf_aarch64_link_hash_table *ret;
2129 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2130
2131 ret = bfd_zmalloc (amt);
2132 if (ret == NULL)
2133 return NULL;
2134
2135 if (!_bfd_elf_link_hash_table_init
2136 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2137 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2138 {
2139 free (ret);
2140 return NULL;
2141 }
2142
2143 ret->plt_header_size = PLT_ENTRY_SIZE;
2144 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2145 ret->obfd = abfd;
2146 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2147
2148 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2149 sizeof (struct elf_aarch64_stub_hash_entry)))
2150 {
2151 _bfd_elf_link_hash_table_free (abfd);
2152 return NULL;
2153 }
2154
2155 ret->loc_hash_table = htab_try_create (1024,
2156 elfNN_aarch64_local_htab_hash,
2157 elfNN_aarch64_local_htab_eq,
2158 NULL);
2159 ret->loc_hash_memory = objalloc_create ();
2160 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2161 {
2162 elfNN_aarch64_link_hash_table_free (abfd);
2163 return NULL;
2164 }
2165 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2166
2167 return &ret->root.root;
2168 }
2169
2170 static bfd_boolean
2171 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2172 bfd_vma offset, bfd_vma value)
2173 {
2174 reloc_howto_type *howto;
2175 bfd_vma place;
2176
2177 howto = elfNN_aarch64_howto_from_type (r_type);
2178 place = (input_section->output_section->vma + input_section->output_offset
2179 + offset);
2180
2181 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2182 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2183 return _bfd_aarch64_elf_put_addend (input_bfd,
2184 input_section->contents + offset, r_type,
2185 howto, value);
2186 }
2187
2188 static enum elf_aarch64_stub_type
2189 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2190 {
2191 if (aarch64_valid_for_adrp_p (value, place))
2192 return aarch64_stub_adrp_branch;
2193 return aarch64_stub_long_branch;
2194 }
2195
2196 /* Determine the type of stub needed, if any, for a call. */
2197
2198 static enum elf_aarch64_stub_type
2199 aarch64_type_of_stub (struct bfd_link_info *info,
2200 asection *input_sec,
2201 const Elf_Internal_Rela *rel,
2202 unsigned char st_type,
2203 struct elf_aarch64_link_hash_entry *hash,
2204 bfd_vma destination)
2205 {
2206 bfd_vma location;
2207 bfd_signed_vma branch_offset;
2208 unsigned int r_type;
2209 struct elf_aarch64_link_hash_table *globals;
2210 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2211 bfd_boolean via_plt_p;
2212
2213 if (st_type != STT_FUNC)
2214 return stub_type;
2215
2216 globals = elf_aarch64_hash_table (info);
2217 via_plt_p = (globals->root.splt != NULL && hash != NULL
2218 && hash->root.plt.offset != (bfd_vma) - 1);
2219
2220 if (via_plt_p)
2221 return stub_type;
2222
2223 /* Determine where the call point is. */
2224 location = (input_sec->output_offset
2225 + input_sec->output_section->vma + rel->r_offset);
2226
2227 branch_offset = (bfd_signed_vma) (destination - location);
2228
2229 r_type = ELFNN_R_TYPE (rel->r_info);
2230
2231 /* We don't want to redirect any old unconditional jump in this way,
2232 only one which is being used for a sibcall, where it is
2233 acceptable for the IP0 and IP1 registers to be clobbered. */
2234 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2235 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2236 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2237 {
2238 stub_type = aarch64_stub_long_branch;
2239 }
2240
2241 return stub_type;
2242 }
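
/* A worked example of the test above (figures only illustrative): with
   the +/-128MB limits defined earlier, a BL (AARCH64_R (CALL26)) to an
   STT_FUNC destination 200MB beyond the call site gives branch_offset
   == 200*1024*1024, which exceeds AARCH64_MAX_FWD_BRANCH_OFFSET
   (0x7fffffc), so aarch64_stub_long_branch is returned; the same call
   to a destination 1MB away stays within range and aarch64_stub_none
   is returned, i.e. no stub is needed.  */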
2243
2244 /* Build a name for an entry in the stub hash table. */
2245
2246 static char *
2247 elfNN_aarch64_stub_name (const asection *input_section,
2248 const asection *sym_sec,
2249 const struct elf_aarch64_link_hash_entry *hash,
2250 const Elf_Internal_Rela *rel)
2251 {
2252 char *stub_name;
2253 bfd_size_type len;
2254
2255 if (hash)
2256 {
2257 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2258 stub_name = bfd_malloc (len);
2259 if (stub_name != NULL)
2260 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2261 (unsigned int) input_section->id,
2262 hash->root.root.root.string,
2263 rel->r_addend);
2264 }
2265 else
2266 {
2267 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2268 stub_name = bfd_malloc (len);
2269 if (stub_name != NULL)
2270 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2271 (unsigned int) input_section->id,
2272 (unsigned int) sym_sec->id,
2273 (unsigned int) ELFNN_R_SYM (rel->r_info),
2274 rel->r_addend);
2275 }
2276
2277 return stub_name;
2278 }
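
/* Examples of the names built above (the section ids, symbol index and
   addend are made up): a relocation against the global symbol "printf"
   from the input section with id 0x2a and addend 0 produces the stub
   name "0000002a_printf+0"; a relocation against local symbol index 5
   defined in the section with id 0x1f produces "0000002a_1f:5+0".  */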
2279
2280 /* Look up an entry in the stub hash. Stub entries are cached because
2281 creating the stub name takes a bit of time. */
2282
2283 static struct elf_aarch64_stub_hash_entry *
2284 elfNN_aarch64_get_stub_entry (const asection *input_section,
2285 const asection *sym_sec,
2286 struct elf_link_hash_entry *hash,
2287 const Elf_Internal_Rela *rel,
2288 struct elf_aarch64_link_hash_table *htab)
2289 {
2290 struct elf_aarch64_stub_hash_entry *stub_entry;
2291 struct elf_aarch64_link_hash_entry *h =
2292 (struct elf_aarch64_link_hash_entry *) hash;
2293 const asection *id_sec;
2294
2295 if ((input_section->flags & SEC_CODE) == 0)
2296 return NULL;
2297
2298 /* If this input section is part of a group of sections sharing one
2299 stub section, then use the id of the first section in the group.
2300 Stub names need to include a section id, as there may well be
2301 more than one stub used to reach say, printf, and we need to
2302 distinguish between them. */
2303 id_sec = htab->stub_group[input_section->id].link_sec;
2304
2305 if (h != NULL && h->stub_cache != NULL
2306 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2307 {
2308 stub_entry = h->stub_cache;
2309 }
2310 else
2311 {
2312 char *stub_name;
2313
2314 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2315 if (stub_name == NULL)
2316 return NULL;
2317
2318 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2319 stub_name, FALSE, FALSE);
2320 if (h != NULL)
2321 h->stub_cache = stub_entry;
2322
2323 free (stub_name);
2324 }
2325
2326 return stub_entry;
2327 }
2328
2329 /* Add a new stub entry to the stub hash. Not all fields of the new
2330 stub entry are initialised. */
2331
2332 static struct elf_aarch64_stub_hash_entry *
2333 elfNN_aarch64_add_stub (const char *stub_name,
2334 asection *section,
2335 struct elf_aarch64_link_hash_table *htab)
2336 {
2337 asection *link_sec;
2338 asection *stub_sec;
2339 struct elf_aarch64_stub_hash_entry *stub_entry;
2340
2341 link_sec = htab->stub_group[section->id].link_sec;
2342 stub_sec = htab->stub_group[section->id].stub_sec;
2343 if (stub_sec == NULL)
2344 {
2345 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2346 if (stub_sec == NULL)
2347 {
2348 size_t namelen;
2349 bfd_size_type len;
2350 char *s_name;
2351
2352 namelen = strlen (link_sec->name);
2353 len = namelen + sizeof (STUB_SUFFIX);
2354 s_name = bfd_alloc (htab->stub_bfd, len);
2355 if (s_name == NULL)
2356 return NULL;
2357
2358 memcpy (s_name, link_sec->name, namelen);
2359 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2360 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2361 if (stub_sec == NULL)
2362 return NULL;
2363 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2364 }
2365 htab->stub_group[section->id].stub_sec = stub_sec;
2366 }
2367
2368 /* Enter this entry into the linker stub hash table. */
2369 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2370 TRUE, FALSE);
2371 if (stub_entry == NULL)
2372 {
2373 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2374 section->owner, stub_name);
2375 return NULL;
2376 }
2377
2378 stub_entry->stub_sec = stub_sec;
2379 stub_entry->stub_offset = 0;
2380 stub_entry->id_sec = link_sec;
2381
2382 return stub_entry;
2383 }
2384
2385 static bfd_boolean
2386 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2387 void *in_arg ATTRIBUTE_UNUSED)
2388 {
2389 struct elf_aarch64_stub_hash_entry *stub_entry;
2390 asection *stub_sec;
2391 bfd *stub_bfd;
2392 bfd_byte *loc;
2393 bfd_vma sym_value;
2394 bfd_vma veneered_insn_loc;
2395 bfd_vma veneer_entry_loc;
2396 bfd_signed_vma branch_offset = 0;
2397 unsigned int template_size;
2398 const uint32_t *template;
2399 unsigned int i;
2400
2401 /* Massage our args to the form they really have. */
2402 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2403
2404 stub_sec = stub_entry->stub_sec;
2405
2406 /* Make a note of the offset within the stubs for this entry. */
2407 stub_entry->stub_offset = stub_sec->size;
2408 loc = stub_sec->contents + stub_entry->stub_offset;
2409
2410 stub_bfd = stub_sec->owner;
2411
2412 /* This is the address of the stub destination. */
2413 sym_value = (stub_entry->target_value
2414 + stub_entry->target_section->output_offset
2415 + stub_entry->target_section->output_section->vma);
2416
2417 if (stub_entry->stub_type == aarch64_stub_long_branch)
2418 {
2419 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2420 + stub_sec->output_offset);
2421
2422 /* See if we can relax the stub. */
2423 if (aarch64_valid_for_adrp_p (sym_value, place))
2424 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2425 }
2426
2427 switch (stub_entry->stub_type)
2428 {
2429 case aarch64_stub_adrp_branch:
2430 template = aarch64_adrp_branch_stub;
2431 template_size = sizeof (aarch64_adrp_branch_stub);
2432 break;
2433 case aarch64_stub_long_branch:
2434 template = aarch64_long_branch_stub;
2435 template_size = sizeof (aarch64_long_branch_stub);
2436 break;
2437 case aarch64_stub_erratum_835769_veneer:
2438 template = aarch64_erratum_835769_stub;
2439 template_size = sizeof (aarch64_erratum_835769_stub);
2440 break;
2441 default:
2442 abort ();
2443 }
2444
2445 for (i = 0; i < (template_size / sizeof template[0]); i++)
2446 {
2447 bfd_putl32 (template[i], loc);
2448 loc += 4;
2449 }
2450
2451 template_size = (template_size + 7) & ~7;
2452 stub_sec->size += template_size;
2453
2454 switch (stub_entry->stub_type)
2455 {
2456 case aarch64_stub_adrp_branch:
2457 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2458 stub_entry->stub_offset, sym_value))
2459 /* The stub would not have been relaxed if the offset was out
2460 of range. */
2461 BFD_FAIL ();
2462
2463 _bfd_final_link_relocate
2464 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2465 stub_bfd,
2466 stub_sec,
2467 stub_sec->contents,
2468 stub_entry->stub_offset + 4,
2469 sym_value,
2470 0);
2471 break;
2472
2473 case aarch64_stub_long_branch:
2474 /* We want the value relative to the address 12 bytes back from the
2475 value itself. */
2476 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2477 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2478 stub_sec->contents,
2479 stub_entry->stub_offset + 16,
2480 sym_value + 12, 0);
2481 break;
2482
2483 case aarch64_stub_erratum_835769_veneer:
2484 veneered_insn_loc = stub_entry->target_section->output_section->vma
2485 + stub_entry->target_section->output_offset
2486 + stub_entry->target_value;
2487 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2488 + stub_entry->stub_sec->output_offset
2489 + stub_entry->stub_offset;
2490 branch_offset = veneered_insn_loc - veneer_entry_loc;
2491 branch_offset >>= 2;
2492 branch_offset &= 0x3ffffff;
2493 bfd_putl32 (stub_entry->veneered_insn,
2494 stub_sec->contents + stub_entry->stub_offset);
2495 bfd_putl32 (template[1] | branch_offset,
2496 stub_sec->contents + stub_entry->stub_offset + 4);
2497 break;
2498
2499 default:
2500 abort ();
2501 }
2502
2503 return TRUE;
2504 }
2505
2506 /* As above, but don't actually build the stub. Just bump offset so
2507 we know stub section sizes. */
2508
2509 static bfd_boolean
2510 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2511 void *in_arg ATTRIBUTE_UNUSED)
2512 {
2513 struct elf_aarch64_stub_hash_entry *stub_entry;
2514 int size;
2515
2516 /* Massage our args to the form they really have. */
2517 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2518
2519 switch (stub_entry->stub_type)
2520 {
2521 case aarch64_stub_adrp_branch:
2522 size = sizeof (aarch64_adrp_branch_stub);
2523 break;
2524 case aarch64_stub_long_branch:
2525 size = sizeof (aarch64_long_branch_stub);
2526 break;
2527 case aarch64_stub_erratum_835769_veneer:
2528 size = sizeof (aarch64_erratum_835769_stub);
2529 break;
2530 default:
2531 abort ();
2532 }
2533
2534 size = (size + 7) & ~7;
2535 stub_entry->stub_sec->size += size;
2536 return TRUE;
2537 }
2538
2539 /* External entry points for sizing and building linker stubs. */
2540
2541 /* Set up various things so that we can make a list of input sections
2542 for each output section included in the link. Returns -1 on error,
2543 0 when no stubs will be needed, and 1 on success. */
2544
2545 int
2546 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2547 struct bfd_link_info *info)
2548 {
2549 bfd *input_bfd;
2550 unsigned int bfd_count;
2551 int top_id, top_index;
2552 asection *section;
2553 asection **input_list, **list;
2554 bfd_size_type amt;
2555 struct elf_aarch64_link_hash_table *htab =
2556 elf_aarch64_hash_table (info);
2557
2558 if (!is_elf_hash_table (htab))
2559 return 0;
2560
2561 /* Count the number of input BFDs and find the top input section id. */
2562 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2563 input_bfd != NULL; input_bfd = input_bfd->link.next)
2564 {
2565 bfd_count += 1;
2566 for (section = input_bfd->sections;
2567 section != NULL; section = section->next)
2568 {
2569 if (top_id < section->id)
2570 top_id = section->id;
2571 }
2572 }
2573 htab->bfd_count = bfd_count;
2574
2575 amt = sizeof (struct map_stub) * (top_id + 1);
2576 htab->stub_group = bfd_zmalloc (amt);
2577 if (htab->stub_group == NULL)
2578 return -1;
2579
2580 /* We can't use output_bfd->section_count here to find the top output
2581 section index as some sections may have been removed, and
2582 _bfd_strip_section_from_output doesn't renumber the indices. */
2583 for (section = output_bfd->sections, top_index = 0;
2584 section != NULL; section = section->next)
2585 {
2586 if (top_index < section->index)
2587 top_index = section->index;
2588 }
2589
2590 htab->top_index = top_index;
2591 amt = sizeof (asection *) * (top_index + 1);
2592 input_list = bfd_malloc (amt);
2593 htab->input_list = input_list;
2594 if (input_list == NULL)
2595 return -1;
2596
2597 /* For sections we aren't interested in, mark their entries with a
2598 value we can check later. */
2599 list = input_list + top_index;
2600 do
2601 *list = bfd_abs_section_ptr;
2602 while (list-- != input_list);
2603
2604 for (section = output_bfd->sections;
2605 section != NULL; section = section->next)
2606 {
2607 if ((section->flags & SEC_CODE) != 0)
2608 input_list[section->index] = NULL;
2609 }
2610
2611 return 1;
2612 }
2613
2614 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2615 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2616
2617 /* The linker repeatedly calls this function for each input section,
2618 in the order that input sections are linked into output sections.
2619 Build lists of input sections to determine groupings between which
2620 we may insert linker stubs. */
2621
2622 void
2623 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2624 {
2625 struct elf_aarch64_link_hash_table *htab =
2626 elf_aarch64_hash_table (info);
2627
2628 if (isec->output_section->index <= htab->top_index)
2629 {
2630 asection **list = htab->input_list + isec->output_section->index;
2631
2632 if (*list != bfd_abs_section_ptr)
2633 {
2634 /* Steal the link_sec pointer for our list. */
2635 /* This happens to make the list in reverse order,
2636 which is what we want. */
2637 PREV_SEC (isec) = *list;
2638 *list = isec;
2639 }
2640 }
2641 }
2642
2643 /* See whether we can group stub sections together. Grouping stub
2644 sections may result in fewer stubs. More importantly, we need to
2645 put all .init* and .fini* stubs at the beginning of the .init or
2646 .fini output sections respectively, because glibc splits the
2647 _init and _fini functions into multiple parts. Putting a stub in
2648 the middle of a function is not a good idea. */
2649
2650 static void
2651 group_sections (struct elf_aarch64_link_hash_table *htab,
2652 bfd_size_type stub_group_size,
2653 bfd_boolean stubs_always_before_branch)
2654 {
2655 asection **list = htab->input_list + htab->top_index;
2656
2657 do
2658 {
2659 asection *tail = *list;
2660
2661 if (tail == bfd_abs_section_ptr)
2662 continue;
2663
2664 while (tail != NULL)
2665 {
2666 asection *curr;
2667 asection *prev;
2668 bfd_size_type total;
2669
2670 curr = tail;
2671 total = tail->size;
2672 while ((prev = PREV_SEC (curr)) != NULL
2673 && ((total += curr->output_offset - prev->output_offset)
2674 < stub_group_size))
2675 curr = prev;
2676
2677 /* OK, the size from the start of CURR to the end is less
2678 than stub_group_size and thus can be handled by one stub
2679 section. (Or the tail section is itself larger than
2680 stub_group_size, in which case we may be toast.)
2681 We should really be keeping track of the total size of
2682 stubs added here, as stubs contribute to the final output
2683 section size. */
2684 do
2685 {
2686 prev = PREV_SEC (tail);
2687 /* Set up this stub group. */
2688 htab->stub_group[tail->id].link_sec = curr;
2689 }
2690 while (tail != curr && (tail = prev) != NULL);
2691
2692 /* But wait, there's more! Input sections up to stub_group_size
2693 bytes before the stub section can be handled by it too. */
2694 if (!stubs_always_before_branch)
2695 {
2696 total = 0;
2697 while (prev != NULL
2698 && ((total += tail->output_offset - prev->output_offset)
2699 < stub_group_size))
2700 {
2701 tail = prev;
2702 prev = PREV_SEC (tail);
2703 htab->stub_group[tail->id].link_sec = curr;
2704 }
2705 }
2706 tail = prev;
2707 }
2708 }
2709 while (list-- != htab->input_list);
2710
2711 free (htab->input_list);
2712 }
2713
2714 #undef PREV_SEC
2715
2716 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2717
2718 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2719 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2720 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2721 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2722 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2723 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2724
2725 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2726 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2727 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2728 #define AARCH64_ZR 0x1f
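
/* Illustrative use of the field extractors above, assuming the standard
   A64 encoding of "madd x0, x1, x2, x3" (0x9b020c20):

     AARCH64_MAC  (0x9b020c20)  -> true (top byte is 0x9b)
     AARCH64_RD   (0x9b020c20)  -> 0   (x0)
     AARCH64_RN   (0x9b020c20)  -> 1   (x1)
     AARCH64_RM   (0x9b020c20)  -> 2   (x2)
     AARCH64_RA   (0x9b020c20)  -> 3   (x3)
     AARCH64_OP31 (0x9b020c20)  -> 0  */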
2729
2730 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2731 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2732
2733 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2734 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2735 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2736 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2737 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2738 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2739 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2740 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2741 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2742 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2743 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2744 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2745 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2746 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2747 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2748 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2749 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2750 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2751
2752 /* Classify an INSN if it is indeed a load/store.
2753
2754 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2755
2756 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2757 is set equal to RT.
2758
2759 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2760
2761 */
2762
2763 static bfd_boolean
2764 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2765 bfd_boolean *pair, bfd_boolean *load)
2766 {
2767 uint32_t opcode;
2768 unsigned int r;
2769 uint32_t opc = 0;
2770 uint32_t v = 0;
2771 uint32_t opc_v = 0;
2772
2773 /* Bail out quickly if INSN doesn't fall into the load-store
2774 encoding space. */
2775 if (!AARCH64_LDST (insn))
2776 return FALSE;
2777
2778 *pair = FALSE;
2779 *load = FALSE;
2780 if (AARCH64_LDST_EX (insn))
2781 {
2782 *rt = AARCH64_RT (insn);
2783 *rt2 = *rt;
2784 if (AARCH64_BIT (insn, 21) == 1)
2785 {
2786 *pair = TRUE;
2787 *rt2 = AARCH64_RT2 (insn);
2788 }
2789 *load = AARCH64_LD (insn);
2790 return TRUE;
2791 }
2792 else if (AARCH64_LDST_NAP (insn)
2793 || AARCH64_LDSTP_PI (insn)
2794 || AARCH64_LDSTP_O (insn)
2795 || AARCH64_LDSTP_PRE (insn))
2796 {
2797 *pair = TRUE;
2798 *rt = AARCH64_RT (insn);
2799 *rt2 = AARCH64_RT2 (insn);
2800 *load = AARCH64_LD (insn);
2801 return TRUE;
2802 }
2803 else if (AARCH64_LDST_PCREL (insn)
2804 || AARCH64_LDST_UI (insn)
2805 || AARCH64_LDST_PIIMM (insn)
2806 || AARCH64_LDST_U (insn)
2807 || AARCH64_LDST_PREIMM (insn)
2808 || AARCH64_LDST_RO (insn)
2809 || AARCH64_LDST_UIMM (insn))
2810 {
2811 *rt = AARCH64_RT (insn);
2812 *rt2 = *rt;
2813 if (AARCH64_LDST_PCREL (insn))
2814 *load = TRUE;
2815 opc = AARCH64_BITS (insn, 22, 2);
2816 v = AARCH64_BIT (insn, 26);
2817 opc_v = opc | (v << 2);
2818 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2819 || opc_v == 5 || opc_v == 7);
2820 return TRUE;
2821 }
2822 else if (AARCH64_LDST_SIMD_M (insn)
2823 || AARCH64_LDST_SIMD_M_PI (insn))
2824 {
2825 *rt = AARCH64_RT (insn);
2826 *load = AARCH64_BIT (insn, 22);
2827 opcode = (insn >> 12) & 0xf;
2828 switch (opcode)
2829 {
2830 case 0:
2831 case 2:
2832 *rt2 = *rt + 3;
2833 break;
2834
2835 case 4:
2836 case 6:
2837 *rt2 = *rt + 2;
2838 break;
2839
2840 case 7:
2841 *rt2 = *rt;
2842 break;
2843
2844 case 8:
2845 case 10:
2846 *rt2 = *rt + 1;
2847 break;
2848
2849 default:
2850 return FALSE;
2851 }
2852 return TRUE;
2853 }
2854 else if (AARCH64_LDST_SIMD_S (insn)
2855 || AARCH64_LDST_SIMD_S_PI (insn))
2856 {
2857 *rt = AARCH64_RT (insn);
2858 r = (insn >> 21) & 1;
2859 *load = AARCH64_BIT (insn, 22);
2860 opcode = (insn >> 13) & 0x7;
2861 switch (opcode)
2862 {
2863 case 0:
2864 case 2:
2865 case 4:
2866 *rt2 = *rt + r;
2867 break;
2868
2869 case 1:
2870 case 3:
2871 case 5:
2872 *rt2 = *rt + (r == 0 ? 2 : 3);
2873 break;
2874
2875 case 6:
2876 *rt2 = *rt + r;
2877 break;
2878
2879 case 7:
2880 *rt2 = *rt + (r == 0 ? 2 : 3);
2881 break;
2882
2883 default:
2884 return FALSE;
2885 }
2886 return TRUE;
2887 }
2888
2889 return FALSE;
2890 }
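
/* Minimal usage sketch for the classifier above; the two encodings are
   assumed standard A64 forms rather than taken from this file:

     unsigned int rt, rt2;
     bfd_boolean pair, load;

     aarch64_mem_op_p (0xf9400020, &rt, &rt2, &pair, &load);
       -- "ldr x0, [x1]": returns TRUE with rt == rt2 == 0,
          pair == FALSE, load == TRUE (LDST_UIMM space).

     aarch64_mem_op_p (0xa94107e0, &rt, &rt2, &pair, &load);
       -- "ldp x0, x1, [sp, #16]": returns TRUE with rt == 0, rt2 == 1,
          pair == TRUE, load == TRUE (LDSTP_O space).  */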
2891
2892 /* Return TRUE if INSN is multiply-accumulate. */
2893
2894 static bfd_boolean
2895 aarch64_mlxl_p (uint32_t insn)
2896 {
2897 uint32_t op31 = AARCH64_OP31 (insn);
2898
2899 if (AARCH64_MAC (insn)
2900 && (op31 == 0 || op31 == 1 || op31 == 5)
2901 /* Exclude MUL instructions which are encoded as a multiple accumulate
2902 with RA = XZR. */
2903 && AARCH64_RA (insn) != AARCH64_ZR)
2904 return TRUE;
2905
2906 return FALSE;
2907 }
2908
2909 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2910 it is possible for a 64-bit multiply-accumulate instruction to generate an
2911 incorrect result. The details are quite complex and hard to
2912 determine statically, since branches in the code may exist in some
2913 circumstances, but all cases end with a memory (load, store, or
2914 prefetch) instruction followed immediately by the multiply-accumulate
2915 operation. We employ a linker patching technique, by moving the potentially
2916 affected multiply-accumulate instruction into a patch region and replacing
2917 the original instruction with a branch to the patch. This function checks
2918 if INSN_1 is the memory operation followed by a multiply-accumulate
2919 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2920 if INSN_1 and INSN_2 are safe. */
2921
2922 static bfd_boolean
2923 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2924 {
2925 uint32_t rt;
2926 uint32_t rt2;
2927 uint32_t rn;
2928 uint32_t rm;
2929 uint32_t ra;
2930 bfd_boolean pair;
2931 bfd_boolean load;
2932
2933 if (aarch64_mlxl_p (insn_2)
2934 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2935 {
2936 /* Any SIMD memory op is independent of the subsequent MLA
2937 by definition of the erratum. */
2938 if (AARCH64_BIT (insn_1, 26))
2939 return TRUE;
2940
2941 /* If not SIMD, check for integer memory ops and MLA relationship. */
2942 rn = AARCH64_RN (insn_2);
2943 ra = AARCH64_RA (insn_2);
2944 rm = AARCH64_RM (insn_2);
2945
2946 /* If this is a load and there's a true (RAW) dependency, we are safe
2947 and this is not an erratum sequence. */
2948 if (load &&
2949 (rt == rn || rt == rm || rt == ra
2950 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2951 return FALSE;
2952
2953 /* We conservatively put out stubs for all other cases (including
2954 writebacks). */
2955 return TRUE;
2956 }
2957
2958 return FALSE;
2959 }
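
/* An illustrative pair this function flags (the encodings are assumed
   standard A64 forms rather than taken from this file):

     ldr  x0, [x1]           -- 0xf9400020, integer load into x0
     madd x2, x3, x4, x5     -- 0x9b041462, 64-bit multiply-accumulate

   aarch64_erratum_sequence (0xf9400020, 0x9b041462) returns TRUE: the
   loaded register x0 feeds none of Rn/Rm/Ra of the madd, so there is
   no true (RAW) dependency to rule the sequence out.  Replace the load
   with "ldr x3, [x1]" (0xf9400023) and the function returns FALSE,
   because x3 is an input of the multiply-accumulate.  */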
2960
2961 /* Used to order a list of mapping symbols by address. */
2962
2963 static int
2964 elf_aarch64_compare_mapping (const void *a, const void *b)
2965 {
2966 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2967 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2968
2969 if (amap->vma > bmap->vma)
2970 return 1;
2971 else if (amap->vma < bmap->vma)
2972 return -1;
2973 else if (amap->type > bmap->type)
2974 /* Ensure results do not depend on the host qsort for objects with
2975 multiple mapping symbols at the same address by sorting on type
2976 after vma. */
2977 return 1;
2978 else if (amap->type < bmap->type)
2979 return -1;
2980 else
2981 return 0;
2982 }
2983
2984 static bfd_boolean
2985 erratum_835769_scan (bfd *input_bfd,
2986 struct bfd_link_info *info,
2987 struct aarch64_erratum_835769_fix **fixes_p,
2988 unsigned int *num_fixes_p,
2989 unsigned int *fix_table_size_p)
2990 {
2991 asection *section;
2992 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
2993 struct aarch64_erratum_835769_fix *fixes = *fixes_p;
2994 unsigned int num_fixes = *num_fixes_p;
2995 unsigned int fix_table_size = *fix_table_size_p;
2996
2997 if (htab == NULL)
2998 return FALSE;
2999
3000 for (section = input_bfd->sections;
3001 section != NULL;
3002 section = section->next)
3003 {
3004 bfd_byte *contents = NULL;
3005 struct _aarch64_elf_section_data *sec_data;
3006 unsigned int span;
3007
3008 if (elf_section_type (section) != SHT_PROGBITS
3009 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3010 || (section->flags & SEC_EXCLUDE) != 0
3011 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3012 || (section->output_section == bfd_abs_section_ptr))
3013 continue;
3014
3015 if (elf_section_data (section)->this_hdr.contents != NULL)
3016 contents = elf_section_data (section)->this_hdr.contents;
3017 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3018 return TRUE;
3019
3020 sec_data = elf_aarch64_section_data (section);
3021
3022 qsort (sec_data->map, sec_data->mapcount,
3023 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3024
3025 for (span = 0; span < sec_data->mapcount; span++)
3026 {
3027 unsigned int span_start = sec_data->map[span].vma;
3028 unsigned int span_end = ((span == sec_data->mapcount - 1)
3029 ? sec_data->map[0].vma + section->size
3030 : sec_data->map[span + 1].vma);
3031 unsigned int i;
3032 char span_type = sec_data->map[span].type;
3033
3034 if (span_type == 'd')
3035 continue;
3036
3037 for (i = span_start; i + 4 < span_end; i += 4)
3038 {
3039 uint32_t insn_1 = bfd_getl32 (contents + i);
3040 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3041
3042 if (aarch64_erratum_sequence (insn_1, insn_2))
3043 {
3044 char *stub_name = NULL;
3045 stub_name = (char *) bfd_malloc
3046 (strlen ("__erratum_835769_veneer_") + 16);
3047 if (stub_name != NULL)
3048 sprintf
3049 (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3050 else
3051 return TRUE;
3052
3053 if (num_fixes == fix_table_size)
3054 {
3055 fix_table_size *= 2;
3056 fixes =
3057 (struct aarch64_erratum_835769_fix *)
3058 bfd_realloc (fixes,
3059 sizeof (struct aarch64_erratum_835769_fix)
3060 * fix_table_size);
3061 if (fixes == NULL)
3062 return TRUE;
3063 }
3064
3065 fixes[num_fixes].input_bfd = input_bfd;
3066 fixes[num_fixes].section = section;
3067 fixes[num_fixes].offset = i + 4;
3068 fixes[num_fixes].veneered_insn = insn_2;
3069 fixes[num_fixes].stub_name = stub_name;
3070 fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
3071 num_fixes++;
3072 }
3073 }
3074 }
3075 if (elf_section_data (section)->this_hdr.contents == NULL)
3076 free (contents);
3077 }
3078
3079 *fixes_p = fixes;
3080 *num_fixes_p = num_fixes;
3081 *fix_table_size_p = fix_table_size;
3082 return FALSE;
3083 }
3084
3085 /* Find or create a stub section. Returns a pointer to the stub section, and
3086 the section to which the stub section will be attached (in *LINK_SEC_P).
3087 LINK_SEC_P may be NULL. */
3088
3089 static asection *
3090 elf_aarch64_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3091 struct elf_aarch64_link_hash_table *htab)
3092 {
3093 asection *link_sec;
3094 asection *stub_sec;
3095
3096 link_sec = htab->stub_group[section->id].link_sec;
3097 BFD_ASSERT (link_sec != NULL);
3098 stub_sec = htab->stub_group[section->id].stub_sec;
3099
3100 if (stub_sec == NULL)
3101 {
3102 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3103 if (stub_sec == NULL)
3104 {
3105 size_t namelen;
3106 bfd_size_type len;
3107 char *s_name;
3108
3109 namelen = strlen (link_sec->name);
3110 len = namelen + sizeof (STUB_SUFFIX);
3111 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3112 if (s_name == NULL)
3113 return NULL;
3114
3115 memcpy (s_name, link_sec->name, namelen);
3116 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3117 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3118
3119 if (stub_sec == NULL)
3120 return NULL;
3121 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3122 }
3123 htab->stub_group[section->id].stub_sec = stub_sec;
3124 }
3125
3126 if (link_sec_p)
3127 *link_sec_p = link_sec;
3128
3129 return stub_sec;
3130 }
3131
3132 /* Determine and set the size of the stub section for a final link.
3133
3134 The basic idea here is to examine all the relocations looking for
3135 PC-relative calls to a target that is unreachable with a "bl"
3136 instruction. */
3137
3138 bfd_boolean
3139 elfNN_aarch64_size_stubs (bfd *output_bfd,
3140 bfd *stub_bfd,
3141 struct bfd_link_info *info,
3142 bfd_signed_vma group_size,
3143 asection * (*add_stub_section) (const char *,
3144 asection *),
3145 void (*layout_sections_again) (void))
3146 {
3147 bfd_size_type stub_group_size;
3148 bfd_boolean stubs_always_before_branch;
3149 bfd_boolean stub_changed = 0;
3150 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3151 struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL;
3152 unsigned int num_erratum_835769_fixes = 0;
3153 unsigned int erratum_835769_fix_table_size = 10;
3154 unsigned int i;
3155
3156 if (htab->fix_erratum_835769)
3157 {
3158 erratum_835769_fixes
3159 = (struct aarch64_erratum_835769_fix *)
3160 bfd_zmalloc
3161 (sizeof (struct aarch64_erratum_835769_fix) *
3162 erratum_835769_fix_table_size);
3163 if (erratum_835769_fixes == NULL)
3164 goto error_ret_free_local;
3165 }
3166
3167 /* Propagate mach to stub bfd, because it may not have been
3168 finalized when we created stub_bfd. */
3169 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3170 bfd_get_mach (output_bfd));
3171
3172 /* Stash our params away. */
3173 htab->stub_bfd = stub_bfd;
3174 htab->add_stub_section = add_stub_section;
3175 htab->layout_sections_again = layout_sections_again;
3176 stubs_always_before_branch = group_size < 0;
3177 if (group_size < 0)
3178 stub_group_size = -group_size;
3179 else
3180 stub_group_size = group_size;
3181
3182 if (stub_group_size == 1)
3183 {
3184 /* Default values. */
3185 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3186 stub_group_size = 127 * 1024 * 1024;
3187 }
3188
3189 group_sections (htab, stub_group_size, stubs_always_before_branch);
3190
3191 while (1)
3192 {
3193 bfd *input_bfd;
3194 asection *stub_sec;
3195 unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes;
3196
3197 num_erratum_835769_fixes = 0;
3198 for (input_bfd = info->input_bfds;
3199 input_bfd != NULL; input_bfd = input_bfd->link.next)
3200 {
3201 Elf_Internal_Shdr *symtab_hdr;
3202 asection *section;
3203 Elf_Internal_Sym *local_syms = NULL;
3204
3205 /* We'll need the symbol table in a second. */
3206 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3207 if (symtab_hdr->sh_info == 0)
3208 continue;
3209
3210 /* Walk over each section attached to the input bfd. */
3211 for (section = input_bfd->sections;
3212 section != NULL; section = section->next)
3213 {
3214 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3215
3216 /* If there aren't any relocs, then there's nothing more
3217 to do. */
3218 if ((section->flags & SEC_RELOC) == 0
3219 || section->reloc_count == 0
3220 || (section->flags & SEC_CODE) == 0)
3221 continue;
3222
3223 /* If this section is a link-once section that will be
3224 discarded, then don't create any stubs. */
3225 if (section->output_section == NULL
3226 || section->output_section->owner != output_bfd)
3227 continue;
3228
3229 /* Get the relocs. */
3230 internal_relocs
3231 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3232 NULL, info->keep_memory);
3233 if (internal_relocs == NULL)
3234 goto error_ret_free_local;
3235
3236 /* Now examine each relocation. */
3237 irela = internal_relocs;
3238 irelaend = irela + section->reloc_count;
3239 for (; irela < irelaend; irela++)
3240 {
3241 unsigned int r_type, r_indx;
3242 enum elf_aarch64_stub_type stub_type;
3243 struct elf_aarch64_stub_hash_entry *stub_entry;
3244 asection *sym_sec;
3245 bfd_vma sym_value;
3246 bfd_vma destination;
3247 struct elf_aarch64_link_hash_entry *hash;
3248 const char *sym_name;
3249 char *stub_name;
3250 const asection *id_sec;
3251 unsigned char st_type;
3252 bfd_size_type len;
3253
3254 r_type = ELFNN_R_TYPE (irela->r_info);
3255 r_indx = ELFNN_R_SYM (irela->r_info);
3256
3257 if (r_type >= (unsigned int) R_AARCH64_end)
3258 {
3259 bfd_set_error (bfd_error_bad_value);
3260 error_ret_free_internal:
3261 if (elf_section_data (section)->relocs == NULL)
3262 free (internal_relocs);
3263 goto error_ret_free_local;
3264 }
3265
3266 /* Only look for stubs on unconditional branch and
3267 branch and link instructions. */
3268 if (r_type != (unsigned int) AARCH64_R (CALL26)
3269 && r_type != (unsigned int) AARCH64_R (JUMP26))
3270 continue;
3271
3272 /* Now determine the call target, its name, value,
3273 section. */
3274 sym_sec = NULL;
3275 sym_value = 0;
3276 destination = 0;
3277 hash = NULL;
3278 sym_name = NULL;
3279 if (r_indx < symtab_hdr->sh_info)
3280 {
3281 /* It's a local symbol. */
3282 Elf_Internal_Sym *sym;
3283 Elf_Internal_Shdr *hdr;
3284
3285 if (local_syms == NULL)
3286 {
3287 local_syms
3288 = (Elf_Internal_Sym *) symtab_hdr->contents;
3289 if (local_syms == NULL)
3290 local_syms
3291 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3292 symtab_hdr->sh_info, 0,
3293 NULL, NULL, NULL);
3294 if (local_syms == NULL)
3295 goto error_ret_free_internal;
3296 }
3297
3298 sym = local_syms + r_indx;
3299 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3300 sym_sec = hdr->bfd_section;
3301 if (!sym_sec)
3302 /* This is an undefined symbol. It can never
3303 be resolved. */
3304 continue;
3305
3306 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3307 sym_value = sym->st_value;
3308 destination = (sym_value + irela->r_addend
3309 + sym_sec->output_offset
3310 + sym_sec->output_section->vma);
3311 st_type = ELF_ST_TYPE (sym->st_info);
3312 sym_name
3313 = bfd_elf_string_from_elf_section (input_bfd,
3314 symtab_hdr->sh_link,
3315 sym->st_name);
3316 }
3317 else
3318 {
3319 int e_indx;
3320
3321 e_indx = r_indx - symtab_hdr->sh_info;
3322 hash = ((struct elf_aarch64_link_hash_entry *)
3323 elf_sym_hashes (input_bfd)[e_indx]);
3324
3325 while (hash->root.root.type == bfd_link_hash_indirect
3326 || hash->root.root.type == bfd_link_hash_warning)
3327 hash = ((struct elf_aarch64_link_hash_entry *)
3328 hash->root.root.u.i.link);
3329
3330 if (hash->root.root.type == bfd_link_hash_defined
3331 || hash->root.root.type == bfd_link_hash_defweak)
3332 {
3333 struct elf_aarch64_link_hash_table *globals =
3334 elf_aarch64_hash_table (info);
3335 sym_sec = hash->root.root.u.def.section;
3336 sym_value = hash->root.root.u.def.value;
3337 /* For a destination in a shared library,
3338 use the PLT stub as target address to
3339 decide whether a branch stub is
3340 needed. */
3341 if (globals->root.splt != NULL && hash != NULL
3342 && hash->root.plt.offset != (bfd_vma) - 1)
3343 {
3344 sym_sec = globals->root.splt;
3345 sym_value = hash->root.plt.offset;
3346 if (sym_sec->output_section != NULL)
3347 destination = (sym_value
3348 + sym_sec->output_offset
3349 +
3350 sym_sec->output_section->vma);
3351 }
3352 else if (sym_sec->output_section != NULL)
3353 destination = (sym_value + irela->r_addend
3354 + sym_sec->output_offset
3355 + sym_sec->output_section->vma);
3356 }
3357 else if (hash->root.root.type == bfd_link_hash_undefined
3358 || (hash->root.root.type
3359 == bfd_link_hash_undefweak))
3360 {
3361 /* For a shared library, use the PLT stub as
3362 target address to decide whether a long
3363 branch stub is needed.
3364 For absolute code, they cannot be handled. */
3365 struct elf_aarch64_link_hash_table *globals =
3366 elf_aarch64_hash_table (info);
3367
3368 if (globals->root.splt != NULL && hash != NULL
3369 && hash->root.plt.offset != (bfd_vma) - 1)
3370 {
3371 sym_sec = globals->root.splt;
3372 sym_value = hash->root.plt.offset;
3373 if (sym_sec->output_section != NULL)
3374 destination = (sym_value
3375 + sym_sec->output_offset
3376 +
3377 sym_sec->output_section->vma);
3378 }
3379 else
3380 continue;
3381 }
3382 else
3383 {
3384 bfd_set_error (bfd_error_bad_value);
3385 goto error_ret_free_internal;
3386 }
3387 st_type = ELF_ST_TYPE (hash->root.type);
3388 sym_name = hash->root.root.root.string;
3389 }
3390
3391 /* Determine what (if any) linker stub is needed. */
3392 stub_type = aarch64_type_of_stub
3393 (info, section, irela, st_type, hash, destination);
3394 if (stub_type == aarch64_stub_none)
3395 continue;
3396
3397 /* Support for grouping stub sections. */
3398 id_sec = htab->stub_group[section->id].link_sec;
3399
3400 /* Get the name of this stub. */
3401 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3402 irela);
3403 if (!stub_name)
3404 goto error_ret_free_internal;
3405
3406 stub_entry =
3407 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3408 stub_name, FALSE, FALSE);
3409 if (stub_entry != NULL)
3410 {
3411 /* The proper stub has already been created. */
3412 free (stub_name);
3413 continue;
3414 }
3415
3416 stub_entry = elfNN_aarch64_add_stub (stub_name, section,
3417 htab);
3418 if (stub_entry == NULL)
3419 {
3420 free (stub_name);
3421 goto error_ret_free_internal;
3422 }
3423
3424 stub_entry->target_value = sym_value;
3425 stub_entry->target_section = sym_sec;
3426 stub_entry->stub_type = stub_type;
3427 stub_entry->h = hash;
3428 stub_entry->st_type = st_type;
3429
3430 if (sym_name == NULL)
3431 sym_name = "unnamed";
3432 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3433 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3434 if (stub_entry->output_name == NULL)
3435 {
3436 free (stub_name);
3437 goto error_ret_free_internal;
3438 }
3439
3440 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3441 sym_name);
3442
3443 stub_changed = TRUE;
3444 }
3445
3446 /* We're done with the internal relocs, free them. */
3447 if (elf_section_data (section)->relocs == NULL)
3448 free (internal_relocs);
3449 }
3450
3451 if (htab->fix_erratum_835769)
3452 {
3453 /* Scan for sequences which might trigger erratum 835769. */
3454 if (erratum_835769_scan (input_bfd, info, &erratum_835769_fixes,
3455 &num_erratum_835769_fixes,
3456 &erratum_835769_fix_table_size) != 0)
3457 goto error_ret_free_local;
3458 }
3459 }
3460
3461 if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes)
3462 stub_changed = TRUE;
3463
3464 if (!stub_changed)
3465 break;
3466
3467 /* OK, we've added some stubs. Find out the new size of the
3468 stub sections. */
3469 for (stub_sec = htab->stub_bfd->sections;
3470 stub_sec != NULL; stub_sec = stub_sec->next)
3471 {
3472 /* Ignore non-stub sections. */
3473 if (!strstr (stub_sec->name, STUB_SUFFIX))
3474 continue;
3475 stub_sec->size = 0;
3476 }
3477
3478 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3479
3480 /* Add erratum 835769 veneers to stub section sizes too. */
3481 if (htab->fix_erratum_835769)
3482 for (i = 0; i < num_erratum_835769_fixes; i++)
3483 {
3484 stub_sec = elf_aarch64_create_or_find_stub_sec (NULL,
3485 erratum_835769_fixes[i].section, htab);
3486
3487 if (stub_sec == NULL)
3488 goto error_ret_free_local;
3489
3490 stub_sec->size += 8;
3491 }
3492
3493 /* Ask the linker to do its stuff. */
3494 (*htab->layout_sections_again) ();
3495 stub_changed = FALSE;
3496 }
3497
3498 /* Add stubs for erratum 835769 fixes now. */
3499 if (htab->fix_erratum_835769)
3500 {
3501 for (i = 0; i < num_erratum_835769_fixes; i++)
3502 {
3503 struct elf_aarch64_stub_hash_entry *stub_entry;
3504 char *stub_name = erratum_835769_fixes[i].stub_name;
3505 asection *section = erratum_835769_fixes[i].section;
3506 unsigned int section_id = erratum_835769_fixes[i].section->id;
3507 asection *link_sec = htab->stub_group[section_id].link_sec;
3508 asection *stub_sec = htab->stub_group[section_id].stub_sec;
3509
3510 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3511 stub_name, TRUE, FALSE);
3512 if (stub_entry == NULL)
3513 {
3514 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3515 section->owner,
3516 stub_name);
3517 return FALSE;
3518 }
3519
3520 stub_entry->stub_sec = stub_sec;
3521 stub_entry->stub_offset = 0;
3522 stub_entry->id_sec = link_sec;
3523 stub_entry->stub_type = erratum_835769_fixes[i].stub_type;
3524 stub_entry->target_section = section;
3525 stub_entry->target_value = erratum_835769_fixes[i].offset;
3526 stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn;
3527 stub_entry->output_name = erratum_835769_fixes[i].stub_name;
3528 }
3529 }
3530
3531 return TRUE;
3532
3533 error_ret_free_local:
3534 return FALSE;
3535 }
3536
3537 /* Build all the stubs associated with the current output file. The
3538 stubs are kept in a hash table attached to the main linker hash
3539 table. We also set up the .plt entries for statically linked PIC
3540 functions here. This function is called via aarch64_elf_finish in the
3541 linker. */
3542
3543 bfd_boolean
3544 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3545 {
3546 asection *stub_sec;
3547 struct bfd_hash_table *table;
3548 struct elf_aarch64_link_hash_table *htab;
3549
3550 htab = elf_aarch64_hash_table (info);
3551
3552 for (stub_sec = htab->stub_bfd->sections;
3553 stub_sec != NULL; stub_sec = stub_sec->next)
3554 {
3555 bfd_size_type size;
3556
3557 /* Ignore non-stub sections. */
3558 if (!strstr (stub_sec->name, STUB_SUFFIX))
3559 continue;
3560
3561 /* Allocate memory to hold the linker stubs. */
3562 size = stub_sec->size;
3563 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3564 if (stub_sec->contents == NULL && size != 0)
3565 return FALSE;
3566 stub_sec->size = 0;
3567 }
3568
3569 /* Build the stubs as directed by the stub hash table. */
3570 table = &htab->stub_hash_table;
3571 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3572
3573 return TRUE;
3574 }
3575
3576
3577 /* Add an entry to the code/data map for section SEC. */
3578
3579 static void
3580 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3581 {
3582 struct _aarch64_elf_section_data *sec_data =
3583 elf_aarch64_section_data (sec);
3584 unsigned int newidx;
3585
3586 if (sec_data->map == NULL)
3587 {
3588 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3589 sec_data->mapcount = 0;
3590 sec_data->mapsize = 1;
3591 }
3592
3593 newidx = sec_data->mapcount++;
3594
3595 if (sec_data->mapcount > sec_data->mapsize)
3596 {
3597 sec_data->mapsize *= 2;
3598 sec_data->map = bfd_realloc_or_free
3599 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3600 }
3601
3602 if (sec_data->map)
3603 {
3604 sec_data->map[newidx].vma = vma;
3605 sec_data->map[newidx].type = type;
3606 }
3607 }
3608
3609
3610 /* Initialise maps of insn/data for input BFDs. */
3611 void
3612 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3613 {
3614 Elf_Internal_Sym *isymbuf;
3615 Elf_Internal_Shdr *hdr;
3616 unsigned int i, localsyms;
3617
3618 /* Make sure that we are dealing with an AArch64 elf binary. */
3619 if (!is_aarch64_elf (abfd))
3620 return;
3621
3622 if ((abfd->flags & DYNAMIC) != 0)
3623 return;
3624
3625 hdr = &elf_symtab_hdr (abfd);
3626 localsyms = hdr->sh_info;
3627
3628 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3629 should contain the number of local symbols, which should come before any
3630 global symbols. Mapping symbols are always local. */
3631 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3632
3633 /* No internal symbols read? Skip this BFD. */
3634 if (isymbuf == NULL)
3635 return;
3636
3637 for (i = 0; i < localsyms; i++)
3638 {
3639 Elf_Internal_Sym *isym = &isymbuf[i];
3640 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3641 const char *name;
3642
3643 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3644 {
3645 name = bfd_elf_string_from_elf_section (abfd,
3646 hdr->sh_link,
3647 isym->st_name);
3648
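/* Mapping symbols are named "$x" (start of A64 code) or "$d" (start
   of data), possibly with a suffix, so name[1] identifies the map
   type recorded below.  */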
3649 if (bfd_is_aarch64_special_symbol_name
3650 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3651 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3652 }
3653 }
3654 }
3655
3656 /* Set option values needed during linking. */
3657 void
3658 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3659 struct bfd_link_info *link_info,
3660 int no_enum_warn,
3661 int no_wchar_warn, int pic_veneer,
3662 int fix_erratum_835769)
3663 {
3664 struct elf_aarch64_link_hash_table *globals;
3665
3666 globals = elf_aarch64_hash_table (link_info);
3667 globals->pic_veneer = pic_veneer;
3668 globals->fix_erratum_835769 = fix_erratum_835769;
3669
3670 BFD_ASSERT (is_aarch64_elf (output_bfd));
3671 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3672 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3673 }
3674
3675 static bfd_vma
3676 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3677 struct elf_aarch64_link_hash_table
3678 *globals, struct bfd_link_info *info,
3679 bfd_vma value, bfd *output_bfd,
3680 bfd_boolean *unresolved_reloc_p)
3681 {
3682 bfd_vma off = (bfd_vma) - 1;
3683 asection *basegot = globals->root.sgot;
3684 bfd_boolean dyn = globals->root.dynamic_sections_created;
3685
3686 if (h != NULL)
3687 {
3688 BFD_ASSERT (basegot != NULL);
3689 off = h->got.offset;
3690 BFD_ASSERT (off != (bfd_vma) - 1);
3691 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3692 || (info->shared
3693 && SYMBOL_REFERENCES_LOCAL (info, h))
3694 || (ELF_ST_VISIBILITY (h->other)
3695 && h->root.type == bfd_link_hash_undefweak))
3696 {
3697 /* This is actually a static link, or it is a -Bsymbolic link
3698 and the symbol is defined locally. We must initialize this
3699 entry in the global offset table. Since the offset must
3700 always be a multiple of 8 (4 in the case of ILP32), we use
3701 the least significant bit to record whether we have
3702 initialized it already.
3703 When doing a dynamic link, we create a .rel(a).got relocation
3704 entry to initialize the value. This is done in the
3705 finish_dynamic_symbol routine. */
3706 if ((off & 1) != 0)
3707 off &= ~1;
3708 else
3709 {
3710 bfd_put_NN (output_bfd, value, basegot->contents + off);
3711 h->got.offset |= 1;
3712 }
3713 }
3714 else
3715 *unresolved_reloc_p = FALSE;
3716
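/* OFF now holds the plain (even) offset into .got; convert it into
   the virtual address of the GOT entry.  */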
3717 off = off + basegot->output_section->vma + basegot->output_offset;
3718 }
3719
3720 return off;
3721 }
3722
3723 /* Change R_TYPE to a more efficient access model where possible,
3724 return the new reloc type. */
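/* For example, BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 becomes
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 for a local symbol (GD->LE)
   and BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 otherwise (GD->IE);
   the corresponding instruction rewrites are performed in
   elfNN_aarch64_tls_relax.  */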
3725
3726 static bfd_reloc_code_real_type
3727 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3728 struct elf_link_hash_entry *h)
3729 {
3730 bfd_boolean is_local = h == NULL;
3731
3732 switch (r_type)
3733 {
3734 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3735 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3736 return (is_local
3737 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3738 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3739
3740 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3741 return (is_local
3742 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3743 : r_type);
3744
3745 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3746 return (is_local
3747 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3748 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3749
3750 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3751 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3752 return (is_local
3753 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3754 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3755
3756 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3757 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3758
3759 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3760 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3761
3762 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3763 return r_type;
3764
3765 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3766 return (is_local
3767 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3768 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3769
3770 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3771 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3772 /* Instructions with these relocations will become NOPs. */
3773 return BFD_RELOC_AARCH64_NONE;
3774
3775 default:
3776 break;
3777 }
3778
3779 return r_type;
3780 }
3781
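/* Classify the kind of GOT entry (if any) that relocation R_TYPE
   requires.  */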
3782 static unsigned int
3783 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3784 {
3785 switch (r_type)
3786 {
3787 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3788 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3789 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3790 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3791 return GOT_NORMAL;
3792
3793 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3794 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3795 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3796 return GOT_TLS_GD;
3797
3798 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3799 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3800 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3801 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3802 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3803 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3804 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3805 return GOT_TLSDESC_GD;
3806
3807 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3808 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3809 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3810 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3811 return GOT_TLS_IE;
3812
3813 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3814 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3815 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3816 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3817 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3818 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3819 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3820 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3821 return GOT_UNKNOWN;
3822
3823 default:
3824 break;
3825 }
3826 return GOT_UNKNOWN;
3827 }
3828
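/* Return TRUE if a TLS access through relocation R_TYPE against symbol
   H (or local symbol R_SYMNDX of INPUT_BFD) may be relaxed to a cheaper
   access model for this link.  */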
3829 static bfd_boolean
3830 aarch64_can_relax_tls (bfd *input_bfd,
3831 struct bfd_link_info *info,
3832 bfd_reloc_code_real_type r_type,
3833 struct elf_link_hash_entry *h,
3834 unsigned long r_symndx)
3835 {
3836 unsigned int symbol_got_type;
3837 unsigned int reloc_got_type;
3838
3839 if (! IS_AARCH64_TLS_RELOC (r_type))
3840 return FALSE;
3841
3842 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3843 reloc_got_type = aarch64_reloc_got_type (r_type);
3844
3845 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3846 return TRUE;
3847
3848 if (info->shared)
3849 return FALSE;
3850
3851 if (h && h->root.type == bfd_link_hash_undefweak)
3852 return FALSE;
3853
3854 return TRUE;
3855 }
3856
3857 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3858 enumerator. */
3859
3860 static bfd_reloc_code_real_type
3861 aarch64_tls_transition (bfd *input_bfd,
3862 struct bfd_link_info *info,
3863 unsigned int r_type,
3864 struct elf_link_hash_entry *h,
3865 unsigned long r_symndx)
3866 {
3867 bfd_reloc_code_real_type bfd_r_type
3868 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3869
3870 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3871 return bfd_r_type;
3872
3873 return aarch64_tls_transition_without_check (bfd_r_type, h);
3874 }
3875
3876 /* Return the base VMA address which should be subtracted from real addresses
3877 when resolving R_AARCH64_TLS_DTPREL relocation. */
3878
3879 static bfd_vma
3880 dtpoff_base (struct bfd_link_info *info)
3881 {
3882 /* If tls_sec is NULL, we should have signalled an error already. */
3883 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3884 return elf_hash_table (info)->tls_sec->vma;
3885 }
3886
3887 /* Return the base VMA address which should be subtracted from real addresses
3888 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3889
3890 static bfd_vma
3891 tpoff_base (struct bfd_link_info *info)
3892 {
3893 struct elf_link_hash_table *htab = elf_hash_table (info);
3894
3895 /* If tls_sec is NULL, we should have signalled an error already. */
3896 BFD_ASSERT (htab->tls_sec != NULL);
3897
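/* On AArch64 the TLS block starts TCB_SIZE bytes (rounded up to the
   TLS segment's alignment) above the thread pointer, so with the LP64
   16-byte TCB and a segment alignment of 16 or less, a variable at
   tls_sec->vma + 0x20 ends up at TP + 0x30.  */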
3898 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3899 htab->tls_sec->alignment_power);
3900 return htab->tls_sec->vma - base;
3901 }
3902
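/* The helpers below record per-symbol GOT offsets for TLS accesses.
   The least significant bit of each recorded offset is borrowed as an
   "already processed" flag: the *_mark functions set it, the *_mark_p
   functions test it, and symbol_got_offset / symbol_tlsdesc_got_offset
   strip it to recover the real offset.  */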
3903 static bfd_vma *
3904 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3905 unsigned long r_symndx)
3906 {
3907 /* Calculate the address of the GOT entry for symbol
3908 referred to in h. */
3909 if (h != NULL)
3910 return &h->got.offset;
3911 else
3912 {
3913 /* local symbol */
3914 struct elf_aarch64_local_symbol *l;
3915
3916 l = elf_aarch64_locals (input_bfd);
3917 return &l[r_symndx].got_offset;
3918 }
3919 }
3920
3921 static void
3922 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3923 unsigned long r_symndx)
3924 {
3925 bfd_vma *p;
3926 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3927 *p |= 1;
3928 }
3929
3930 static int
3931 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3932 unsigned long r_symndx)
3933 {
3934 bfd_vma value;
3935 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3936 return value & 1;
3937 }
3938
3939 static bfd_vma
3940 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3941 unsigned long r_symndx)
3942 {
3943 bfd_vma value;
3944 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3945 value &= ~1;
3946 return value;
3947 }
3948
3949 static bfd_vma *
3950 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3951 unsigned long r_symndx)
3952 {
3953 /* Calculate the address of the GOT entry for symbol
3954 referred to in h. */
3955 if (h != NULL)
3956 {
3957 struct elf_aarch64_link_hash_entry *eh;
3958 eh = (struct elf_aarch64_link_hash_entry *) h;
3959 return &eh->tlsdesc_got_jump_table_offset;
3960 }
3961 else
3962 {
3963 /* local symbol */
3964 struct elf_aarch64_local_symbol *l;
3965
3966 l = elf_aarch64_locals (input_bfd);
3967 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3968 }
3969 }
3970
3971 static void
3972 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3973 unsigned long r_symndx)
3974 {
3975 bfd_vma *p;
3976 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3977 *p |= 1;
3978 }
3979
3980 static int
3981 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3982 struct elf_link_hash_entry *h,
3983 unsigned long r_symndx)
3984 {
3985 bfd_vma value;
3986 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3987 return value & 1;
3988 }
3989
3990 static bfd_vma
3991 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3992 unsigned long r_symndx)
3993 {
3994 bfd_vma value;
3995 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3996 value &= ~1;
3997 return value;
3998 }
3999
4000 /* Data for make_branch_to_erratum_835769_stub(). */
4001
4002 struct erratum_835769_branch_to_stub_data
4003 {
4004 asection *output_section;
4005 bfd_byte *contents;
4006 };
4007
4008 /* Helper to insert branches to erratum 835769 stubs in the right
4009 places for a particular section. */
4010
4011 static bfd_boolean
4012 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4013 void *in_arg)
4014 {
4015 struct elf_aarch64_stub_hash_entry *stub_entry;
4016 struct erratum_835769_branch_to_stub_data *data;
4017 bfd_byte *contents;
4018 unsigned long branch_insn = 0;
4019 bfd_vma veneered_insn_loc, veneer_entry_loc;
4020 bfd_signed_vma branch_offset;
4021 unsigned int target;
4022 bfd *abfd;
4023
4024 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4025 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4026
4027 if (stub_entry->target_section != data->output_section
4028 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4029 return TRUE;
4030
4031 contents = data->contents;
4032 veneered_insn_loc = stub_entry->target_section->output_section->vma
4033 + stub_entry->target_section->output_offset
4034 + stub_entry->target_value;
4035 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4036 + stub_entry->stub_sec->output_offset
4037 + stub_entry->stub_offset;
4038 branch_offset = veneer_entry_loc - veneered_insn_loc;
4039
4040 abfd = stub_entry->target_section->owner;
4041 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4042 (*_bfd_error_handler)
4043 (_("%B: error: Erratum 835769 stub out "
4044 "of range (input file too large)"), abfd);
4045
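/* Overwrite the veneered instruction with an unconditional branch to
   the stub: 0x14000000 is the B opcode and the signed word offset
   (byte offset >> 2) goes in the low 26 bits.  */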
4046 target = stub_entry->target_value;
4047 branch_insn = 0x14000000;
4048 branch_offset >>= 2;
4049 branch_offset &= 0x3ffffff;
4050 branch_insn |= branch_offset;
4051 bfd_putl32 (branch_insn, &contents[target]);
4052
4053 return TRUE;
4054 }
4055
4056 static bfd_boolean
4057 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4058 struct bfd_link_info *link_info,
4059 asection *sec,
4060 bfd_byte *contents)
4061
4062 {
4063 struct elf_aarch64_link_hash_table *globals =
4064 elf_aarch64_hash_table (link_info);
4065
4066 if (globals == NULL)
4067 return FALSE;
4068
4069 /* Fix code to point to erratum 835769 stubs. */
4070 if (globals->fix_erratum_835769)
4071 {
4072 struct erratum_835769_branch_to_stub_data data;
4073
4074 data.output_section = sec;
4075 data.contents = contents;
4076 bfd_hash_traverse (&globals->stub_hash_table,
4077 make_branch_to_erratum_835769_stub, &data);
4078 }
4079
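/* Returning FALSE here tells the generic linker code that it still has
   to write out the (possibly patched) section contents itself.  */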
4080 return FALSE;
4081 }
4082
4083 /* Perform a relocation as part of a final link. */
4084 static bfd_reloc_status_type
4085 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4086 bfd *input_bfd,
4087 bfd *output_bfd,
4088 asection *input_section,
4089 bfd_byte *contents,
4090 Elf_Internal_Rela *rel,
4091 bfd_vma value,
4092 struct bfd_link_info *info,
4093 asection *sym_sec,
4094 struct elf_link_hash_entry *h,
4095 bfd_boolean *unresolved_reloc_p,
4096 bfd_boolean save_addend,
4097 bfd_vma *saved_addend,
4098 Elf_Internal_Sym *sym)
4099 {
4100 Elf_Internal_Shdr *symtab_hdr;
4101 unsigned int r_type = howto->type;
4102 bfd_reloc_code_real_type bfd_r_type
4103 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4104 bfd_reloc_code_real_type new_bfd_r_type;
4105 unsigned long r_symndx;
4106 bfd_byte *hit_data = contents + rel->r_offset;
4107 bfd_vma place;
4108 bfd_signed_vma signed_addend;
4109 struct elf_aarch64_link_hash_table *globals;
4110 bfd_boolean weak_undef_p;
4111
4112 globals = elf_aarch64_hash_table (info);
4113
4114 symtab_hdr = &elf_symtab_hdr (input_bfd);
4115
4116 BFD_ASSERT (is_aarch64_elf (input_bfd));
4117
4118 r_symndx = ELFNN_R_SYM (rel->r_info);
4119
4120 /* It is possible to have linker relaxations on some TLS access
4121 models. Update our information here. */
4122 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4123 if (new_bfd_r_type != bfd_r_type)
4124 {
4125 bfd_r_type = new_bfd_r_type;
4126 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4127 BFD_ASSERT (howto != NULL);
4128 r_type = howto->type;
4129 }
4130
4131 place = input_section->output_section->vma
4132 + input_section->output_offset + rel->r_offset;
4133
4134 /* Get addend, accumulating the addend for consecutive relocs
4135 which refer to the same offset. */
4136 signed_addend = saved_addend ? *saved_addend : 0;
4137 signed_addend += rel->r_addend;
4138
4139 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4140 : bfd_is_und_section (sym_sec));
4141
4142 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4143 it here if it is defined in a non-shared object. */
4144 if (h != NULL
4145 && h->type == STT_GNU_IFUNC
4146 && h->def_regular)
4147 {
4148 asection *plt;
4149 const char *name;
4150 asection *base_got;
4151 bfd_vma off;
4152
4153 if ((input_section->flags & SEC_ALLOC) == 0
4154 || h->plt.offset == (bfd_vma) -1)
4155 abort ();
4156
4157 /* STT_GNU_IFUNC symbol must go through PLT. */
4158 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4159 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4160
4161 switch (bfd_r_type)
4162 {
4163 default:
4164 if (h->root.root.string)
4165 name = h->root.root.string;
4166 else
4167 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4168 NULL);
4169 (*_bfd_error_handler)
4170 (_("%B: relocation %s against STT_GNU_IFUNC "
4171 "symbol `%s' isn't handled by %s"), input_bfd,
4172 howto->name, name, __FUNCTION__);
4173 bfd_set_error (bfd_error_bad_value);
4174 return FALSE;
4175
4176 case BFD_RELOC_AARCH64_NN:
4177 if (rel->r_addend != 0)
4178 {
4179 if (h->root.root.string)
4180 name = h->root.root.string;
4181 else
4182 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4183 sym, NULL);
4184 (*_bfd_error_handler)
4185 (_("%B: relocation %s against STT_GNU_IFUNC "
4186 "symbol `%s' has non-zero addend: %d"),
4187 input_bfd, howto->name, name, rel->r_addend);
4188 bfd_set_error (bfd_error_bad_value);
4189 return FALSE;
4190 }
4191
4192 /* Generate dynamic relocation only when there is a
4193 non-GOT reference in a shared object. */
4194 if (info->shared && h->non_got_ref)
4195 {
4196 Elf_Internal_Rela outrel;
4197 asection *sreloc;
4198
4199 /* Need a dynamic relocation to get the real function
4200 address. */
4201 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4202 info,
4203 input_section,
4204 rel->r_offset);
4205 if (outrel.r_offset == (bfd_vma) -1
4206 || outrel.r_offset == (bfd_vma) -2)
4207 abort ();
4208
4209 outrel.r_offset += (input_section->output_section->vma
4210 + input_section->output_offset);
4211
4212 if (h->dynindx == -1
4213 || h->forced_local
4214 || info->executable)
4215 {
4216 /* This symbol is resolved locally. */
4217 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4218 outrel.r_addend = (h->root.u.def.value
4219 + h->root.u.def.section->output_section->vma
4220 + h->root.u.def.section->output_offset);
4221 }
4222 else
4223 {
4224 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4225 outrel.r_addend = 0;
4226 }
4227
4228 sreloc = globals->root.irelifunc;
4229 elf_append_rela (output_bfd, sreloc, &outrel);
4230
4231 /* If this reloc is against an external symbol, we
4232 do not want to fiddle with the addend. Otherwise,
4233 we need to include the symbol value so that it
4234 becomes an addend for the dynamic reloc. For an
4235 internal symbol, we have already updated the addend. */
4236 return bfd_reloc_ok;
4237 }
4238 /* FALLTHROUGH */
4239 case BFD_RELOC_AARCH64_JUMP26:
4240 case BFD_RELOC_AARCH64_CALL26:
4241 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4242 signed_addend,
4243 weak_undef_p);
4244 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4245 howto, value);
4246 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4247 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4248 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4249 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4250 base_got = globals->root.sgot;
4251 off = h->got.offset;
4252
4253 if (base_got == NULL)
4254 abort ();
4255
4256 if (off == (bfd_vma) -1)
4257 {
4258 bfd_vma plt_index;
4259
4260 /* We can't use h->got.offset here to save state, or
4261 even just remember the offset, as finish_dynamic_symbol
4262 would use that as offset into .got. */
4263
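/* The first three entries of .got.plt are reserved for use by the
   dynamic linker, which is why the GOT slot paired with a PLT entry
   lives at index plt_index + 3 below.  */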
4264 if (globals->root.splt != NULL)
4265 {
4266 plt_index = ((h->plt.offset - globals->plt_header_size) /
4267 globals->plt_entry_size);
4268 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4269 base_got = globals->root.sgotplt;
4270 }
4271 else
4272 {
4273 plt_index = h->plt.offset / globals->plt_entry_size;
4274 off = plt_index * GOT_ENTRY_SIZE;
4275 base_got = globals->root.igotplt;
4276 }
4277
4278 if (h->dynindx == -1
4279 || h->forced_local
4280 || info->symbolic)
4281 {
4282 /* This references the local definition. We must
4283 initialize this entry in the global offset table.
4284 Since the offset must always be a multiple of 8,
4285 we use the least significant bit to record
4286 whether we have initialized it already.
4287
4288 When doing a dynamic link, we create a .rela.got
4289 relocation entry to initialize the value. This
4290 is done in the finish_dynamic_symbol routine. */
4291 if ((off & 1) != 0)
4292 off &= ~1;
4293 else
4294 {
4295 bfd_put_NN (output_bfd, value,
4296 base_got->contents + off);
4297 /* Note that this is harmless as -1 | 1 still is -1. */
4298 h->got.offset |= 1;
4299 }
4300 }
4301 value = (base_got->output_section->vma
4302 + base_got->output_offset + off);
4303 }
4304 else
4305 value = aarch64_calculate_got_entry_vma (h, globals, info,
4306 value, output_bfd,
4307 unresolved_reloc_p);
4308 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4309 0, weak_undef_p);
4310 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4311 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4312 case BFD_RELOC_AARCH64_ADD_LO12:
4313 break;
4314 }
4315 }
4316
4317 switch (bfd_r_type)
4318 {
4319 case BFD_RELOC_AARCH64_NONE:
4320 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4321 *unresolved_reloc_p = FALSE;
4322 return bfd_reloc_ok;
4323
4324 case BFD_RELOC_AARCH64_NN:
4325
4326 /* When generating a shared object or relocatable executable, these
4327 relocations are copied into the output file to be resolved at
4328 run time. */
4329 if ((info->shared || globals->root.is_relocatable_executable)
4330 && (input_section->flags & SEC_ALLOC)
4331 && (h == NULL
4332 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4333 || h->root.type != bfd_link_hash_undefweak))
4334 {
4335 Elf_Internal_Rela outrel;
4336 bfd_byte *loc;
4337 bfd_boolean skip, relocate;
4338 asection *sreloc;
4339
4340 *unresolved_reloc_p = FALSE;
4341
4342 skip = FALSE;
4343 relocate = FALSE;
4344
4345 outrel.r_addend = signed_addend;
4346 outrel.r_offset =
4347 _bfd_elf_section_offset (output_bfd, info, input_section,
4348 rel->r_offset);
4349 if (outrel.r_offset == (bfd_vma) - 1)
4350 skip = TRUE;
4351 else if (outrel.r_offset == (bfd_vma) - 2)
4352 {
4353 skip = TRUE;
4354 relocate = TRUE;
4355 }
4356
4357 outrel.r_offset += (input_section->output_section->vma
4358 + input_section->output_offset);
4359
4360 if (skip)
4361 memset (&outrel, 0, sizeof outrel);
4362 else if (h != NULL
4363 && h->dynindx != -1
4364 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4365 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4366 else
4367 {
4368 int symbol;
4369
4370 /* On SVR4-ish systems, the dynamic loader cannot
4371 relocate the text and data segments independently,
4372 so the symbol does not matter. */
4373 symbol = 0;
4374 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4375 outrel.r_addend += value;
4376 }
4377
4378 sreloc = elf_section_data (input_section)->sreloc;
4379 if (sreloc == NULL || sreloc->contents == NULL)
4380 return bfd_reloc_notsupported;
4381
4382 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4383 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4384
4385 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4386 {
4387 /* Sanity check that we have previously allocated
4388 sufficient space in the relocation section for the
4389 number of relocations we actually want to emit. */
4390 abort ();
4391 }
4392
4393 /* If this reloc is against an external symbol, we do not want to
4394 fiddle with the addend. Otherwise, we need to include the symbol
4395 value so that it becomes an addend for the dynamic reloc. */
4396 if (!relocate)
4397 return bfd_reloc_ok;
4398
4399 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4400 contents, rel->r_offset, value,
4401 signed_addend);
4402 }
4403 else
4404 value += signed_addend;
4405 break;
4406
4407 case BFD_RELOC_AARCH64_JUMP26:
4408 case BFD_RELOC_AARCH64_CALL26:
4409 {
4410 asection *splt = globals->root.splt;
4411 bfd_boolean via_plt_p =
4412 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4413
4414 /* A call to an undefined weak symbol is converted to a jump to
4415 the next instruction unless a PLT entry will be created.
4416 The jump to the next instruction is optimized as a NOP.
4417 Do the same for local undefined symbols. */
4418 if (weak_undef_p && ! via_plt_p)
4419 {
4420 bfd_putl32 (INSN_NOP, hit_data);
4421 return bfd_reloc_ok;
4422 }
4423
4424 /* If the call goes through a PLT entry, make sure to
4425 check distance to the right destination address. */
4426 if (via_plt_p)
4427 {
4428 value = (splt->output_section->vma
4429 + splt->output_offset + h->plt.offset);
4430 *unresolved_reloc_p = FALSE;
4431 }
4432
4433 /* If the target symbol is global and marked as a function, the
4434 relocation applies to a function call or a tail call. In this
4435 situation we can veneer out-of-range branches. The veneers
4436 use IP0 and IP1 and hence cannot be used for arbitrary
4437 out-of-range branches that occur within the body of a function. */
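/* An unconditional B/BL has a signed 26-bit word offset, giving a
   reach of +/-128MB; branches beyond that need a veneer.  */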
4438 if (h && h->type == STT_FUNC)
4439 {
4440 /* Check if a stub has to be inserted because the destination
4441 is too far away. */
4442 if (! aarch64_valid_branch_p (value, place))
4443 {
4444 /* The target is out of reach, so redirect the branch to
4445 the local stub for this function. */
4446 struct elf_aarch64_stub_hash_entry *stub_entry;
4447 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4448 sym_sec, h,
4449 rel, globals);
4450 if (stub_entry != NULL)
4451 value = (stub_entry->stub_offset
4452 + stub_entry->stub_sec->output_offset
4453 + stub_entry->stub_sec->output_section->vma);
4454 }
4455 }
4456 }
4457 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4458 signed_addend, weak_undef_p);
4459 break;
4460
4461 case BFD_RELOC_AARCH64_16:
4462 #if ARCH_SIZE == 64
4463 case BFD_RELOC_AARCH64_32:
4464 #endif
4465 case BFD_RELOC_AARCH64_ADD_LO12:
4466 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4467 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4468 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4469 case BFD_RELOC_AARCH64_BRANCH19:
4470 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4471 case BFD_RELOC_AARCH64_LDST8_LO12:
4472 case BFD_RELOC_AARCH64_LDST16_LO12:
4473 case BFD_RELOC_AARCH64_LDST32_LO12:
4474 case BFD_RELOC_AARCH64_LDST64_LO12:
4475 case BFD_RELOC_AARCH64_LDST128_LO12:
4476 case BFD_RELOC_AARCH64_MOVW_G0_S:
4477 case BFD_RELOC_AARCH64_MOVW_G1_S:
4478 case BFD_RELOC_AARCH64_MOVW_G2_S:
4479 case BFD_RELOC_AARCH64_MOVW_G0:
4480 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4481 case BFD_RELOC_AARCH64_MOVW_G1:
4482 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4483 case BFD_RELOC_AARCH64_MOVW_G2:
4484 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4485 case BFD_RELOC_AARCH64_MOVW_G3:
4486 case BFD_RELOC_AARCH64_16_PCREL:
4487 case BFD_RELOC_AARCH64_32_PCREL:
4488 case BFD_RELOC_AARCH64_64_PCREL:
4489 case BFD_RELOC_AARCH64_TSTBR14:
4490 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4491 signed_addend, weak_undef_p);
4492 break;
4493
4494 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4495 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4496 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4497 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4498 if (globals->root.sgot == NULL)
4499 BFD_ASSERT (h != NULL);
4500
4501 if (h != NULL)
4502 {
4503 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4504 output_bfd,
4505 unresolved_reloc_p);
4506 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4507 0, weak_undef_p);
4508 }
4509 break;
4510
4511 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4512 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4513 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4514 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4515 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4516 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4517 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4518 if (globals->root.sgot == NULL)
4519 return bfd_reloc_notsupported;
4520
4521 value = (symbol_got_offset (input_bfd, h, r_symndx)
4522 + globals->root.sgot->output_section->vma
4523 + globals->root.sgot->output_offset);
4524
4525 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4526 0, weak_undef_p);
4527 *unresolved_reloc_p = FALSE;
4528 break;
4529
4530 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4531 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4532 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4533 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4534 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4535 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4536 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4537 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4538 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4539 signed_addend - tpoff_base (info),
4540 weak_undef_p);
4541 *unresolved_reloc_p = FALSE;
4542 break;
4543
4544 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4545 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4546 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4547 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4548 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4549 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4550 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4551 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4552 if (globals->root.sgot == NULL)
4553 return bfd_reloc_notsupported;
4554 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4555 + globals->root.sgotplt->output_section->vma
4556 + globals->root.sgotplt->output_offset
4557 + globals->sgotplt_jump_table_size);
4558
4559 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4560 0, weak_undef_p);
4561 *unresolved_reloc_p = FALSE;
4562 break;
4563
4564 default:
4565 return bfd_reloc_notsupported;
4566 }
4567
4568 if (saved_addend)
4569 *saved_addend = value;
4570
4571 /* Only apply the final relocation in a sequence. */
4572 if (save_addend)
4573 return bfd_reloc_continue;
4574
4575 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4576 howto, value);
4577 }
4578
4579 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4580 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4581 link.
4582
4583 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4584 is to then call final_link_relocate. Return other values in the
4585 case of error. */
4586
4587 static bfd_reloc_status_type
4588 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4589 bfd *input_bfd, bfd_byte *contents,
4590 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4591 {
4592 bfd_boolean is_local = h == NULL;
4593 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4594 unsigned long insn;
4595
4596 BFD_ASSERT (globals && input_bfd && contents && rel);
4597
4598 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4599 {
4600 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4601 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4602 if (is_local)
4603 {
4604 /* GD->LE relaxation:
4605 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4606 or
4607 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4608 */
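/* 0xd2a00000 is MOVZ x0, #0, LSL #16; the 16-bit :tprel_g1: value is
   filled in when the relaxed MOVW_TPREL_G1 relocation is applied.  */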
4609 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4610 return bfd_reloc_continue;
4611 }
4612 else
4613 {
4614 /* GD->IE relaxation:
4615 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4616 or
4617 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4618 */
4619 return bfd_reloc_continue;
4620 }
4621
4622 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4623 BFD_ASSERT (0);
4624 break;
4625
4626 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4627 if (is_local)
4628 {
4629 /* Tiny TLSDESC->LE relaxation:
4630 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4631 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4632 .tlsdesccall var
4633 blr x1 => nop
4634 */
4635 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4636 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4637
4638 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4639 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4640 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4641
4642 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4643 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4644 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4645 return bfd_reloc_continue;
4646 }
4647 else
4648 {
4649 /* Tiny TLSDESC->IE relaxation:
4650 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4651 adr x0, :tlsdesc:var => nop
4652 .tlsdesccall var
4653 blr x1 => nop
4654 */
4655 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4656 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4657
4658 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4659 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4660
4661 bfd_putl32 (0x58000000, contents + rel->r_offset);
4662 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4663 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4664 return bfd_reloc_continue;
4665 }
4666
4667 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4668 if (is_local)
4669 {
4670 /* Tiny GD->LE relaxation:
4671 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4672 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4673 nop => add x0, x0, #:tprel_lo12_nc:x
4674 */
4675
4676 /* First kill the tls_get_addr reloc on the bl instruction. */
4677 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4678
4679 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4680 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4681 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4682
4683 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4684 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4685 rel[1].r_offset = rel->r_offset + 8;
4686
4687 /* Move the current relocation to the second instruction in
4688 the sequence. */
4689 rel->r_offset += 4;
4690 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4691 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4692 return bfd_reloc_continue;
4693 }
4694 else
4695 {
4696 /* Tiny GD->IE relaxation:
4697 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4698 bl __tls_get_addr => mrs x1, tpidr_el0
4699 nop => add x0, x0, x1
4700 */
4701
4702 /* First kill the tls_get_addr reloc on the bl instruction. */
4703 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4704 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4705
4706 bfd_putl32 (0x58000000, contents + rel->r_offset);
4707 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4708 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4709 return bfd_reloc_continue;
4710 }
4711
4712 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4713 return bfd_reloc_continue;
4714
4715 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4716 if (is_local)
4717 {
4718 /* GD->LE relaxation:
4719 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4720 */
4721 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4722 return bfd_reloc_continue;
4723 }
4724 else
4725 {
4726 /* GD->IE relaxation:
4727 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4728 */
4729 insn = bfd_getl32 (contents + rel->r_offset);
4730 insn &= 0xffffffe0;
4731 bfd_putl32 (insn, contents + rel->r_offset);
4732 return bfd_reloc_continue;
4733 }
4734
4735 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4736 if (is_local)
4737 {
4738 /* GD->LE relaxation
4739 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4740 bl __tls_get_addr => mrs x1, tpidr_el0
4741 nop => add x0, x1, x0
4742 */
4743
4744 /* First kill the tls_get_addr reloc on the bl instruction. */
4745 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4746 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4747
4748 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4749 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4750 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4751 return bfd_reloc_continue;
4752 }
4753 else
4754 {
4755 /* GD->IE relaxation
4756 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4757 BL __tls_get_addr => mrs x1, tpidr_el0
4758 R_AARCH64_CALL26
4759 NOP => add x0, x1, x0
4760 */
4761
4762 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4763
4764 /* Remove the relocation on the BL instruction. */
4765 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4766
4767 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4768
4769 /* We choose to fix up the BL and NOP instructions using the
4770 offset from the second relocation to allow flexibility in
4771 scheduling instructions between the ADD and BL. */
4772 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4773 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4774 return bfd_reloc_continue;
4775 }
4776
4777 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4778 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4779 /* GD->IE/LE relaxation:
4780 add x0, x0, #:tlsdesc_lo12:var => nop
4781 blr xd => nop
4782 */
4783 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4784 return bfd_reloc_ok;
4785
4786 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4787 /* IE->LE relaxation:
4788 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4789 */
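/* Bits [4:0] of the original instruction hold the destination
   register, which is carried over into the rewritten instruction.  */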
4790 if (is_local)
4791 {
4792 insn = bfd_getl32 (contents + rel->r_offset);
4793 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4794 }
4795 return bfd_reloc_continue;
4796
4797 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4798 /* IE->LE relaxation:
4799 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4800 */
4801 if (is_local)
4802 {
4803 insn = bfd_getl32 (contents + rel->r_offset);
4804 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4805 }
4806 return bfd_reloc_continue;
4807
4808 default:
4809 return bfd_reloc_continue;
4810 }
4811
4812 return bfd_reloc_ok;
4813 }
4814
4815 /* Relocate an AArch64 ELF section. */
4816
4817 static bfd_boolean
4818 elfNN_aarch64_relocate_section (bfd *output_bfd,
4819 struct bfd_link_info *info,
4820 bfd *input_bfd,
4821 asection *input_section,
4822 bfd_byte *contents,
4823 Elf_Internal_Rela *relocs,
4824 Elf_Internal_Sym *local_syms,
4825 asection **local_sections)
4826 {
4827 Elf_Internal_Shdr *symtab_hdr;
4828 struct elf_link_hash_entry **sym_hashes;
4829 Elf_Internal_Rela *rel;
4830 Elf_Internal_Rela *relend;
4831 const char *name;
4832 struct elf_aarch64_link_hash_table *globals;
4833 bfd_boolean save_addend = FALSE;
4834 bfd_vma addend = 0;
4835
4836 globals = elf_aarch64_hash_table (info);
4837
4838 symtab_hdr = &elf_symtab_hdr (input_bfd);
4839 sym_hashes = elf_sym_hashes (input_bfd);
4840
4841 rel = relocs;
4842 relend = relocs + input_section->reloc_count;
4843 for (; rel < relend; rel++)
4844 {
4845 unsigned int r_type;
4846 bfd_reloc_code_real_type bfd_r_type;
4847 bfd_reloc_code_real_type relaxed_bfd_r_type;
4848 reloc_howto_type *howto;
4849 unsigned long r_symndx;
4850 Elf_Internal_Sym *sym;
4851 asection *sec;
4852 struct elf_link_hash_entry *h;
4853 bfd_vma relocation;
4854 bfd_reloc_status_type r;
4855 arelent bfd_reloc;
4856 char sym_type;
4857 bfd_boolean unresolved_reloc = FALSE;
4858 char *error_message = NULL;
4859
4860 r_symndx = ELFNN_R_SYM (rel->r_info);
4861 r_type = ELFNN_R_TYPE (rel->r_info);
4862
4863 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4864 howto = bfd_reloc.howto;
4865
4866 if (howto == NULL)
4867 {
4868 (*_bfd_error_handler)
4869 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4870 input_bfd, input_section, r_type);
4871 return FALSE;
4872 }
4873 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4874
4875 h = NULL;
4876 sym = NULL;
4877 sec = NULL;
4878
4879 if (r_symndx < symtab_hdr->sh_info)
4880 {
4881 sym = local_syms + r_symndx;
4882 sym_type = ELFNN_ST_TYPE (sym->st_info);
4883 sec = local_sections[r_symndx];
4884
4885 /* An object file might have a reference to a local
4886 undefined symbol. This is a daft object file, but we
4887 should at least do something about it. */
4888 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4889 && bfd_is_und_section (sec)
4890 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4891 {
4892 if (!info->callbacks->undefined_symbol
4893 (info, bfd_elf_string_from_elf_section
4894 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4895 input_bfd, input_section, rel->r_offset, TRUE))
4896 return FALSE;
4897 }
4898
4899 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4900
4901 /* Relocate against local STT_GNU_IFUNC symbol. */
4902 if (!info->relocatable
4903 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4904 {
4905 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4906 rel, FALSE);
4907 if (h == NULL)
4908 abort ();
4909
4910 /* Set STT_GNU_IFUNC symbol value. */
4911 h->root.u.def.value = sym->st_value;
4912 h->root.u.def.section = sec;
4913 }
4914 }
4915 else
4916 {
4917 bfd_boolean warned, ignored;
4918
4919 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4920 r_symndx, symtab_hdr, sym_hashes,
4921 h, sec, relocation,
4922 unresolved_reloc, warned, ignored);
4923
4924 sym_type = h->type;
4925 }
4926
4927 if (sec != NULL && discarded_section (sec))
4928 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4929 rel, 1, relend, howto, 0, contents);
4930
4931 if (info->relocatable)
4932 continue;
4933
4934 if (h != NULL)
4935 name = h->root.root.string;
4936 else
4937 {
4938 name = (bfd_elf_string_from_elf_section
4939 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4940 if (name == NULL || *name == '\0')
4941 name = bfd_section_name (input_bfd, sec);
4942 }
4943
4944 if (r_symndx != 0
4945 && r_type != R_AARCH64_NONE
4946 && r_type != R_AARCH64_NULL
4947 && (h == NULL
4948 || h->root.type == bfd_link_hash_defined
4949 || h->root.type == bfd_link_hash_defweak)
4950 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4951 {
4952 (*_bfd_error_handler)
4953 ((sym_type == STT_TLS
4954 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4955 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4956 input_bfd,
4957 input_section, (long) rel->r_offset, howto->name, name);
4958 }
4959
4960 /* We relax only if we can see that there can be a valid transition
4961 from one reloc type to another.
4962 We call elfNN_aarch64_final_link_relocate unless we're completely
4963 done, i.e., the relaxation produced the final output we want. */
4964
4965 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4966 h, r_symndx);
4967 if (relaxed_bfd_r_type != bfd_r_type)
4968 {
4969 bfd_r_type = relaxed_bfd_r_type;
4970 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4971 BFD_ASSERT (howto != NULL);
4972 r_type = howto->type;
4973 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4974 unresolved_reloc = 0;
4975 }
4976 else
4977 r = bfd_reloc_continue;
4978
4979 /* There may be multiple consecutive relocations for the
4980 same offset. In that case we are supposed to treat the
4981 output of each relocation as the addend for the next. */
4982 if (rel + 1 < relend
4983 && rel->r_offset == rel[1].r_offset
4984 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4985 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4986 save_addend = TRUE;
4987 else
4988 save_addend = FALSE;
4989
4990 if (r == bfd_reloc_continue)
4991 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4992 input_section, contents, rel,
4993 relocation, info, sec,
4994 h, &unresolved_reloc,
4995 save_addend, &addend, sym);
4996
4997 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4998 {
4999 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5000 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5001 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5002 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5003 {
5004 bfd_boolean need_relocs = FALSE;
5005 bfd_byte *loc;
5006 int indx;
5007 bfd_vma off;
5008
5009 off = symbol_got_offset (input_bfd, h, r_symndx);
5010 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5011
5012 need_relocs =
5013 (info->shared || indx != 0) &&
5014 (h == NULL
5015 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5016 || h->root.type != bfd_link_hash_undefweak);
5017
5018 BFD_ASSERT (globals->root.srelgot != NULL);
5019
5020 if (need_relocs)
5021 {
5022 Elf_Internal_Rela rela;
5023 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5024 rela.r_addend = 0;
5025 rela.r_offset = globals->root.sgot->output_section->vma +
5026 globals->root.sgot->output_offset + off;
5027
5028
5029 loc = globals->root.srelgot->contents;
5030 loc += globals->root.srelgot->reloc_count++
5031 * RELOC_SIZE (globals);
5032 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5033
5034 if (indx == 0)
5035 {
5036 bfd_put_NN (output_bfd,
5037 relocation - dtpoff_base (info),
5038 globals->root.sgot->contents + off
5039 + GOT_ENTRY_SIZE);
5040 }
5041 else
5042 {
5043 /* This TLS symbol is global. We emit a
5044 relocation to fix up the TLS offset at load
5045 time. */
5046 rela.r_info =
5047 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5048 rela.r_addend = 0;
5049 rela.r_offset =
5050 (globals->root.sgot->output_section->vma
5051 + globals->root.sgot->output_offset + off
5052 + GOT_ENTRY_SIZE);
5053
5054 loc = globals->root.srelgot->contents;
5055 loc += globals->root.srelgot->reloc_count++
5056 * RELOC_SIZE (globals);
5057 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5058 bfd_put_NN (output_bfd, (bfd_vma) 0,
5059 globals->root.sgot->contents + off
5060 + GOT_ENTRY_SIZE);
5061 }
5062 }
5063 else
5064 {
5065 bfd_put_NN (output_bfd, (bfd_vma) 1,
5066 globals->root.sgot->contents + off);
5067 bfd_put_NN (output_bfd,
5068 relocation - dtpoff_base (info),
5069 globals->root.sgot->contents + off
5070 + GOT_ENTRY_SIZE);
5071 }
5072
5073 symbol_got_offset_mark (input_bfd, h, r_symndx);
5074 }
5075 break;
5076
5077 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5078 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5079 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5080 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5081 {
5082 bfd_boolean need_relocs = FALSE;
5083 bfd_byte *loc;
5084 int indx;
5085 bfd_vma off;
5086
5087 off = symbol_got_offset (input_bfd, h, r_symndx);
5088
5089 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5090
5091 need_relocs =
5092 (info->shared || indx != 0) &&
5093 (h == NULL
5094 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5095 || h->root.type != bfd_link_hash_undefweak);
5096
5097 BFD_ASSERT (globals->root.srelgot != NULL);
5098
5099 if (need_relocs)
5100 {
5101 Elf_Internal_Rela rela;
5102
5103 if (indx == 0)
5104 rela.r_addend = relocation - dtpoff_base (info);
5105 else
5106 rela.r_addend = 0;
5107
5108 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5109 rela.r_offset = globals->root.sgot->output_section->vma +
5110 globals->root.sgot->output_offset + off;
5111
5112 loc = globals->root.srelgot->contents;
5113 loc += globals->root.srelgot->reloc_count++
5114 * RELOC_SIZE (globals);
5115
5116 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5117
5118 bfd_put_NN (output_bfd, rela.r_addend,
5119 globals->root.sgot->contents + off);
5120 }
5121 else
5122 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5123 globals->root.sgot->contents + off);
5124
5125 symbol_got_offset_mark (input_bfd, h, r_symndx);
5126 }
5127 break;
5128
5129 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5130 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5131 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5132 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5133 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5134 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5135 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5136 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5137 break;
5138
5139 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5140 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5141 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5142 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5143 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5144 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5145 {
5146 bfd_boolean need_relocs = FALSE;
5147 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5148 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5149
5150 need_relocs = (h == NULL
5151 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5152 || h->root.type != bfd_link_hash_undefweak);
5153
5154 BFD_ASSERT (globals->root.srelgot != NULL);
5155 BFD_ASSERT (globals->root.sgot != NULL);
5156
5157 if (need_relocs)
5158 {
5159 bfd_byte *loc;
5160 Elf_Internal_Rela rela;
5161 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5162
5163 rela.r_addend = 0;
5164 rela.r_offset = (globals->root.sgotplt->output_section->vma
5165 + globals->root.sgotplt->output_offset
5166 + off + globals->sgotplt_jump_table_size);
5167
5168 if (indx == 0)
5169 rela.r_addend = relocation - dtpoff_base (info);
5170
5171 /* Allocate the next available slot in the PLT reloc
5172 section to hold our R_AARCH64_TLSDESC; the next
5173 available slot is determined from reloc_count,
5174 which we increment. Note that reloc_count was
5175 artificially moved down while allocating slots for
5176 real PLT relocs, so that all of the PLT relocs
5177 will fit above the initial reloc_count and the
5178 extra stuff will fit below. */
5179 loc = globals->root.srelplt->contents;
5180 loc += globals->root.srelplt->reloc_count++
5181 * RELOC_SIZE (globals);
5182
5183 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5184
5185 bfd_put_NN (output_bfd, (bfd_vma) 0,
5186 globals->root.sgotplt->contents + off +
5187 globals->sgotplt_jump_table_size);
5188 bfd_put_NN (output_bfd, (bfd_vma) 0,
5189 globals->root.sgotplt->contents + off +
5190 globals->sgotplt_jump_table_size +
5191 GOT_ENTRY_SIZE);
5192 }
5193
5194 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5195 }
5196 break;
5197 default:
5198 break;
5199 }
5200
5201 if (!save_addend)
5202 addend = 0;
5203
5204
5205 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5206 because such sections are not SEC_ALLOC and thus ld.so will
5207 not process them. */
5208 if (unresolved_reloc
5209 && !((input_section->flags & SEC_DEBUGGING) != 0
5210 && h->def_dynamic)
5211 && _bfd_elf_section_offset (output_bfd, info, input_section,
5212 +rel->r_offset) != (bfd_vma) - 1)
5213 {
5214 (*_bfd_error_handler)
5215 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5217 input_bfd, input_section, (long) rel->r_offset, howto->name,
5218 h->root.root.string);
5219 return FALSE;
5220 }
5221
5222 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5223 {
5224 switch (r)
5225 {
5226 case bfd_reloc_overflow:
5227 /* If the overflowing reloc was to an undefined symbol,
5228 we have already printed one error message and there
5229 is no point complaining again. */
5230 if ((!h ||
5231 h->root.type != bfd_link_hash_undefined)
5232 && (!((*info->callbacks->reloc_overflow)
5233 (info, (h ? &h->root : NULL), name, howto->name,
5234 (bfd_vma) 0, input_bfd, input_section,
5235 rel->r_offset))))
5236 return FALSE;
5237 break;
5238
5239 case bfd_reloc_undefined:
5240 if (!((*info->callbacks->undefined_symbol)
5241 (info, name, input_bfd, input_section,
5242 rel->r_offset, TRUE)))
5243 return FALSE;
5244 break;
5245
5246 case bfd_reloc_outofrange:
5247 error_message = _("out of range");
5248 goto common_error;
5249
5250 case bfd_reloc_notsupported:
5251 error_message = _("unsupported relocation");
5252 goto common_error;
5253
5254 case bfd_reloc_dangerous:
5255 /* error_message should already be set. */
5256 goto common_error;
5257
5258 default:
5259 error_message = _("unknown error");
5260 /* Fall through. */
5261
5262 common_error:
5263 BFD_ASSERT (error_message != NULL);
5264 if (!((*info->callbacks->reloc_dangerous)
5265 (info, error_message, input_bfd, input_section,
5266 rel->r_offset)))
5267 return FALSE;
5268 break;
5269 }
5270 }
5271 }
5272
5273 return TRUE;
5274 }
5275
5276 /* Set the right machine number. */
5277
5278 static bfd_boolean
5279 elfNN_aarch64_object_p (bfd *abfd)
5280 {
5281 #if ARCH_SIZE == 32
5282 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5283 #else
5284 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5285 #endif
5286 return TRUE;
5287 }
5288
5289 /* Function to keep AArch64 specific flags in the ELF header. */
5290
5291 static bfd_boolean
5292 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5293 {
5294 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5295 {
5296 }
5297 else
5298 {
5299 elf_elfheader (abfd)->e_flags = flags;
5300 elf_flags_init (abfd) = TRUE;
5301 }
5302
5303 return TRUE;
5304 }
5305
5306 /* Merge backend specific data from an object file to the output
5307 object file when linking. */
5308
5309 static bfd_boolean
5310 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5311 {
5312 flagword out_flags;
5313 flagword in_flags;
5314 bfd_boolean flags_compatible = TRUE;
5315 asection *sec;
5316
5317 /* Check if we have the same endianness. */
5318 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5319 return FALSE;
5320
5321 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5322 return TRUE;
5323
5324 /* The input BFD must have had its flags initialised. */
5325 /* The following seems bogus to me -- The flags are initialized in
5326 the assembler but I don't think an elf_flags_init field is
5327 written into the object. */
5328 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5329
5330 in_flags = elf_elfheader (ibfd)->e_flags;
5331 out_flags = elf_elfheader (obfd)->e_flags;
5332
5333 if (!elf_flags_init (obfd))
5334 {
5335 /* If the input is the default architecture and had the default
5336 flags then do not bother setting the flags for the output
5337 architecture, instead allow future merges to do this. If no
5338 future merges ever set these flags then they will retain their
5339 uninitialised values which, unsurprisingly, correspond
5340 to the default values. */
5341 if (bfd_get_arch_info (ibfd)->the_default
5342 && elf_elfheader (ibfd)->e_flags == 0)
5343 return TRUE;
5344
5345 elf_flags_init (obfd) = TRUE;
5346 elf_elfheader (obfd)->e_flags = in_flags;
5347
5348 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5349 && bfd_get_arch_info (obfd)->the_default)
5350 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5351 bfd_get_mach (ibfd));
5352
5353 return TRUE;
5354 }
5355
5356 /* Identical flags must be compatible. */
5357 if (in_flags == out_flags)
5358 return TRUE;
5359
5360 /* Check to see if the input BFD actually contains any sections. If
5361 not, its flags may not have been initialised either, but it
5362 cannot actually cause any incompatibility. Do not short-circuit
5363 dynamic objects; their section list may be emptied by
5364 elf_link_add_object_symbols.
5365
5366 Also check to see if there are no code sections in the input.
5367 In this case there is no need to check for code specific flags.
5368 XXX - do we need to worry about floating-point format compatibility
5369 in data sections? */
5370 if (!(ibfd->flags & DYNAMIC))
5371 {
5372 bfd_boolean null_input_bfd = TRUE;
5373 bfd_boolean only_data_sections = TRUE;
5374
5375 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5376 {
5377 if ((bfd_get_section_flags (ibfd, sec)
5378 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5379 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5380 only_data_sections = FALSE;
5381
5382 null_input_bfd = FALSE;
5383 break;
5384 }
5385
5386 if (null_input_bfd || only_data_sections)
5387 return TRUE;
5388 }
5389
5390 return flags_compatible;
5391 }
5392
5393 /* Display the flags field. */
5394
5395 static bfd_boolean
5396 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5397 {
5398 FILE *file = (FILE *) ptr;
5399 unsigned long flags;
5400
5401 BFD_ASSERT (abfd != NULL && ptr != NULL);
5402
5403 /* Print normal ELF private data. */
5404 _bfd_elf_print_private_bfd_data (abfd, ptr);
5405
5406 flags = elf_elfheader (abfd)->e_flags;
5407 /* Ignore init flag - it may not be set, despite the flags field
5408 containing valid data. */
5409
5410 /* xgettext:c-format */
5411 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5412
5413 if (flags)
5414 fprintf (file, _("<Unrecognised flag bits set>"));
5415
5416 fputc ('\n', file);
5417
5418 return TRUE;
5419 }
5420
5421 /* Update the got entry reference counts for the section being removed. */
5422
5423 static bfd_boolean
5424 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5425 struct bfd_link_info *info,
5426 asection *sec,
5427 const Elf_Internal_Rela * relocs)
5428 {
5429 struct elf_aarch64_link_hash_table *htab;
5430 Elf_Internal_Shdr *symtab_hdr;
5431 struct elf_link_hash_entry **sym_hashes;
5432 struct elf_aarch64_local_symbol *locals;
5433 const Elf_Internal_Rela *rel, *relend;
5434
5435 if (info->relocatable)
5436 return TRUE;
5437
5438 htab = elf_aarch64_hash_table (info);
5439
5440 if (htab == NULL)
5441 return FALSE;
5442
5443 elf_section_data (sec)->local_dynrel = NULL;
5444
5445 symtab_hdr = &elf_symtab_hdr (abfd);
5446 sym_hashes = elf_sym_hashes (abfd);
5447
5448 locals = elf_aarch64_locals (abfd);
5449
5450 relend = relocs + sec->reloc_count;
5451 for (rel = relocs; rel < relend; rel++)
5452 {
5453 unsigned long r_symndx;
5454 unsigned int r_type;
5455 struct elf_link_hash_entry *h = NULL;
5456
5457 r_symndx = ELFNN_R_SYM (rel->r_info);
5458
5459 if (r_symndx >= symtab_hdr->sh_info)
5460 {
5461
5462 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5463 while (h->root.type == bfd_link_hash_indirect
5464 || h->root.type == bfd_link_hash_warning)
5465 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5466 }
5467 else
5468 {
5469 Elf_Internal_Sym *isym;
5470
5471 /* A local symbol. */
5472 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5473 abfd, r_symndx);
5474
5475 /* Check relocation against local STT_GNU_IFUNC symbol. */
5476 if (isym != NULL
5477 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5478 {
5479 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5480 if (h == NULL)
5481 abort ();
5482 }
5483 }
5484
5485 if (h)
5486 {
5487 struct elf_aarch64_link_hash_entry *eh;
5488 struct elf_dyn_relocs **pp;
5489 struct elf_dyn_relocs *p;
5490
5491 eh = (struct elf_aarch64_link_hash_entry *) h;
5492
5493 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5494 if (p->sec == sec)
5495 {
5496 /* Everything must go for SEC. */
5497 *pp = p->next;
5498 break;
5499 }
5500 }
5501
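/* Classify the reloc the same way check_relocs does, folding in any
TLS relaxation, so that the reference counts decremented below stay
in step with the counts incremented in elfNN_aarch64_check_relocs. */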
5502 r_type = ELFNN_R_TYPE (rel->r_info);
5503 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5504 {
5505 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5506 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5507 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5508 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5509 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5510 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5511 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5512 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5513 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5514 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5515 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5516 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5517 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5518 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5519 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5520 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5521 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5522 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5523 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5524 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5527 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5528 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5529 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5530 if (h != NULL)
5531 {
5532 if (h->got.refcount > 0)
5533 h->got.refcount -= 1;
5534
5535 if (h->type == STT_GNU_IFUNC)
5536 {
5537 if (h->plt.refcount > 0)
5538 h->plt.refcount -= 1;
5539 }
5540 }
5541 else if (locals != NULL)
5542 {
5543 if (locals[r_symndx].got_refcount > 0)
5544 locals[r_symndx].got_refcount -= 1;
5545 }
5546 break;
5547
5548 case BFD_RELOC_AARCH64_CALL26:
5549 case BFD_RELOC_AARCH64_JUMP26:
5550 /* If this is a local symbol then we resolve it
5551 directly without creating a PLT entry. */
5552 if (h == NULL)
5553 continue;
5554
5555 if (h->plt.refcount > 0)
5556 h->plt.refcount -= 1;
5557 break;
5558
5559 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5560 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5561 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5562 case BFD_RELOC_AARCH64_MOVW_G3:
5563 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5564 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5565 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5566 case BFD_RELOC_AARCH64_NN:
5567 if (h != NULL && info->executable)
5568 {
5569 if (h->plt.refcount > 0)
5570 h->plt.refcount -= 1;
5571 }
5572 break;
5573
5574 default:
5575 break;
5576 }
5577 }
5578
5579 return TRUE;
5580 }
5581
5582 /* Adjust a symbol defined by a dynamic object and referenced by a
5583 regular object. The current definition is in some section of the
5584 dynamic object, but we're not including those sections. We have to
5585 change the definition to something the rest of the link can
5586 understand. */
5587
5588 static bfd_boolean
5589 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5590 struct elf_link_hash_entry *h)
5591 {
5592 struct elf_aarch64_link_hash_table *htab;
5593 asection *s;
5594
5595 /* If this is a function, put it in the procedure linkage table. We
5596 will fill in the contents of the procedure linkage table later,
5597 when we know the address of the .got section. */
5598 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5599 {
5600 if (h->plt.refcount <= 0
5601 || (h->type != STT_GNU_IFUNC
5602 && (SYMBOL_CALLS_LOCAL (info, h)
5603 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5604 && h->root.type == bfd_link_hash_undefweak))))
5605 {
5606 /* This case can occur if we saw a CALL26 reloc in
5607 an input file, but the symbol wasn't referred to
5608 by a dynamic object or all references were
5609 garbage collected. In that case we can resolve the
5610 calls directly, without a PLT entry. */
5611 h->plt.offset = (bfd_vma) - 1;
5612 h->needs_plt = 0;
5613 }
5614
5615 return TRUE;
5616 }
5617 else
5618 /* It's possible that we incorrectly decided a .plt reloc was
5619 needed for a PC-relative reloc to a non-function sym in
5620 check_relocs. We can't decide accurately between function and
5621 non-function syms in check_relocs; objects loaded later in
5622 the link may change h->type. So fix it now. */
5623 h->plt.offset = (bfd_vma) - 1;
5624
5625
5626 /* If this is a weak symbol, and there is a real definition, the
5627 processor independent code will have arranged for us to see the
5628 real definition first, and we can just use the same value. */
5629 if (h->u.weakdef != NULL)
5630 {
5631 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5632 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5633 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5634 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5635 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5636 h->non_got_ref = h->u.weakdef->non_got_ref;
5637 return TRUE;
5638 }
5639
5640 /* If we are creating a shared library, we must presume that the
5641 only references to the symbol are via the global offset table.
5642 For such cases we need not do anything here; the relocations will
5643 be handled correctly by relocate_section. */
5644 if (info->shared)
5645 return TRUE;
5646
5647 /* If there are no references to this symbol that do not use the
5648 GOT, we don't need to generate a copy reloc. */
5649 if (!h->non_got_ref)
5650 return TRUE;
5651
5652 /* If -z nocopyreloc was given, we won't generate them either. */
5653 if (info->nocopyreloc)
5654 {
5655 h->non_got_ref = 0;
5656 return TRUE;
5657 }
5658
5659 /* We must allocate the symbol in our .dynbss section, which will
5660 become part of the .bss section of the executable. There will be
5661 an entry for this symbol in the .dynsym section. The dynamic
5662 object will contain position independent code, so all references
5663 from the dynamic object to this symbol will go through the global
5664 offset table. The dynamic linker will use the .dynsym entry to
5665 determine the address it must put in the global offset table, so
5666 both the dynamic object and the regular object will refer to the
5667 same memory location for the variable. */
5668
5669 htab = elf_aarch64_hash_table (info);
5670
5671 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5672 to copy the initial value out of the dynamic object and into the
5673 runtime process image. */
5674 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5675 {
5676 htab->srelbss->size += RELOC_SIZE (htab);
5677 h->needs_copy = 1;
5678 }
5679
5680 s = htab->sdynbss;
5681
5682 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5683
5684 }
5685
5686 static bfd_boolean
5687 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5688 {
5689 struct elf_aarch64_local_symbol *locals;
5690 locals = elf_aarch64_locals (abfd);
5691 if (locals == NULL)
5692 {
5693 locals = (struct elf_aarch64_local_symbol *)
5694 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5695 if (locals == NULL)
5696 return FALSE;
5697 elf_aarch64_locals (abfd) = locals;
5698 }
5699 return TRUE;
5700 }
5701
5702 /* Create the .got section to hold the global offset table. */
5703
5704 static bfd_boolean
5705 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5706 {
5707 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5708 flagword flags;
5709 asection *s;
5710 struct elf_link_hash_entry *h;
5711 struct elf_link_hash_table *htab = elf_hash_table (info);
5712
5713 /* This function may be called more than once. */
5714 s = bfd_get_linker_section (abfd, ".got");
5715 if (s != NULL)
5716 return TRUE;
5717
5718 flags = bed->dynamic_sec_flags;
5719
5720 s = bfd_make_section_anyway_with_flags (abfd,
5721 (bed->rela_plts_and_copies_p
5722 ? ".rela.got" : ".rel.got"),
5723 (bed->dynamic_sec_flags
5724 | SEC_READONLY));
5725 if (s == NULL
5726 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5727 return FALSE;
5728 htab->srelgot = s;
5729
5730 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5731 if (s == NULL
5732 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5733 return FALSE;
5734 htab->sgot = s;
5735 htab->sgot->size += GOT_ENTRY_SIZE;
5736
5737 if (bed->want_got_sym)
5738 {
5739 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5740 (or .got.plt) section. We don't do this in the linker script
5741 because we don't want to define the symbol if we are not creating
5742 a global offset table. */
5743 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5744 "_GLOBAL_OFFSET_TABLE_");
5745 elf_hash_table (info)->hgot = h;
5746 if (h == NULL)
5747 return FALSE;
5748 }
5749
5750 if (bed->want_got_plt)
5751 {
5752 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5753 if (s == NULL
5754 || !bfd_set_section_alignment (abfd, s,
5755 bed->s->log_file_align))
5756 return FALSE;
5757 htab->sgotplt = s;
5758 }
5759
5760 /* The first bit of the global offset table is the header. */
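/* Note that S refers to the .got.plt created just above when the
backend wants one, so the header space is reserved there; otherwise
it is added to .got itself. */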
5761 s->size += bed->got_header_size;
5762
5763 return TRUE;
5764 }
5765
5766 /* Look through the relocs for a section during the first phase. */
5767
5768 static bfd_boolean
5769 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5770 asection *sec, const Elf_Internal_Rela *relocs)
5771 {
5772 Elf_Internal_Shdr *symtab_hdr;
5773 struct elf_link_hash_entry **sym_hashes;
5774 const Elf_Internal_Rela *rel;
5775 const Elf_Internal_Rela *rel_end;
5776 asection *sreloc;
5777
5778 struct elf_aarch64_link_hash_table *htab;
5779
5780 if (info->relocatable)
5781 return TRUE;
5782
5783 BFD_ASSERT (is_aarch64_elf (abfd));
5784
5785 htab = elf_aarch64_hash_table (info);
5786 sreloc = NULL;
5787
5788 symtab_hdr = &elf_symtab_hdr (abfd);
5789 sym_hashes = elf_sym_hashes (abfd);
5790
5791 rel_end = relocs + sec->reloc_count;
5792 for (rel = relocs; rel < rel_end; rel++)
5793 {
5794 struct elf_link_hash_entry *h;
5795 unsigned long r_symndx;
5796 unsigned int r_type;
5797 bfd_reloc_code_real_type bfd_r_type;
5798 Elf_Internal_Sym *isym;
5799
5800 r_symndx = ELFNN_R_SYM (rel->r_info);
5801 r_type = ELFNN_R_TYPE (rel->r_info);
5802
5803 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5804 {
5805 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5806 r_symndx);
5807 return FALSE;
5808 }
5809
5810 if (r_symndx < symtab_hdr->sh_info)
5811 {
5812 /* A local symbol. */
5813 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5814 abfd, r_symndx);
5815 if (isym == NULL)
5816 return FALSE;
5817
5818 /* Check relocation against local STT_GNU_IFUNC symbol. */
5819 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5820 {
5821 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5822 TRUE);
5823 if (h == NULL)
5824 return FALSE;
5825
5826 /* Fake a STT_GNU_IFUNC symbol. */
5827 h->type = STT_GNU_IFUNC;
5828 h->def_regular = 1;
5829 h->ref_regular = 1;
5830 h->forced_local = 1;
5831 h->root.type = bfd_link_hash_defined;
5832 }
5833 else
5834 h = NULL;
5835 }
5836 else
5837 {
5838 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5839 while (h->root.type == bfd_link_hash_indirect
5840 || h->root.type == bfd_link_hash_warning)
5841 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5842
5843 /* PR15323, ref flags aren't set for references in the same
5844 object. */
5845 h->root.non_ir_ref = 1;
5846 }
5847
5848 /* Could be done earlier, if h were already available. */
5849 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5850
5851 if (h != NULL)
5852 {
5853 /* Create the ifunc sections for static executables. If we
5854 never see an indirect function symbol nor we are building
5855 a static executable, those sections will be empty and
5856 won't appear in output. */
5857 switch (bfd_r_type)
5858 {
5859 default:
5860 break;
5861
5862 case BFD_RELOC_AARCH64_NN:
5863 case BFD_RELOC_AARCH64_CALL26:
5864 case BFD_RELOC_AARCH64_JUMP26:
5865 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5866 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5867 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5868 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5869 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5870 case BFD_RELOC_AARCH64_ADD_LO12:
5871 if (htab->root.dynobj == NULL)
5872 htab->root.dynobj = abfd;
5873 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5874 return FALSE;
5875 break;
5876 }
5877
5878 /* It is referenced by a non-shared object. */
5879 h->ref_regular = 1;
5880 h->root.non_ir_ref = 1;
5881 }
5882
5883 switch (bfd_r_type)
5884 {
5885 case BFD_RELOC_AARCH64_NN:
5886
5887 /* We don't need to handle relocs into sections not going into
5888 the "real" output. */
5889 if ((sec->flags & SEC_ALLOC) == 0)
5890 break;
5891
5892 if (h != NULL)
5893 {
5894 if (!info->shared)
5895 h->non_got_ref = 1;
5896
5897 h->plt.refcount += 1;
5898 h->pointer_equality_needed = 1;
5899 }
5900
5901 /* No need to do anything if we're not creating a shared
5902 object. */
5903 if (! info->shared)
5904 break;
5905
5906 {
5907 struct elf_dyn_relocs *p;
5908 struct elf_dyn_relocs **head;
5909
5910 /* We must copy these reloc types into the output file.
5911 Create a reloc section in dynobj and make room for
5912 this reloc. */
5913 if (sreloc == NULL)
5914 {
5915 if (htab->root.dynobj == NULL)
5916 htab->root.dynobj = abfd;
5917
5918 sreloc = _bfd_elf_make_dynamic_reloc_section
5919 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5920
5921 if (sreloc == NULL)
5922 return FALSE;
5923 }
5924
5925 /* If this is a global symbol, we count the number of
5926 relocations we need for this symbol. */
5927 if (h != NULL)
5928 {
5929 struct elf_aarch64_link_hash_entry *eh;
5930 eh = (struct elf_aarch64_link_hash_entry *) h;
5931 head = &eh->dyn_relocs;
5932 }
5933 else
5934 {
5935 /* Track dynamic relocs needed for local syms too.
5936 We really need local syms available to do this
5937 easily. Oh well. */
5938
5939 asection *s;
5940 void **vpp;
5941
5942 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5943 abfd, r_symndx);
5944 if (isym == NULL)
5945 return FALSE;
5946
5947 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5948 if (s == NULL)
5949 s = sec;
5950
5951 /* Beware of type punned pointers vs strict aliasing
5952 rules. */
5953 vpp = &(elf_section_data (s)->local_dynrel);
5954 head = (struct elf_dyn_relocs **) vpp;
5955 }
5956
5957 p = *head;
5958 if (p == NULL || p->sec != sec)
5959 {
5960 bfd_size_type amt = sizeof *p;
5961 p = ((struct elf_dyn_relocs *)
5962 bfd_zalloc (htab->root.dynobj, amt));
5963 if (p == NULL)
5964 return FALSE;
5965 p->next = *head;
5966 *head = p;
5967 p->sec = sec;
5968 }
5969
5970 p->count += 1;
5971
5972 }
5973 break;
5974
5975 /* RR: We probably want to keep a consistency check that
5976 there are no dangling GOT_PAGE relocs. */
5977 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5978 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5979 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5980 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5981 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5982 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5983 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5984 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5985 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5986 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5987 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5988 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5989 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5990 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5991 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5992 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5993 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5994 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5995 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5996 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5997 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5998 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5999 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6000 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6001 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6002 {
6003 unsigned got_type;
6004 unsigned old_got_type;
6005
6006 got_type = aarch64_reloc_got_type (bfd_r_type);
6007
6008 if (h)
6009 {
6010 h->got.refcount += 1;
6011 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6012 }
6013 else
6014 {
6015 struct elf_aarch64_local_symbol *locals;
6016
6017 if (!elfNN_aarch64_allocate_local_symbols
6018 (abfd, symtab_hdr->sh_info))
6019 return FALSE;
6020
6021 locals = elf_aarch64_locals (abfd);
6022 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6023 locals[r_symndx].got_refcount += 1;
6024 old_got_type = locals[r_symndx].got_type;
6025 }
6026
6027 /* If a variable is accessed with both general dynamic TLS
6028 methods, two slots may be created. */
6029 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6030 got_type |= old_got_type;
6031
6032 /* We will already have issued an error message if there
6033 is a TLS/non-TLS mismatch, based on the symbol type.
6034 So just combine any TLS types needed. */
6035 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6036 && got_type != GOT_NORMAL)
6037 got_type |= old_got_type;
6038
6039 /* If the symbol is accessed by both IE and GD methods, we
6040 are able to relax. Turn off the GD flag, without
6041 messing up with any other kind of TLS types that may be
6042 involved. */
6043 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6044 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
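/* For example, a symbol referenced through both a TLSDESC sequence
and an IE sequence first accumulates (GOT_TLSDESC_GD | GOT_TLS_IE)
above and is then reduced to GOT_TLS_IE here, so only a single IE
GOT slot will be allocated for it later on. */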
6045
6046 if (old_got_type != got_type)
6047 {
6048 if (h != NULL)
6049 elf_aarch64_hash_entry (h)->got_type = got_type;
6050 else
6051 {
6052 struct elf_aarch64_local_symbol *locals;
6053 locals = elf_aarch64_locals (abfd);
6054 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6055 locals[r_symndx].got_type = got_type;
6056 }
6057 }
6058
6059 if (htab->root.dynobj == NULL)
6060 htab->root.dynobj = abfd;
6061 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6062 return FALSE;
6063 break;
6064 }
6065
6066 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6067 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6068 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6069 case BFD_RELOC_AARCH64_MOVW_G3:
6070 if (info->shared)
6071 {
6072 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6073 (*_bfd_error_handler)
6074 (_("%B: relocation %s against `%s' can not be used when making "
6075 "a shared object; recompile with -fPIC"),
6076 abfd, elfNN_aarch64_howto_table[howto_index].name,
6077 (h) ? h->root.root.string : "a local symbol");
6078 bfd_set_error (bfd_error_bad_value);
6079 return FALSE;
6080 }
6081
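/* Fall through. */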
6082 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6083 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6084 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6085 if (h != NULL && info->executable)
6086 {
6087 /* If this reloc is in a read-only section, we might
6088 need a copy reloc. We can't check reliably at this
6089 stage whether the section is read-only, as input
6090 sections have not yet been mapped to output sections.
6091 Tentatively set the flag for now, and correct in
6092 adjust_dynamic_symbol. */
6093 h->non_got_ref = 1;
6094 h->plt.refcount += 1;
6095 h->pointer_equality_needed = 1;
6096 }
6097 /* FIXME: RR need to handle these in shared libraries
6098 and essentially bomb out, as these are non-PIC
6099 relocations in shared libraries. */
6100 break;
6101
6102 case BFD_RELOC_AARCH64_CALL26:
6103 case BFD_RELOC_AARCH64_JUMP26:
6104 /* If this is a local symbol then we resolve it
6105 directly without creating a PLT entry. */
6106 if (h == NULL)
6107 continue;
6108
6109 h->needs_plt = 1;
6110 if (h->plt.refcount <= 0)
6111 h->plt.refcount = 1;
6112 else
6113 h->plt.refcount += 1;
6114 break;
6115
6116 default:
6117 break;
6118 }
6119 }
6120
6121 return TRUE;
6122 }
6123
6124 /* Treat mapping symbols as special target symbols. */
6125
6126 static bfd_boolean
6127 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6128 asymbol *sym)
6129 {
6130 return bfd_is_aarch64_special_symbol_name (sym->name,
6131 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6132 }
6133
6134 /* This is a copy of elf_find_function () from elf.c except that
6135 AArch64 mapping symbols are ignored when looking for function names. */
6136
6137 static bfd_boolean
6138 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6139 asymbol **symbols,
6140 asection *section,
6141 bfd_vma offset,
6142 const char **filename_ptr,
6143 const char **functionname_ptr)
6144 {
6145 const char *filename = NULL;
6146 asymbol *func = NULL;
6147 bfd_vma low_func = 0;
6148 asymbol **p;
6149
6150 for (p = symbols; *p != NULL; p++)
6151 {
6152 elf_symbol_type *q;
6153
6154 q = (elf_symbol_type *) *p;
6155
6156 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6157 {
6158 default:
6159 break;
6160 case STT_FILE:
6161 filename = bfd_asymbol_name (&q->symbol);
6162 break;
6163 case STT_FUNC:
6164 case STT_NOTYPE:
6165 /* Skip mapping symbols. */
6166 if ((q->symbol.flags & BSF_LOCAL)
6167 && (bfd_is_aarch64_special_symbol_name
6168 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6169 continue;
6170 /* Fall through. */
6171 if (bfd_get_section (&q->symbol) == section
6172 && q->symbol.value >= low_func && q->symbol.value <= offset)
6173 {
6174 func = (asymbol *) q;
6175 low_func = q->symbol.value;
6176 }
6177 break;
6178 }
6179 }
6180
6181 if (func == NULL)
6182 return FALSE;
6183
6184 if (filename_ptr)
6185 *filename_ptr = filename;
6186 if (functionname_ptr)
6187 *functionname_ptr = bfd_asymbol_name (func);
6188
6189 return TRUE;
6190 }
6191
6192
6193 /* Find the nearest line to a particular section and offset, for error
6194 reporting. This code is a duplicate of the code in elf.c, except
6195 that it uses aarch64_elf_find_function. */
6196
6197 static bfd_boolean
6198 elfNN_aarch64_find_nearest_line (bfd *abfd,
6199 asymbol **symbols,
6200 asection *section,
6201 bfd_vma offset,
6202 const char **filename_ptr,
6203 const char **functionname_ptr,
6204 unsigned int *line_ptr,
6205 unsigned int *discriminator_ptr)
6206 {
6207 bfd_boolean found = FALSE;
6208
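/* Try DWARF 2 first, then stabs, and finally fall back to a plain
symbol table scan that knows to skip AArch64 mapping symbols. */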
6209 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6210 filename_ptr, functionname_ptr,
6211 line_ptr, discriminator_ptr,
6212 dwarf_debug_sections, 0,
6213 &elf_tdata (abfd)->dwarf2_find_line_info))
6214 {
6215 if (!*functionname_ptr)
6216 aarch64_elf_find_function (abfd, symbols, section, offset,
6217 *filename_ptr ? NULL : filename_ptr,
6218 functionname_ptr);
6219
6220 return TRUE;
6221 }
6222
6223 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6224 toolchain uses DWARF1. */
6225
6226 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6227 &found, filename_ptr,
6228 functionname_ptr, line_ptr,
6229 &elf_tdata (abfd)->line_info))
6230 return FALSE;
6231
6232 if (found && (*functionname_ptr || *line_ptr))
6233 return TRUE;
6234
6235 if (symbols == NULL)
6236 return FALSE;
6237
6238 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6239 filename_ptr, functionname_ptr))
6240 return FALSE;
6241
6242 *line_ptr = 0;
6243 return TRUE;
6244 }
6245
6246 static bfd_boolean
6247 elfNN_aarch64_find_inliner_info (bfd *abfd,
6248 const char **filename_ptr,
6249 const char **functionname_ptr,
6250 unsigned int *line_ptr)
6251 {
6252 bfd_boolean found;
6253 found = _bfd_dwarf2_find_inliner_info
6254 (abfd, filename_ptr,
6255 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6256 return found;
6257 }
6258
6259
6260 static void
6261 elfNN_aarch64_post_process_headers (bfd *abfd,
6262 struct bfd_link_info *link_info)
6263 {
6264 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6265
6266 i_ehdrp = elf_elfheader (abfd);
6267 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6268
6269 _bfd_elf_post_process_headers (abfd, link_info);
6270 }
6271
6272 static enum elf_reloc_type_class
6273 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6274 const asection *rel_sec ATTRIBUTE_UNUSED,
6275 const Elf_Internal_Rela *rela)
6276 {
6277 switch ((int) ELFNN_R_TYPE (rela->r_info))
6278 {
6279 case AARCH64_R (RELATIVE):
6280 return reloc_class_relative;
6281 case AARCH64_R (JUMP_SLOT):
6282 return reloc_class_plt;
6283 case AARCH64_R (COPY):
6284 return reloc_class_copy;
6285 default:
6286 return reloc_class_normal;
6287 }
6288 }
6289
6290 /* Handle an AArch64 specific section when reading an object file. This is
6291 called when bfd_section_from_shdr finds a section with an unknown
6292 type. */
6293
6294 static bfd_boolean
6295 elfNN_aarch64_section_from_shdr (bfd *abfd,
6296 Elf_Internal_Shdr *hdr,
6297 const char *name, int shindex)
6298 {
6299 /* There ought to be a place to keep ELF backend specific flags, but
6300 at the moment there isn't one. We just keep track of the
6301 sections by their name, instead. Fortunately, the ABI gives
6302 names for all the AArch64 specific sections, so we will probably get
6303 away with this. */
6304 switch (hdr->sh_type)
6305 {
6306 case SHT_AARCH64_ATTRIBUTES:
6307 break;
6308
6309 default:
6310 return FALSE;
6311 }
6312
6313 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6314 return FALSE;
6315
6316 return TRUE;
6317 }
6318
6319 /* A structure used to record a list of sections, independently
6320 of the next and prev fields in the asection structure. */
6321 typedef struct section_list
6322 {
6323 asection *sec;
6324 struct section_list *next;
6325 struct section_list *prev;
6326 }
6327 section_list;
6328
6329 /* Unfortunately we need to keep a list of sections for which
6330 an _aarch64_elf_section_data structure has been allocated. This
6331 is because it is possible for functions like elfNN_aarch64_write_section
6332 to be called on a section which has had an elf_data_structure
6333 allocated for it (and so the used_by_bfd field is valid) but
6334 for which the AArch64 extended version of this structure - the
6335 _aarch64_elf_section_data structure - has not been allocated. */
6336 static section_list *sections_with_aarch64_elf_section_data = NULL;
6337
6338 static void
6339 record_section_with_aarch64_elf_section_data (asection *sec)
6340 {
6341 struct section_list *entry;
6342
6343 entry = bfd_malloc (sizeof (*entry));
6344 if (entry == NULL)
6345 return;
6346 entry->sec = sec;
6347 entry->next = sections_with_aarch64_elf_section_data;
6348 entry->prev = NULL;
6349 if (entry->next != NULL)
6350 entry->next->prev = entry;
6351 sections_with_aarch64_elf_section_data = entry;
6352 }
6353
6354 static struct section_list *
6355 find_aarch64_elf_section_entry (asection *sec)
6356 {
6357 struct section_list *entry;
6358 static struct section_list *last_entry = NULL;
6359
6360 /* This is a short cut for the typical case where the sections are added
6361 to the sections_with_aarch64_elf_section_data list in forward order and
6362 then looked up here in backwards order. This makes a real difference
6363 to the ld-srec/sec64k.exp linker test. */
6364 entry = sections_with_aarch64_elf_section_data;
6365 if (last_entry != NULL)
6366 {
6367 if (last_entry->sec == sec)
6368 entry = last_entry;
6369 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6370 entry = last_entry->next;
6371 }
6372
6373 for (; entry; entry = entry->next)
6374 if (entry->sec == sec)
6375 break;
6376
6377 if (entry)
6378 /* Record the entry prior to this one - it is the entry we are
6379 most likely to want to locate next time. Also this way if we
6380 have been called from
6381 unrecord_section_with_aarch64_elf_section_data () we will not
6382 be caching a pointer that is about to be freed. */
6383 last_entry = entry->prev;
6384
6385 return entry;
6386 }
6387
6388 static void
6389 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6390 {
6391 struct section_list *entry;
6392
6393 entry = find_aarch64_elf_section_entry (sec);
6394
6395 if (entry)
6396 {
6397 if (entry->prev != NULL)
6398 entry->prev->next = entry->next;
6399 if (entry->next != NULL)
6400 entry->next->prev = entry->prev;
6401 if (entry == sections_with_aarch64_elf_section_data)
6402 sections_with_aarch64_elf_section_data = entry->next;
6403 free (entry);
6404 }
6405 }
6406
6407
6408 typedef struct
6409 {
6410 void *finfo;
6411 struct bfd_link_info *info;
6412 asection *sec;
6413 int sec_shndx;
6414 int (*func) (void *, const char *, Elf_Internal_Sym *,
6415 asection *, struct elf_link_hash_entry *);
6416 } output_arch_syminfo;
6417
6418 enum map_symbol_type
6419 {
6420 AARCH64_MAP_INSN,
6421 AARCH64_MAP_DATA
6422 };
6423
6424
6425 /* Output a single mapping symbol. */
6426
6427 static bfd_boolean
6428 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6429 enum map_symbol_type type, bfd_vma offset)
6430 {
6431 static const char *names[2] = { "$x", "$d" };
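/* Indexed by enum map_symbol_type: AARCH64_MAP_INSN -> "$x",
AARCH64_MAP_DATA -> "$d". */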
6432 Elf_Internal_Sym sym;
6433
6434 sym.st_value = (osi->sec->output_section->vma
6435 + osi->sec->output_offset + offset);
6436 sym.st_size = 0;
6437 sym.st_other = 0;
6438 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6439 sym.st_shndx = osi->sec_shndx;
6440 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6441 }
6442
6443
6444
6445 /* Output mapping symbols for PLT entries associated with H. */
6446
6447 static bfd_boolean
6448 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6449 {
6450 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6451 bfd_vma addr;
6452
6453 if (h->root.type == bfd_link_hash_indirect)
6454 return TRUE;
6455
6456 if (h->root.type == bfd_link_hash_warning)
6457 /* When warning symbols are created, they **replace** the "real"
6458 entry in the hash table, thus we never get to see the real
6459 symbol in a hash traversal. So look at it now. */
6460 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6461
6462 if (h->plt.offset == (bfd_vma) - 1)
6463 return TRUE;
6464
6465 addr = h->plt.offset;
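/* Only the first PLT entry, which starts right after the PLT header
(the literal 32 appears to correspond to the header size reserved in
elfNN_aarch64_allocate_dynrelocs), gets a $x mapping symbol here;
the rest of the PLT is contiguous code covered by the same state. */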
6466 if (addr == 32)
6467 {
6468 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6469 return FALSE;
6470 }
6471 return TRUE;
6472 }
6473
6474
6475 /* Output a single local symbol for a generated stub. */
6476
6477 static bfd_boolean
6478 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6479 bfd_vma offset, bfd_vma size)
6480 {
6481 Elf_Internal_Sym sym;
6482
6483 sym.st_value = (osi->sec->output_section->vma
6484 + osi->sec->output_offset + offset);
6485 sym.st_size = size;
6486 sym.st_other = 0;
6487 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6488 sym.st_shndx = osi->sec_shndx;
6489 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6490 }
6491
6492 static bfd_boolean
6493 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6494 {
6495 struct elf_aarch64_stub_hash_entry *stub_entry;
6496 asection *stub_sec;
6497 bfd_vma addr;
6498 char *stub_name;
6499 output_arch_syminfo *osi;
6500
6501 /* Massage our args to the form they really have. */
6502 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6503 osi = (output_arch_syminfo *) in_arg;
6504
6505 stub_sec = stub_entry->stub_sec;
6506
6507 /* Ensure this stub is attached to the current section being
6508 processed. */
6509 if (stub_sec != osi->sec)
6510 return TRUE;
6511
6512 addr = (bfd_vma) stub_entry->stub_offset;
6513
6514 stub_name = stub_entry->output_name;
6515
6516 switch (stub_entry->stub_type)
6517 {
6518 case aarch64_stub_adrp_branch:
6519 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6520 sizeof (aarch64_adrp_branch_stub)))
6521 return FALSE;
6522 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6523 return FALSE;
6524 break;
6525 case aarch64_stub_long_branch:
6526 if (!elfNN_aarch64_output_stub_sym
6527 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6528 return FALSE;
6529 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6530 return FALSE;
6531 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6532 return FALSE;
6533 break;
6534 case aarch64_stub_erratum_835769_veneer:
6535 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6536 sizeof (aarch64_erratum_835769_stub)))
6537 return FALSE;
6538 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6539 return FALSE;
6540 break;
6541 default:
6542 abort ();
6543 }
6544
6545 return TRUE;
6546 }
6547
6548 /* Output mapping symbols for linker generated sections. */
6549
6550 static bfd_boolean
6551 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6552 struct bfd_link_info *info,
6553 void *finfo,
6554 int (*func) (void *, const char *,
6555 Elf_Internal_Sym *,
6556 asection *,
6557 struct elf_link_hash_entry
6558 *))
6559 {
6560 output_arch_syminfo osi;
6561 struct elf_aarch64_link_hash_table *htab;
6562
6563 htab = elf_aarch64_hash_table (info);
6564
6565 osi.finfo = finfo;
6566 osi.info = info;
6567 osi.func = func;
6568
6569 /* Long calls stubs. */
6570 if (htab->stub_bfd && htab->stub_bfd->sections)
6571 {
6572 asection *stub_sec;
6573
6574 for (stub_sec = htab->stub_bfd->sections;
6575 stub_sec != NULL; stub_sec = stub_sec->next)
6576 {
6577 /* Ignore non-stub sections. */
6578 if (!strstr (stub_sec->name, STUB_SUFFIX))
6579 continue;
6580
6581 osi.sec = stub_sec;
6582
6583 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6584 (output_bfd, osi.sec->output_section);
6585
6586 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6587 &osi);
6588 }
6589 }
6590
6591 /* Finally, output mapping symbols for the PLT. */
6592 if (!htab->root.splt || htab->root.splt->size == 0)
6593 return TRUE;
6594
6595 /* For now live without mapping symbols for the plt. */
6596 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6597 (output_bfd, htab->root.splt->output_section);
6598 osi.sec = htab->root.splt;
6599
6600 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6601 (void *) &osi);
6602
6603 return TRUE;
6604
6605 }
6606
6607 /* Allocate target specific section data. */
6608
6609 static bfd_boolean
6610 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6611 {
6612 if (!sec->used_by_bfd)
6613 {
6614 _aarch64_elf_section_data *sdata;
6615 bfd_size_type amt = sizeof (*sdata);
6616
6617 sdata = bfd_zalloc (abfd, amt);
6618 if (sdata == NULL)
6619 return FALSE;
6620 sec->used_by_bfd = sdata;
6621 }
6622
6623 record_section_with_aarch64_elf_section_data (sec);
6624
6625 return _bfd_elf_new_section_hook (abfd, sec);
6626 }
6627
6628
6629 static void
6630 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6631 asection *sec,
6632 void *ignore ATTRIBUTE_UNUSED)
6633 {
6634 unrecord_section_with_aarch64_elf_section_data (sec);
6635 }
6636
6637 static bfd_boolean
6638 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6639 {
6640 if (abfd->sections)
6641 bfd_map_over_sections (abfd,
6642 unrecord_section_via_map_over_sections, NULL);
6643
6644 return _bfd_elf_close_and_cleanup (abfd);
6645 }
6646
6647 static bfd_boolean
6648 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6649 {
6650 if (abfd->sections)
6651 bfd_map_over_sections (abfd,
6652 unrecord_section_via_map_over_sections, NULL);
6653
6654 return _bfd_free_cached_info (abfd);
6655 }
6656
6657 /* Create dynamic sections. This is different from the ARM backend in that
6658 the got, plt, gotplt and their relocation sections are all created in the
6659 standard part of the bfd elf backend. */
6660
6661 static bfd_boolean
6662 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6663 struct bfd_link_info *info)
6664 {
6665 struct elf_aarch64_link_hash_table *htab;
6666
6667 /* We need to create .got section. */
6668 if (!aarch64_elf_create_got_section (dynobj, info))
6669 return FALSE;
6670
6671 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6672 return FALSE;
6673
6674 htab = elf_aarch64_hash_table (info);
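/* Cache the sections used for copy relocations: copied variables
live in .dynbss, and the .rela.bss section describing them is only
needed when producing an executable. */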
6675 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6676 if (!info->shared)
6677 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6678
6679 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6680 abort ();
6681
6682 return TRUE;
6683 }
6684
6685
6686 /* Allocate space in .plt, .got and associated reloc sections for
6687 dynamic relocs. */
6688
6689 static bfd_boolean
6690 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6691 {
6692 struct bfd_link_info *info;
6693 struct elf_aarch64_link_hash_table *htab;
6694 struct elf_aarch64_link_hash_entry *eh;
6695 struct elf_dyn_relocs *p;
6696
6697 /* An example of a bfd_link_hash_indirect symbol is versioned
6698 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6699 -> __gxx_personality_v0(bfd_link_hash_defined)
6700
6701 There is no need to process bfd_link_hash_indirect symbols here
6702 because we will also be presented with the concrete instance of
6703 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6704 called to copy all relevant data from the generic to the concrete
6705 symbol instance.
6706 */
6707 if (h->root.type == bfd_link_hash_indirect)
6708 return TRUE;
6709
6710 if (h->root.type == bfd_link_hash_warning)
6711 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6712
6713 info = (struct bfd_link_info *) inf;
6714 htab = elf_aarch64_hash_table (info);
6715
6716 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6717 here if it is defined and referenced in a non-shared object. */
6718 if (h->type == STT_GNU_IFUNC
6719 && h->def_regular)
6720 return TRUE;
6721 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6722 {
6723 /* Make sure this symbol is output as a dynamic symbol.
6724 Undefined weak syms won't yet be marked as dynamic. */
6725 if (h->dynindx == -1 && !h->forced_local)
6726 {
6727 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6728 return FALSE;
6729 }
6730
6731 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6732 {
6733 asection *s = htab->root.splt;
6734
6735 /* If this is the first .plt entry, make room for the special
6736 first entry. */
6737 if (s->size == 0)
6738 s->size += htab->plt_header_size;
6739
6740 h->plt.offset = s->size;
6741
6742 /* If this symbol is not defined in a regular file, and we are
6743 not generating a shared library, then set the symbol to this
6744 location in the .plt. This is required to make function
6745 pointers compare as equal between the normal executable and
6746 the shared library. */
6747 if (!info->shared && !h->def_regular)
6748 {
6749 h->root.u.def.section = s;
6750 h->root.u.def.value = h->plt.offset;
6751 }
6752
6753 /* Make room for this entry. For now we only create the
6754 small model PLT entries. We later need to find a way
6755 of relaxing into these from the large model PLT entries. */
6756 s->size += PLT_SMALL_ENTRY_SIZE;
6757
6758 /* We also need to make an entry in the .got.plt section, which
6759 will be placed in the .got section by the linker script. */
6760 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6761
6762 /* We also need to make an entry in the .rela.plt section. */
6763 htab->root.srelplt->size += RELOC_SIZE (htab);
6764
6765 /* We need to ensure that all GOT entries that serve the PLT
6766 are consecutive with the special GOT slots [0] [1] and
6767 [2]. Any additional relocations, such as
6768 R_AARCH64_TLSDESC, must be placed after the PLT related
6769 entries. We abuse the reloc_count such that during
6770 sizing we adjust reloc_count to indicate the number of
6771 PLT related reserved entries. In subsequent phases when
6772 filling in the contents of the reloc entries, PLT related
6773 entries are placed by computing their PLT index (0
6774 .. reloc_count), while other non-PLT relocs are placed
6775 at the slot indicated by reloc_count, and reloc_count is
6776 updated. */
6777
6778 htab->root.srelplt->reloc_count++;
6779 }
6780 else
6781 {
6782 h->plt.offset = (bfd_vma) - 1;
6783 h->needs_plt = 0;
6784 }
6785 }
6786 else
6787 {
6788 h->plt.offset = (bfd_vma) - 1;
6789 h->needs_plt = 0;
6790 }
6791
6792 eh = (struct elf_aarch64_link_hash_entry *) h;
6793 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6794
6795 if (h->got.refcount > 0)
6796 {
6797 bfd_boolean dyn;
6798 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6799
6800 h->got.offset = (bfd_vma) - 1;
6801
6802 dyn = htab->root.dynamic_sections_created;
6803
6804 /* Make sure this symbol is output as a dynamic symbol.
6805 Undefined weak syms won't yet be marked as dynamic. */
6806 if (dyn && h->dynindx == -1 && !h->forced_local)
6807 {
6808 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6809 return FALSE;
6810 }
6811
6812 if (got_type == GOT_UNKNOWN)
6813 {
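/* No GOT entry type was recorded for this symbol; leave got.offset
at -1 and allocate nothing. */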
6814 }
6815 else if (got_type == GOT_NORMAL)
6816 {
6817 h->got.offset = htab->root.sgot->size;
6818 htab->root.sgot->size += GOT_ENTRY_SIZE;
6819 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6820 || h->root.type != bfd_link_hash_undefweak)
6821 && (info->shared
6822 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6823 {
6824 htab->root.srelgot->size += RELOC_SIZE (htab);
6825 }
6826 }
6827 else
6828 {
6829 int indx;
6830 if (got_type & GOT_TLSDESC_GD)
6831 {
6832 eh->tlsdesc_got_jump_table_offset =
6833 (htab->root.sgotplt->size
6834 - aarch64_compute_jump_table_size (htab));
6835 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6836 h->got.offset = (bfd_vma) - 2;
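/* A got.offset of (bfd_vma) -2 appears to serve as a marker that the
entry lives in the TLSDESC slots located via
tlsdesc_got_jump_table_offset rather than at a plain .got offset. */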
6837 }
6838
6839 if (got_type & GOT_TLS_GD)
6840 {
6841 h->got.offset = htab->root.sgot->size;
6842 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6843 }
6844
6845 if (got_type & GOT_TLS_IE)
6846 {
6847 h->got.offset = htab->root.sgot->size;
6848 htab->root.sgot->size += GOT_ENTRY_SIZE;
6849 }
6850
6851 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6852 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6853 || h->root.type != bfd_link_hash_undefweak)
6854 && (info->shared
6855 || indx != 0
6856 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6857 {
6858 if (got_type & GOT_TLSDESC_GD)
6859 {
6860 htab->root.srelplt->size += RELOC_SIZE (htab);
6861 /* Note reloc_count not incremented here! We have
6862 already adjusted reloc_count for this relocation
6863 type. */
6864
6865 /* TLSDESC PLT is now needed, but not yet determined. */
6866 htab->tlsdesc_plt = (bfd_vma) - 1;
6867 }
6868
6869 if (got_type & GOT_TLS_GD)
6870 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6871
6872 if (got_type & GOT_TLS_IE)
6873 htab->root.srelgot->size += RELOC_SIZE (htab);
6874 }
6875 }
6876 }
6877 else
6878 {
6879 h->got.offset = (bfd_vma) - 1;
6880 }
6881
6882 if (eh->dyn_relocs == NULL)
6883 return TRUE;
6884
6885 /* In the shared -Bsymbolic case, discard space allocated for
6886 dynamic pc-relative relocs against symbols which turn out to be
6887 defined in regular objects. For the normal shared case, discard
6888 space for pc-relative relocs that have become local due to symbol
6889 visibility changes. */
6890
6891 if (info->shared)
6892 {
6893 /* Relocs that use pc_count are those that appear on a call
6894 insn, or certain REL relocs that can generated via assembly.
6895 We want calls to protected symbols to resolve directly to the
6896 function rather than going via the plt. If people want
6897 function pointer comparisons to work as expected then they
6898 should avoid writing weird assembly. */
6899 if (SYMBOL_CALLS_LOCAL (info, h))
6900 {
6901 struct elf_dyn_relocs **pp;
6902
6903 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6904 {
6905 p->count -= p->pc_count;
6906 p->pc_count = 0;
6907 if (p->count == 0)
6908 *pp = p->next;
6909 else
6910 pp = &p->next;
6911 }
6912 }
6913
6914 /* Also discard relocs on undefined weak syms with non-default
6915 visibility. */
6916 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6917 {
6918 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6919 eh->dyn_relocs = NULL;
6920
6921 /* Make sure undefined weak symbols are output as a dynamic
6922 symbol in PIEs. */
6923 else if (h->dynindx == -1
6924 && !h->forced_local
6925 && !bfd_elf_link_record_dynamic_symbol (info, h))
6926 return FALSE;
6927 }
6928
6929 }
6930 else if (ELIMINATE_COPY_RELOCS)
6931 {
6932 /* For the non-shared case, discard space for relocs against
6933 symbols which turn out to need copy relocs or are not
6934 dynamic. */
6935
6936 if (!h->non_got_ref
6937 && ((h->def_dynamic
6938 && !h->def_regular)
6939 || (htab->root.dynamic_sections_created
6940 && (h->root.type == bfd_link_hash_undefweak
6941 || h->root.type == bfd_link_hash_undefined))))
6942 {
6943 /* Make sure this symbol is output as a dynamic symbol.
6944 Undefined weak syms won't yet be marked as dynamic. */
6945 if (h->dynindx == -1
6946 && !h->forced_local
6947 && !bfd_elf_link_record_dynamic_symbol (info, h))
6948 return FALSE;
6949
6950 /* If that succeeded, we know we'll be keeping all the
6951 relocs. */
6952 if (h->dynindx != -1)
6953 goto keep;
6954 }
6955
6956 eh->dyn_relocs = NULL;
6957
6958 keep:;
6959 }
6960
6961 /* Finally, allocate space. */
6962 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6963 {
6964 asection *sreloc;
6965
6966 sreloc = elf_section_data (p->sec)->sreloc;
6967
6968 BFD_ASSERT (sreloc != NULL);
6969
6970 sreloc->size += p->count * RELOC_SIZE (htab);
6971 }
6972
6973 return TRUE;
6974 }
6975
6976 /* Allocate space in .plt, .got and associated reloc sections for
6977 ifunc dynamic relocs. */
6978
6979 static bfd_boolean
6980 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6981 void *inf)
6982 {
6983 struct bfd_link_info *info;
6984 struct elf_aarch64_link_hash_table *htab;
6985 struct elf_aarch64_link_hash_entry *eh;
6986
6987 /* An example of a bfd_link_hash_indirect symbol is versioned
6988 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6989 -> __gxx_personality_v0(bfd_link_hash_defined)
6990
6991 There is no need to process bfd_link_hash_indirect symbols here
6992 because we will also be presented with the concrete instance of
6993 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6994 called to copy all relevant data from the generic to the concrete
6995 symbol instance.
6996 */
6997 if (h->root.type == bfd_link_hash_indirect)
6998 return TRUE;
6999
7000 if (h->root.type == bfd_link_hash_warning)
7001 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7002
7003 info = (struct bfd_link_info *) inf;
7004 htab = elf_aarch64_hash_table (info);
7005
7006 eh = (struct elf_aarch64_link_hash_entry *) h;
7007
7008 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7009 here if it is defined and referenced in a non-shared object. */
7010 if (h->type == STT_GNU_IFUNC
7011 && h->def_regular)
7012 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7013 &eh->dyn_relocs,
7014 htab->plt_entry_size,
7015 htab->plt_header_size,
7016 GOT_ENTRY_SIZE);
7017 return TRUE;
7018 }
7019
7020 /* Allocate space in .plt, .got and associated reloc sections for
7021 local dynamic relocs. */
7022
7023 static bfd_boolean
7024 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7025 {
7026 struct elf_link_hash_entry *h
7027 = (struct elf_link_hash_entry *) *slot;
7028
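/* Entries in loc_hash_table are only created for local STT_GNU_IFUNC
symbols (see elfNN_aarch64_get_local_sym_hash), so anything else
reaching this point indicates an internal inconsistency. */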
7029 if (h->type != STT_GNU_IFUNC
7030 || !h->def_regular
7031 || !h->ref_regular
7032 || !h->forced_local
7033 || h->root.type != bfd_link_hash_defined)
7034 abort ();
7035
7036 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7037 }
7038
7039 /* Allocate space in .plt, .got and associated reloc sections for
7040 local ifunc dynamic relocs. */
7041
7042 static bfd_boolean
7043 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7044 {
7045 struct elf_link_hash_entry *h
7046 = (struct elf_link_hash_entry *) *slot;
7047
7048 if (h->type != STT_GNU_IFUNC
7049 || !h->def_regular
7050 || !h->ref_regular
7051 || !h->forced_local
7052 || h->root.type != bfd_link_hash_defined)
7053 abort ();
7054
7055 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7056 }
7057
7058 /* This is the most important function of all. Innocuously named
7059 though! */
7060 static bfd_boolean
7061 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7062 struct bfd_link_info *info)
7063 {
7064 struct elf_aarch64_link_hash_table *htab;
7065 bfd *dynobj;
7066 asection *s;
7067 bfd_boolean relocs;
7068 bfd *ibfd;
7069
7070 htab = elf_aarch64_hash_table (info);
7071 dynobj = htab->root.dynobj;
7072
7073 BFD_ASSERT (dynobj != NULL);
7074
7075 if (htab->root.dynamic_sections_created)
7076 {
7077 if (info->executable)
7078 {
7079 s = bfd_get_linker_section (dynobj, ".interp");
7080 if (s == NULL)
7081 abort ();
7082 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7083 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7084 }
7085 }
7086
7087 /* Set up .got offsets for local syms, and space for local dynamic
7088 relocs. */
7089 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7090 {
7091 struct elf_aarch64_local_symbol *locals = NULL;
7092 Elf_Internal_Shdr *symtab_hdr;
7093 asection *srel;
7094 unsigned int i;
7095
7096 if (!is_aarch64_elf (ibfd))
7097 continue;
7098
7099 for (s = ibfd->sections; s != NULL; s = s->next)
7100 {
7101 struct elf_dyn_relocs *p;
7102
7103 for (p = (struct elf_dyn_relocs *)
7104 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7105 {
7106 if (!bfd_is_abs_section (p->sec)
7107 && bfd_is_abs_section (p->sec->output_section))
7108 {
7109 /* Input section has been discarded, either because
7110 it is a copy of a linkonce section or due to
7111 linker script /DISCARD/, so we'll be discarding
7112 the relocs too. */
7113 }
7114 else if (p->count != 0)
7115 {
7116 srel = elf_section_data (p->sec)->sreloc;
7117 srel->size += p->count * RELOC_SIZE (htab);
7118 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7119 info->flags |= DF_TEXTREL;
7120 }
7121 }
7122 }
7123
7124 locals = elf_aarch64_locals (ibfd);
7125 if (!locals)
7126 continue;
7127
7128 symtab_hdr = &elf_symtab_hdr (ibfd);
7129 srel = htab->root.srelgot;
7130 for (i = 0; i < symtab_hdr->sh_info; i++)
7131 {
7132 locals[i].got_offset = (bfd_vma) - 1;
7133 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7134 if (locals[i].got_refcount > 0)
7135 {
7136 unsigned got_type = locals[i].got_type;
7137 if (got_type & GOT_TLSDESC_GD)
7138 {
7139 locals[i].tlsdesc_got_jump_table_offset =
7140 (htab->root.sgotplt->size
7141 - aarch64_compute_jump_table_size (htab));
7142 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7143 locals[i].got_offset = (bfd_vma) - 2;
7144 }
7145
7146 if (got_type & GOT_TLS_GD)
7147 {
7148 locals[i].got_offset = htab->root.sgot->size;
7149 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7150 }
7151
7152 if (got_type & GOT_TLS_IE)
7153 {
7154 locals[i].got_offset = htab->root.sgot->size;
7155 htab->root.sgot->size += GOT_ENTRY_SIZE;
7156 }
7157
7158 if (got_type == GOT_UNKNOWN)
7159 {
7160 }
7161
7162 if (got_type == GOT_NORMAL)
7163 {
7164 }
7165
7166 if (info->shared)
7167 {
7168 if (got_type & GOT_TLSDESC_GD)
7169 {
7170 htab->root.srelplt->size += RELOC_SIZE (htab);
7171 /* Note RELOC_COUNT not incremented here! */
7172 htab->tlsdesc_plt = (bfd_vma) - 1;
7173 }
7174
7175 if (got_type & GOT_TLS_GD)
7176 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7177
7178 if (got_type & GOT_TLS_IE)
7179 htab->root.srelgot->size += RELOC_SIZE (htab);
7180 }
7181 }
7182 else
7183 {
7184 locals[i].got_refcount = (bfd_vma) - 1;
7185 }
7186 }
7187 }
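  /* A worked example of the sizing above, assuming the 64-bit
     configuration where GOT_ENTRY_SIZE is 8: a local symbol referenced
     through both a TLS descriptor and a traditional GD sequence costs
     16 bytes in .got.plt for the descriptor pair and 16 bytes in .got
     for the tls_index pair, plus, when linking a shared object, one
     RELA slot in .rela.plt and two in .rela.got.  */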
7188
7189
7190 /* Allocate global sym .plt and .got entries, and space for global
7191 sym dynamic relocs. */
7192 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7193 info);
7194
7195 /* Allocate global ifunc sym .plt and .got entries, and space for global
7196 ifunc sym dynamic relocs. */
7197 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7198 info);
7199
7200 /* Allocate .plt and .got entries, and space for local symbols. */
7201 htab_traverse (htab->loc_hash_table,
7202 elfNN_aarch64_allocate_local_dynrelocs,
7203 info);
7204
7205 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7206 htab_traverse (htab->loc_hash_table,
7207 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7208 info);
7209
7210 /* For every jump slot reserved in the sgotplt, reloc_count is
7211 incremented. However, when we reserve space for TLS descriptors,
7212 it's not incremented, so in order to compute the space reserved
7213 for them, it suffices to multiply the reloc count by the jump
7214 slot size. */
7215
7216 if (htab->root.srelplt)
7217 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
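  /* A minimal sketch of the computation above, assuming the helper is
     the straightforward one defined earlier in this file:

       sgotplt_jump_table_size = srelplt->reloc_count * GOT_ENTRY_SIZE;

     i.e. one .got.plt jump slot per PLT relocation; the TLS descriptor
     pairs allocated above account for the remainder of .got.plt.  */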
7218
7219 if (htab->tlsdesc_plt)
7220 {
7221 if (htab->root.splt->size == 0)
7222 htab->root.splt->size += PLT_ENTRY_SIZE;
7223
7224 htab->tlsdesc_plt = htab->root.splt->size;
7225 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7226
7227       /* If we're not using lazy TLS relocations, don't generate the GOT
7228 	 entry that lazy resolution would otherwise require.  */
7229 if (!(info->flags & DF_BIND_NOW))
7230 {
7231 htab->dt_tlsdesc_got = htab->root.sgot->size;
7232 htab->root.sgot->size += GOT_ENTRY_SIZE;
7233 }
7234 }
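  /* The tlsdesc_plt and dt_tlsdesc_got offsets recorded here are later
     exported through the DT_TLSDESC_PLT and DT_TLSDESC_GOT dynamic tags
     (see the add_dynamic_entry calls below and
     elfNN_aarch64_finish_dynamic_sections), which is how the dynamic
     linker locates the lazy TLS descriptor trampoline and its GOT word.  */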
7235
7236   /* Initialise the mapping symbol information, used later to distinguish
7237      between code and data while scanning for erratum 835769.  */
7238 if (htab->fix_erratum_835769)
7239 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7240 {
7241 if (!is_aarch64_elf (ibfd))
7242 continue;
7243 bfd_elfNN_aarch64_init_maps (ibfd);
7244 }
7245
7246 /* We now have determined the sizes of the various dynamic sections.
7247 Allocate memory for them. */
7248 relocs = FALSE;
7249 for (s = dynobj->sections; s != NULL; s = s->next)
7250 {
7251 if ((s->flags & SEC_LINKER_CREATED) == 0)
7252 continue;
7253
7254 if (s == htab->root.splt
7255 || s == htab->root.sgot
7256 || s == htab->root.sgotplt
7257 || s == htab->root.iplt
7258 || s == htab->root.igotplt || s == htab->sdynbss)
7259 {
7260 /* Strip this section if we don't need it; see the
7261 comment below. */
7262 }
7263 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7264 {
7265 if (s->size != 0 && s != htab->root.srelplt)
7266 relocs = TRUE;
7267
7268 /* We use the reloc_count field as a counter if we need
7269 to copy relocs into the output file. */
7270 if (s != htab->root.srelplt)
7271 s->reloc_count = 0;
7272 }
7273 else
7274 {
7275 /* It's not one of our sections, so don't allocate space. */
7276 continue;
7277 }
7278
7279 if (s->size == 0)
7280 {
7281 /* If we don't need this section, strip it from the
7282 output file. This is mostly to handle .rela.bss and
7283 .rela.plt. We must create both sections in
7284 create_dynamic_sections, because they must be created
7285 before the linker maps input sections to output
7286 sections. The linker does that before
7287 adjust_dynamic_symbol is called, and it is that
7288 function which decides whether anything needs to go
7289 into these sections. */
7290
7291 s->flags |= SEC_EXCLUDE;
7292 continue;
7293 }
7294
7295 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7296 continue;
7297
7298 /* Allocate memory for the section contents. We use bfd_zalloc
7299 here in case unused entries are not reclaimed before the
7300 section's contents are written out. This should not happen,
7301 but this way if it does, we get a R_AARCH64_NONE reloc instead
7302 of garbage. */
7303 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7304 if (s->contents == NULL)
7305 return FALSE;
7306 }
7307
7308 if (htab->root.dynamic_sections_created)
7309 {
7310 /* Add some entries to the .dynamic section. We fill in the
7311 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7312 must add the entries now so that we get the correct size for
7313 the .dynamic section. The DT_DEBUG entry is filled in by the
7314 dynamic linker and used by the debugger. */
7315 #define add_dynamic_entry(TAG, VAL) \
7316 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7317
7318 if (info->executable)
7319 {
7320 if (!add_dynamic_entry (DT_DEBUG, 0))
7321 return FALSE;
7322 }
7323
7324 if (htab->root.splt->size != 0)
7325 {
7326 if (!add_dynamic_entry (DT_PLTGOT, 0)
7327 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7328 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7329 || !add_dynamic_entry (DT_JMPREL, 0))
7330 return FALSE;
7331
7332 if (htab->tlsdesc_plt
7333 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7334 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7335 return FALSE;
7336 }
7337
7338 if (relocs)
7339 {
7340 if (!add_dynamic_entry (DT_RELA, 0)
7341 || !add_dynamic_entry (DT_RELASZ, 0)
7342 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7343 return FALSE;
7344
7345 /* If any dynamic relocs apply to a read-only section,
7346 then we need a DT_TEXTREL entry. */
7347 if ((info->flags & DF_TEXTREL) != 0)
7348 {
7349 if (!add_dynamic_entry (DT_TEXTREL, 0))
7350 return FALSE;
7351 }
7352 }
7353 }
7354 #undef add_dynamic_entry
7355
7356 return TRUE;
7357 }
7358
7359 static inline void
7360 elf_aarch64_update_plt_entry (bfd *output_bfd,
7361 bfd_reloc_code_real_type r_type,
7362 bfd_byte *plt_entry, bfd_vma value)
7363 {
7364 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7365
7366 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7367 }
7368
7369 static void
7370 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7371 struct elf_aarch64_link_hash_table
7372 *htab, bfd *output_bfd,
7373 struct bfd_link_info *info)
7374 {
7375 bfd_byte *plt_entry;
7376 bfd_vma plt_index;
7377 bfd_vma got_offset;
7378 bfd_vma gotplt_entry_address;
7379 bfd_vma plt_entry_address;
7380 Elf_Internal_Rela rela;
7381 bfd_byte *loc;
7382 asection *plt, *gotplt, *relplt;
7383
7384 /* When building a static executable, use .iplt, .igot.plt and
7385 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7386 if (htab->root.splt != NULL)
7387 {
7388 plt = htab->root.splt;
7389 gotplt = htab->root.sgotplt;
7390 relplt = htab->root.srelplt;
7391 }
7392 else
7393 {
7394 plt = htab->root.iplt;
7395 gotplt = htab->root.igotplt;
7396 relplt = htab->root.irelplt;
7397 }
7398
7399 /* Get the index in the procedure linkage table which
7400 corresponds to this symbol. This is the index of this symbol
7401 in all the symbols for which we are making plt entries. The
7402 first entry in the procedure linkage table is reserved.
7403
7404 Get the offset into the .got table of the entry that
7405 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7406 bytes. The first three are reserved for the dynamic linker.
7407
7408 For static executables, we don't reserve anything. */
7409
7410 if (plt == htab->root.splt)
7411 {
7412 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7413 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7414 }
7415 else
7416 {
7417 plt_index = h->plt.offset / htab->plt_entry_size;
7418 got_offset = plt_index * GOT_ENTRY_SIZE;
7419 }
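  /* Worked example, assuming the small-PLT sizes used by this backend
     (a 32 byte PLT0 header, 16 byte PLTn entries and 8 byte GOT entries
     in the 64-bit case): the first PLTn entry has h->plt.offset == 32,
     so plt_index == 0 and got_offset == 3 * 8 == 24, i.e. the slot
     immediately after the three .got.plt words reserved for the dynamic
     linker.  */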
7420
7421 plt_entry = plt->contents + h->plt.offset;
7422 plt_entry_address = plt->output_section->vma
7423 + plt->output_offset + h->plt.offset;
7424   gotplt_entry_address = gotplt->output_section->vma
7425     + gotplt->output_offset + got_offset;
7426
7427 /* Copy in the boiler-plate for the PLTn entry. */
7428 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7429
7430 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7431 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7432 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7433 plt_entry,
7434 PG (gotplt_entry_address) -
7435 PG (plt_entry_address));
7436
7437 /* Fill in the lo12 bits for the load from the pltgot. */
7438 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7439 plt_entry + 4,
7440 PG_OFFSET (gotplt_entry_address));
7441
7442 /* Fill in the lo12 bits for the add from the pltgot entry. */
7443 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7444 plt_entry + 8,
7445 PG_OFFSET (gotplt_entry_address));
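  /* Taken together, the three fixups above turn the boiler-plate into
     something along the lines of (64-bit case):

       adrp x16, Page (&.got.plt[n])
       ldr  x17, [x16, #:lo12:&.got.plt[n]]
       add  x16, x16, #:lo12:&.got.plt[n]
       br   x17

     leaving x16 holding the address of this symbol's .got.plt slot and
     x17 its current contents.  */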
7446
7447   /* All the GOTPLT entries initially hold the address of PLT0, so the
       first call through a PLTn entry falls back to the lazy resolver.  */
7448 bfd_put_NN (output_bfd,
7449 plt->output_section->vma + plt->output_offset,
7450 gotplt->contents + got_offset);
7451
7452 rela.r_offset = gotplt_entry_address;
7453
7454 if (h->dynindx == -1
7455 || ((info->executable
7456 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7457 && h->def_regular
7458 && h->type == STT_GNU_IFUNC))
7459 {
7460 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7461 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7462 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7463 rela.r_addend = (h->root.u.def.value
7464 + h->root.u.def.section->output_section->vma
7465 + h->root.u.def.section->output_offset);
7466 }
7467 else
7468 {
7469 /* Fill in the entry in the .rela.plt section. */
7470 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7471 rela.r_addend = 0;
7472 }
7473
7474   /* Compute the location of the relocation entry from the PLT index
7475      and do not adjust reloc_count; it has already been adjusted to
7476      account for this entry.  */
7477 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7478 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7479 }
7480
7481 /* Size sections even though they're not dynamic.  We use this hook to
7482    set up _TLS_MODULE_BASE_, if needed.  */
7483
7484 static bfd_boolean
7485 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7486 struct bfd_link_info *info)
7487 {
7488 asection *tls_sec;
7489
7490 if (info->relocatable)
7491 return TRUE;
7492
7493 tls_sec = elf_hash_table (info)->tls_sec;
7494
7495 if (tls_sec)
7496 {
7497 struct elf_link_hash_entry *tlsbase;
7498
7499 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7500 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7501
7502 if (tlsbase)
7503 {
7504 struct bfd_link_hash_entry *h = NULL;
7505 const struct elf_backend_data *bed =
7506 get_elf_backend_data (output_bfd);
7507
7508 if (!(_bfd_generic_link_add_one_symbol
7509 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7510 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7511 return FALSE;
7512
7513 tlsbase->type = STT_TLS;
7514 tlsbase = (struct elf_link_hash_entry *) h;
7515 tlsbase->def_regular = 1;
7516 tlsbase->other = STV_HIDDEN;
7517 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7518 }
7519 }
7520
7521 return TRUE;
7522 }
7523
7524 /* Finish up dynamic symbol handling. We set the contents of various
7525 dynamic sections here. */
7526 static bfd_boolean
7527 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7528 struct bfd_link_info *info,
7529 struct elf_link_hash_entry *h,
7530 Elf_Internal_Sym *sym)
7531 {
7532 struct elf_aarch64_link_hash_table *htab;
7533 htab = elf_aarch64_hash_table (info);
7534
7535 if (h->plt.offset != (bfd_vma) - 1)
7536 {
7537 asection *plt, *gotplt, *relplt;
7538
7539 /* This symbol has an entry in the procedure linkage table. Set
7540 it up. */
7541
7542 /* When building a static executable, use .iplt, .igot.plt and
7543 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7544 if (htab->root.splt != NULL)
7545 {
7546 plt = htab->root.splt;
7547 gotplt = htab->root.sgotplt;
7548 relplt = htab->root.srelplt;
7549 }
7550 else
7551 {
7552 plt = htab->root.iplt;
7553 gotplt = htab->root.igotplt;
7554 relplt = htab->root.irelplt;
7555 }
7556
7557 /* This symbol has an entry in the procedure linkage table. Set
7558 it up. */
7559 if ((h->dynindx == -1
7560 && !((h->forced_local || info->executable)
7561 && h->def_regular
7562 && h->type == STT_GNU_IFUNC))
7563 || plt == NULL
7564 || gotplt == NULL
7565 || relplt == NULL)
7566 abort ();
7567
7568 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7569 if (!h->def_regular)
7570 {
7571 /* Mark the symbol as undefined, rather than as defined in
7572 the .plt section. */
7573 sym->st_shndx = SHN_UNDEF;
7574 /* If the symbol is weak we need to clear the value.
7575 Otherwise, the PLT entry would provide a definition for
7576 the symbol even if the symbol wasn't defined anywhere,
7577 and so the symbol would never be NULL. Leave the value if
7578 there were any relocations where pointer equality matters
7579 (this is a clue for the dynamic linker, to make function
7580 pointer comparisons work between an application and shared
7581 library). */
7582 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7583 sym->st_value = 0;
7584 }
7585 }
7586
7587 if (h->got.offset != (bfd_vma) - 1
7588 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7589 {
7590 Elf_Internal_Rela rela;
7591 bfd_byte *loc;
7592
7593 /* This symbol has an entry in the global offset table. Set it
7594 up. */
7595 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7596 abort ();
7597
7598 rela.r_offset = (htab->root.sgot->output_section->vma
7599 + htab->root.sgot->output_offset
7600 + (h->got.offset & ~(bfd_vma) 1));
7601
7602 if (h->def_regular
7603 && h->type == STT_GNU_IFUNC)
7604 {
7605 if (info->shared)
7606 {
7607 /* Generate R_AARCH64_GLOB_DAT. */
7608 goto do_glob_dat;
7609 }
7610 else
7611 {
7612 asection *plt;
7613
7614 if (!h->pointer_equality_needed)
7615 abort ();
7616
7617 	      /* For a non-shared object we can't use .got.plt, which holds
7618 		 the real function address, when pointer equality is needed;
7619 		 instead we load this GOT entry with the address of the PLT
		 entry.  */
7620 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7621 bfd_put_NN (output_bfd, (plt->output_section->vma
7622 + plt->output_offset
7623 + h->plt.offset),
7624 htab->root.sgot->contents
7625 + (h->got.offset & ~(bfd_vma) 1));
7626 return TRUE;
7627 }
7628 }
7629 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7630 {
7631 if (!h->def_regular)
7632 return FALSE;
7633
7634 BFD_ASSERT ((h->got.offset & 1) != 0);
7635 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7636 rela.r_addend = (h->root.u.def.value
7637 + h->root.u.def.section->output_section->vma
7638 + h->root.u.def.section->output_offset);
7639 }
7640 else
7641 {
7642 do_glob_dat:
7643 BFD_ASSERT ((h->got.offset & 1) == 0);
7644 bfd_put_NN (output_bfd, (bfd_vma) 0,
7645 htab->root.sgot->contents + h->got.offset);
7646 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7647 rela.r_addend = 0;
7648 }
7649
7650 loc = htab->root.srelgot->contents;
7651 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7652 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7653 }
7654
7655 if (h->needs_copy)
7656 {
7657 Elf_Internal_Rela rela;
7658 bfd_byte *loc;
7659
7660 /* This symbol needs a copy reloc. Set it up. */
7661
7662 if (h->dynindx == -1
7663 || (h->root.type != bfd_link_hash_defined
7664 && h->root.type != bfd_link_hash_defweak)
7665 || htab->srelbss == NULL)
7666 abort ();
7667
7668 rela.r_offset = (h->root.u.def.value
7669 + h->root.u.def.section->output_section->vma
7670 + h->root.u.def.section->output_offset);
7671 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7672 rela.r_addend = 0;
7673 loc = htab->srelbss->contents;
7674 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7675 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7676 }
7677
7678 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7679 be NULL for local symbols. */
7680 if (sym != NULL
7681 && (h == elf_hash_table (info)->hdynamic
7682 || h == elf_hash_table (info)->hgot))
7683 sym->st_shndx = SHN_ABS;
7684
7685 return TRUE;
7686 }
7687
7688 /* Finish up local dynamic symbol handling. We set the contents of
7689 various dynamic sections here. */
7690
7691 static bfd_boolean
7692 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7693 {
7694 struct elf_link_hash_entry *h
7695 = (struct elf_link_hash_entry *) *slot;
7696 struct bfd_link_info *info
7697 = (struct bfd_link_info *) inf;
7698
7699 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7700 info, h, NULL);
7701 }
7702
7703 static void
7704 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7705 struct elf_aarch64_link_hash_table
7706 *htab)
7707 {
7708   /* Fill in PLT0.  FIXME:RR Note this doesn't distinguish between
7709      small and large PLTs and at the moment just generates
7710      the small PLT.  */
7711
7712 /* PLT0 of the small PLT looks like this in ELF64 -
7713 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7714 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7715 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7716 // symbol resolver
7717 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7718 // GOTPLT entry for this.
7719 br x17
7720      PLT0 will be slightly different in ELF32 due to the different GOT
7721      entry size.
7722    */
7723 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7724 bfd_vma plt_base;
7725
7726
7727 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7728 PLT_ENTRY_SIZE);
7729 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7730 PLT_ENTRY_SIZE;
7731
7732 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7733 + htab->root.sgotplt->output_offset
7734 + GOT_ENTRY_SIZE * 2);
7735
7736 plt_base = htab->root.splt->output_section->vma +
7737 htab->root.splt->output_offset;
7738
7739 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7740 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7741 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7742 htab->root.splt->contents + 4,
7743 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7744
7745 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7746 htab->root.splt->contents + 8,
7747 PG_OFFSET (plt_got_2nd_ent));
7748
7749 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7750 htab->root.splt->contents + 12,
7751 PG_OFFSET (plt_got_2nd_ent));
7752 }
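/* After the fixups above, PLT0 computes the address of GOT[2] in x16,
   loads its contents into x17 and branches there.  GOT[2] is zeroed by
   the static linker and conventionally filled in at run time by the
   dynamic linker with the address of its lazy-resolution entry point;
   the x16 value set up by the PLTn entry (the address of the symbol's
   .got.plt slot) is preserved on the stack by the initial stp, which
   is how the resolver knows which slot to patch.  */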
7753
7754 static bfd_boolean
7755 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7756 struct bfd_link_info *info)
7757 {
7758 struct elf_aarch64_link_hash_table *htab;
7759 bfd *dynobj;
7760 asection *sdyn;
7761
7762 htab = elf_aarch64_hash_table (info);
7763 dynobj = htab->root.dynobj;
7764 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7765
7766 if (htab->root.dynamic_sections_created)
7767 {
7768 ElfNN_External_Dyn *dyncon, *dynconend;
7769
7770 if (sdyn == NULL || htab->root.sgot == NULL)
7771 abort ();
7772
7773 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7774 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7775 for (; dyncon < dynconend; dyncon++)
7776 {
7777 Elf_Internal_Dyn dyn;
7778 asection *s;
7779
7780 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7781
7782 switch (dyn.d_tag)
7783 {
7784 default:
7785 continue;
7786
7787 case DT_PLTGOT:
7788 s = htab->root.sgotplt;
7789 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7790 break;
7791
7792 case DT_JMPREL:
7793 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7794 break;
7795
7796 case DT_PLTRELSZ:
7797 s = htab->root.srelplt;
7798 dyn.d_un.d_val = s->size;
7799 break;
7800
7801 case DT_RELASZ:
7802 /* The procedure linkage table relocs (DT_JMPREL) should
7803 not be included in the overall relocs (DT_RELA).
7804 Therefore, we override the DT_RELASZ entry here to
7805 make it not include the JMPREL relocs. Since the
7806 linker script arranges for .rela.plt to follow all
7807 other relocation sections, we don't have to worry
7808 about changing the DT_RELA entry. */
7809 if (htab->root.srelplt != NULL)
7810 {
7811 s = htab->root.srelplt;
7812 dyn.d_un.d_val -= s->size;
7813 }
7814 break;
7815
7816 case DT_TLSDESC_PLT:
7817 s = htab->root.splt;
7818 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7819 + htab->tlsdesc_plt;
7820 break;
7821
7822 case DT_TLSDESC_GOT:
7823 s = htab->root.sgot;
7824 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7825 + htab->dt_tlsdesc_got;
7826 break;
7827 }
7828
7829 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7830 }
7831
7832 }
7833
7834 /* Fill in the special first entry in the procedure linkage table. */
7835 if (htab->root.splt && htab->root.splt->size > 0)
7836 {
7837 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7838
7839 elf_section_data (htab->root.splt->output_section)->
7840 this_hdr.sh_entsize = htab->plt_entry_size;
7841
7842
7843 if (htab->tlsdesc_plt)
7844 {
7845 bfd_put_NN (output_bfd, (bfd_vma) 0,
7846 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7847
7848 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7849 elfNN_aarch64_tlsdesc_small_plt_entry,
7850 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
7851
7852 {
7853 bfd_vma adrp1_addr =
7854 htab->root.splt->output_section->vma
7855 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7856
7857 bfd_vma adrp2_addr = adrp1_addr + 4;
7858
7859 bfd_vma got_addr =
7860 htab->root.sgot->output_section->vma
7861 + htab->root.sgot->output_offset;
7862
7863 bfd_vma pltgot_addr =
7864 htab->root.sgotplt->output_section->vma
7865 + htab->root.sgotplt->output_offset;
7866
7867 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7868
7869 bfd_byte *plt_entry =
7870 htab->root.splt->contents + htab->tlsdesc_plt;
7871
7872 /* adrp x2, DT_TLSDESC_GOT */
7873 elf_aarch64_update_plt_entry (output_bfd,
7874 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7875 plt_entry + 4,
7876 (PG (dt_tlsdesc_got)
7877 - PG (adrp1_addr)));
7878
7879 /* adrp x3, 0 */
7880 elf_aarch64_update_plt_entry (output_bfd,
7881 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7882 plt_entry + 8,
7883 (PG (pltgot_addr)
7884 - PG (adrp2_addr)));
7885
7886 /* ldr x2, [x2, #0] */
7887 elf_aarch64_update_plt_entry (output_bfd,
7888 BFD_RELOC_AARCH64_LDSTNN_LO12,
7889 plt_entry + 12,
7890 PG_OFFSET (dt_tlsdesc_got));
7891
7892 /* add x3, x3, 0 */
7893 elf_aarch64_update_plt_entry (output_bfd,
7894 BFD_RELOC_AARCH64_ADD_LO12,
7895 plt_entry + 16,
7896 PG_OFFSET (pltgot_addr));
7897 }
7898 }
7899 }
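  /* With those fixups in place the TLSDESC trampoline presumably reads
     roughly as follows (the first and last instructions coming from the
     boiler-plate template, the middle four from the patches above):

       stp  x2, x3, [sp, #-16]!
       adrp x2, Page (DT_TLSDESC_GOT slot)
       adrp x3, Page (.got.plt)
       ldr  x2, [x2, #:lo12:DT_TLSDESC_GOT slot]
       add  x3, x3, #:lo12:.got.plt
       br   x2

     so x2 ends up holding whatever the dynamic linker stored in the
     DT_TLSDESC_GOT word (its lazy TLSDESC resolver) and x3 the base of
     .got.plt.  */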
7900
7901 if (htab->root.sgotplt)
7902 {
7903 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7904 {
7905 (*_bfd_error_handler)
7906 (_("discarded output section: `%A'"), htab->root.sgotplt);
7907 return FALSE;
7908 }
7909
7910 /* Fill in the first three entries in the global offset table. */
7911 if (htab->root.sgotplt->size > 0)
7912 {
7913 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7914
7915 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7916 bfd_put_NN (output_bfd,
7917 (bfd_vma) 0,
7918 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7919 bfd_put_NN (output_bfd,
7920 (bfd_vma) 0,
7921 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7922 }
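      /* All three reserved .got.plt words are zeroed here; at run time
	 the dynamic linker conventionally stores its link map pointer
	 and the address of its lazy-resolution routine in the second
	 and third of them, the latter being what PLT0 loads and
	 branches to.  */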
7923
7924 if (htab->root.sgot)
7925 {
7926 if (htab->root.sgot->size > 0)
7927 {
7928 bfd_vma addr =
7929 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7930 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7931 }
7932 }
7933
7934 elf_section_data (htab->root.sgotplt->output_section)->
7935 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7936 }
7937
7938 if (htab->root.sgot && htab->root.sgot->size > 0)
7939 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7940 = GOT_ENTRY_SIZE;
7941
7942 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7943 htab_traverse (htab->loc_hash_table,
7944 elfNN_aarch64_finish_local_dynamic_symbol,
7945 info);
7946
7947 return TRUE;
7948 }
7949
7950 /* Return address for Ith PLT stub in section PLT, for relocation REL
7951 or (bfd_vma) -1 if it should not be included. */
7952
7953 static bfd_vma
7954 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7955 const arelent *rel ATTRIBUTE_UNUSED)
7956 {
7957 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7958 }
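/* For example, assuming PLT_ENTRY_SIZE is 32 and PLT_SMALL_ENTRY_SIZE
   is 16 as used elsewhere in this backend, the synthetic symbol for the
   first PLT stub lands at plt->vma + 32 and each subsequent stub 16
   bytes later, matching the layout written by
   elfNN_aarch64_create_small_pltn_entry.  */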
7959
7960
7961 /* We use this so we can override certain functions
7962 (though currently we don't). */
7963
7964 const struct elf_size_info elfNN_aarch64_size_info =
7965 {
7966 sizeof (ElfNN_External_Ehdr),
7967 sizeof (ElfNN_External_Phdr),
7968 sizeof (ElfNN_External_Shdr),
7969 sizeof (ElfNN_External_Rel),
7970 sizeof (ElfNN_External_Rela),
7971 sizeof (ElfNN_External_Sym),
7972 sizeof (ElfNN_External_Dyn),
7973 sizeof (Elf_External_Note),
7974 4, /* Hash table entry size. */
7975 1, /* Internal relocs per external relocs. */
7976 ARCH_SIZE, /* Arch size. */
7977 LOG_FILE_ALIGN, /* Log_file_align. */
7978 ELFCLASSNN, EV_CURRENT,
7979 bfd_elfNN_write_out_phdrs,
7980 bfd_elfNN_write_shdrs_and_ehdr,
7981 bfd_elfNN_checksum_contents,
7982 bfd_elfNN_write_relocs,
7983 bfd_elfNN_swap_symbol_in,
7984 bfd_elfNN_swap_symbol_out,
7985 bfd_elfNN_slurp_reloc_table,
7986 bfd_elfNN_slurp_symbol_table,
7987 bfd_elfNN_swap_dyn_in,
7988 bfd_elfNN_swap_dyn_out,
7989 bfd_elfNN_swap_reloc_in,
7990 bfd_elfNN_swap_reloc_out,
7991 bfd_elfNN_swap_reloca_in,
7992 bfd_elfNN_swap_reloca_out
7993 };
7994
7995 #define ELF_ARCH bfd_arch_aarch64
7996 #define ELF_MACHINE_CODE EM_AARCH64
7997 #define ELF_MAXPAGESIZE 0x10000
7998 #define ELF_MINPAGESIZE 0x1000
7999 #define ELF_COMMONPAGESIZE 0x1000
8000
8001 #define bfd_elfNN_close_and_cleanup \
8002 elfNN_aarch64_close_and_cleanup
8003
8004 #define bfd_elfNN_bfd_free_cached_info \
8005 elfNN_aarch64_bfd_free_cached_info
8006
8007 #define bfd_elfNN_bfd_is_target_special_symbol \
8008 elfNN_aarch64_is_target_special_symbol
8009
8010 #define bfd_elfNN_bfd_link_hash_table_create \
8011 elfNN_aarch64_link_hash_table_create
8012
8013 #define bfd_elfNN_bfd_merge_private_bfd_data \
8014 elfNN_aarch64_merge_private_bfd_data
8015
8016 #define bfd_elfNN_bfd_print_private_bfd_data \
8017 elfNN_aarch64_print_private_bfd_data
8018
8019 #define bfd_elfNN_bfd_reloc_type_lookup \
8020 elfNN_aarch64_reloc_type_lookup
8021
8022 #define bfd_elfNN_bfd_reloc_name_lookup \
8023 elfNN_aarch64_reloc_name_lookup
8024
8025 #define bfd_elfNN_bfd_set_private_flags \
8026 elfNN_aarch64_set_private_flags
8027
8028 #define bfd_elfNN_find_inliner_info \
8029 elfNN_aarch64_find_inliner_info
8030
8031 #define bfd_elfNN_find_nearest_line \
8032 elfNN_aarch64_find_nearest_line
8033
8034 #define bfd_elfNN_mkobject \
8035 elfNN_aarch64_mkobject
8036
8037 #define bfd_elfNN_new_section_hook \
8038 elfNN_aarch64_new_section_hook
8039
8040 #define elf_backend_adjust_dynamic_symbol \
8041 elfNN_aarch64_adjust_dynamic_symbol
8042
8043 #define elf_backend_always_size_sections \
8044 elfNN_aarch64_always_size_sections
8045
8046 #define elf_backend_check_relocs \
8047 elfNN_aarch64_check_relocs
8048
8049 #define elf_backend_copy_indirect_symbol \
8050 elfNN_aarch64_copy_indirect_symbol
8051
8052 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8053 to them in our hash. */
8054 #define elf_backend_create_dynamic_sections \
8055 elfNN_aarch64_create_dynamic_sections
8056
8057 #define elf_backend_init_index_section \
8058 _bfd_elf_init_2_index_sections
8059
8060 #define elf_backend_finish_dynamic_sections \
8061 elfNN_aarch64_finish_dynamic_sections
8062
8063 #define elf_backend_finish_dynamic_symbol \
8064 elfNN_aarch64_finish_dynamic_symbol
8065
8066 #define elf_backend_gc_sweep_hook \
8067 elfNN_aarch64_gc_sweep_hook
8068
8069 #define elf_backend_object_p \
8070 elfNN_aarch64_object_p
8071
8072 #define elf_backend_output_arch_local_syms \
8073 elfNN_aarch64_output_arch_local_syms
8074
8075 #define elf_backend_plt_sym_val \
8076 elfNN_aarch64_plt_sym_val
8077
8078 #define elf_backend_post_process_headers \
8079 elfNN_aarch64_post_process_headers
8080
8081 #define elf_backend_relocate_section \
8082 elfNN_aarch64_relocate_section
8083
8084 #define elf_backend_reloc_type_class \
8085 elfNN_aarch64_reloc_type_class
8086
8087 #define elf_backend_section_from_shdr \
8088 elfNN_aarch64_section_from_shdr
8089
8090 #define elf_backend_size_dynamic_sections \
8091 elfNN_aarch64_size_dynamic_sections
8092
8093 #define elf_backend_size_info \
8094 elfNN_aarch64_size_info
8095
8096 #define elf_backend_write_section \
8097 elfNN_aarch64_write_section
8098
8099 #define elf_backend_can_refcount 1
8100 #define elf_backend_can_gc_sections 1
8101 #define elf_backend_plt_readonly 1
8102 #define elf_backend_want_got_plt 1
8103 #define elf_backend_want_plt_sym 0
8104 #define elf_backend_may_use_rel_p 0
8105 #define elf_backend_may_use_rela_p 1
8106 #define elf_backend_default_use_rela_p 1
8107 #define elf_backend_rela_normal 1
8108 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8109 #define elf_backend_default_execstack 0
8110
8111 #undef elf_backend_obj_attrs_section
8112 #define elf_backend_obj_attrs_section ".ARM.attributes"
8113
8114 #include "elfNN-target.h"