1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
  23    Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
  51    traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
  62    global and local TLS symbols.  Note that this is different from
  63    non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
  68    on the module entry.  The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
  73    will subsequently fix up the offset.  For local TLS symbols the static
  74    linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
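
   As a rough illustration only (these struct names are not the ones
   used by the code in this file), the two GOT slots can be pictured
   as:

     traditional TLS:  struct tls_index { bfd_vma module; bfd_vma offset; };
     TLS descriptors:  struct tlsdesc   { void *resolver; bfd_vma argument; };

   with the loader filling in the descriptor via the R_AARCH64_TLSDESC
   relocation mentioned above.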
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
 101   spotted.  The local symbol data structures are created once, when
 102   the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
 111   relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
 117   Iterate over all input BFDs, look in the local symbol data
 118   structures constructed earlier for local TLS symbols, allocate them
 119   double GOT slots along with space for a single GOT relocation, and
 120   update the local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128   relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
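
   A minimal sketch of that idiom (illustrative only, not the exact
   helper names used by this file):

     got_offset |= 1;                   mark: GOT relocs now emitted
     off = got_offset & ~(bfd_vma) 1;   mask the flag before use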
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
192 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
193
194 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
195 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC)
207
208 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
209 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
220 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
221
222 #define ELIMINATE_COPY_RELOCS 0
223
224 /* Return size of a relocation entry. HTAB is the bfd's
 225    elf_aarch64_link_hash_table.  */
226 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
227
228 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
229 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
230 #define PLT_ENTRY_SIZE (32)
231 #define PLT_SMALL_ENTRY_SIZE (16)
232 #define PLT_TLSDESC_ENTRY_SIZE (32)
233
 234 /* Encoding of the nop instruction.  */
235 #define INSN_NOP 0xd503201f
236
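/* The jump table occupies one GOT_ENTRY_SIZE slot for every relocation
   counted in .rela.plt.  */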
237 #define aarch64_compute_jump_table_size(htab) \
238 (((htab)->root.srelplt == NULL) ? 0 \
239 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
240
 241 /* The first entry in a procedure linkage table looks like this.
 242    These PLT entries are used if the distance between the PLTGOT and
 243    the PLT is < 4GB.  Note that the dynamic linker gets &PLTGOT[2]
244 in x16 and needs to work out PLTGOT[1] by using an address of
245 [x16,#-GOT_ENTRY_SIZE]. */
246 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
247 {
248 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
249 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
250 #if ARCH_SIZE == 64
251 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
252 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
253 #else
254 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
255 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
256 #endif
257 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
258 0x1f, 0x20, 0x03, 0xd5, /* nop */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261 };
262
 263 /* A per-function entry in a procedure linkage table looks like this.
 264    These PLT entries are used if the distance between the PLTGOT and
 265    the PLT is < 4GB.  */
266 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
267 {
268 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
269 #if ARCH_SIZE == 64
270 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
271 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
272 #else
273 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
274 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
275 #endif
276 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
277 };
278
279 static const bfd_byte
280 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
281 {
282 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
283 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
284 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
285 #if ARCH_SIZE == 64
286 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
287 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
288 #else
289 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
290 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
291 #endif
292 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
293 0x1f, 0x20, 0x03, 0xd5, /* nop */
294 0x1f, 0x20, 0x03, 0xd5, /* nop */
295 };
296
297 #define elf_info_to_howto elfNN_aarch64_info_to_howto
298 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
299
300 #define AARCH64_ELF_ABI_VERSION 0
301
302 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
303 #define ALL_ONES (~ (bfd_vma) 0)
304
 305 /* Indexed by the bfd internal reloc enumerators.
306 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
307 in reloc.c. */
308
309 static reloc_howto_type elfNN_aarch64_howto_table[] =
310 {
311 EMPTY_HOWTO (0),
312
313 /* Basic data relocations. */
314
315 #if ARCH_SIZE == 64
316 HOWTO (R_AARCH64_NULL, /* type */
317 0, /* rightshift */
318 3, /* size (0 = byte, 1 = short, 2 = long) */
319 0, /* bitsize */
320 FALSE, /* pc_relative */
321 0, /* bitpos */
322 complain_overflow_dont, /* complain_on_overflow */
323 bfd_elf_generic_reloc, /* special_function */
324 "R_AARCH64_NULL", /* name */
325 FALSE, /* partial_inplace */
326 0, /* src_mask */
327 0, /* dst_mask */
328 FALSE), /* pcrel_offset */
329 #else
330 HOWTO (R_AARCH64_NONE, /* type */
331 0, /* rightshift */
332 3, /* size (0 = byte, 1 = short, 2 = long) */
333 0, /* bitsize */
334 FALSE, /* pc_relative */
335 0, /* bitpos */
336 complain_overflow_dont, /* complain_on_overflow */
337 bfd_elf_generic_reloc, /* special_function */
338 "R_AARCH64_NONE", /* name */
339 FALSE, /* partial_inplace */
340 0, /* src_mask */
341 0, /* dst_mask */
342 FALSE), /* pcrel_offset */
343 #endif
344
345 /* .xword: (S+A) */
346 HOWTO64 (AARCH64_R (ABS64), /* type */
347 0, /* rightshift */
348 4, /* size (4 = long long) */
349 64, /* bitsize */
350 FALSE, /* pc_relative */
351 0, /* bitpos */
352 complain_overflow_unsigned, /* complain_on_overflow */
353 bfd_elf_generic_reloc, /* special_function */
354 AARCH64_R_STR (ABS64), /* name */
355 FALSE, /* partial_inplace */
356 ALL_ONES, /* src_mask */
357 ALL_ONES, /* dst_mask */
358 FALSE), /* pcrel_offset */
359
360 /* .word: (S+A) */
361 HOWTO (AARCH64_R (ABS32), /* type */
362 0, /* rightshift */
363 2, /* size (0 = byte, 1 = short, 2 = long) */
364 32, /* bitsize */
365 FALSE, /* pc_relative */
366 0, /* bitpos */
367 complain_overflow_unsigned, /* complain_on_overflow */
368 bfd_elf_generic_reloc, /* special_function */
369 AARCH64_R_STR (ABS32), /* name */
370 FALSE, /* partial_inplace */
371 0xffffffff, /* src_mask */
372 0xffffffff, /* dst_mask */
373 FALSE), /* pcrel_offset */
374
375 /* .half: (S+A) */
376 HOWTO (AARCH64_R (ABS16), /* type */
377 0, /* rightshift */
378 1, /* size (0 = byte, 1 = short, 2 = long) */
379 16, /* bitsize */
380 FALSE, /* pc_relative */
381 0, /* bitpos */
382 complain_overflow_unsigned, /* complain_on_overflow */
383 bfd_elf_generic_reloc, /* special_function */
384 AARCH64_R_STR (ABS16), /* name */
385 FALSE, /* partial_inplace */
386 0xffff, /* src_mask */
387 0xffff, /* dst_mask */
388 FALSE), /* pcrel_offset */
389
390 /* .xword: (S+A-P) */
391 HOWTO64 (AARCH64_R (PREL64), /* type */
392 0, /* rightshift */
393 4, /* size (4 = long long) */
394 64, /* bitsize */
395 TRUE, /* pc_relative */
396 0, /* bitpos */
397 complain_overflow_signed, /* complain_on_overflow */
398 bfd_elf_generic_reloc, /* special_function */
399 AARCH64_R_STR (PREL64), /* name */
400 FALSE, /* partial_inplace */
401 ALL_ONES, /* src_mask */
402 ALL_ONES, /* dst_mask */
403 TRUE), /* pcrel_offset */
404
405 /* .word: (S+A-P) */
406 HOWTO (AARCH64_R (PREL32), /* type */
407 0, /* rightshift */
408 2, /* size (0 = byte, 1 = short, 2 = long) */
409 32, /* bitsize */
410 TRUE, /* pc_relative */
411 0, /* bitpos */
412 complain_overflow_signed, /* complain_on_overflow */
413 bfd_elf_generic_reloc, /* special_function */
414 AARCH64_R_STR (PREL32), /* name */
415 FALSE, /* partial_inplace */
416 0xffffffff, /* src_mask */
417 0xffffffff, /* dst_mask */
418 TRUE), /* pcrel_offset */
419
420 /* .half: (S+A-P) */
421 HOWTO (AARCH64_R (PREL16), /* type */
422 0, /* rightshift */
423 1, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 TRUE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_signed, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (PREL16), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 TRUE), /* pcrel_offset */
434
435 /* Group relocations to create a 16, 32, 48 or 64 bit
436 unsigned data or abs address inline. */
437
438 /* MOVZ: ((S+A) >> 0) & 0xffff */
439 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
440 0, /* rightshift */
441 2, /* size (0 = byte, 1 = short, 2 = long) */
442 16, /* bitsize */
443 FALSE, /* pc_relative */
444 0, /* bitpos */
445 complain_overflow_unsigned, /* complain_on_overflow */
446 bfd_elf_generic_reloc, /* special_function */
447 AARCH64_R_STR (MOVW_UABS_G0), /* name */
448 FALSE, /* partial_inplace */
449 0xffff, /* src_mask */
450 0xffff, /* dst_mask */
451 FALSE), /* pcrel_offset */
452
453 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
454 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 16, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_dont, /* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
463 FALSE, /* partial_inplace */
464 0xffff, /* src_mask */
465 0xffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 /* MOVZ: ((S+A) >> 16) & 0xffff */
469 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
470 16, /* rightshift */
471 2, /* size (0 = byte, 1 = short, 2 = long) */
472 16, /* bitsize */
473 FALSE, /* pc_relative */
474 0, /* bitpos */
475 complain_overflow_unsigned, /* complain_on_overflow */
476 bfd_elf_generic_reloc, /* special_function */
477 AARCH64_R_STR (MOVW_UABS_G1), /* name */
478 FALSE, /* partial_inplace */
479 0xffff, /* src_mask */
480 0xffff, /* dst_mask */
481 FALSE), /* pcrel_offset */
482
483 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
484 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
485 16, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 16, /* bitsize */
488 FALSE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_dont, /* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
493 FALSE, /* partial_inplace */
494 0xffff, /* src_mask */
495 0xffff, /* dst_mask */
496 FALSE), /* pcrel_offset */
497
498 /* MOVZ: ((S+A) >> 32) & 0xffff */
499 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
500 32, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 16, /* bitsize */
503 FALSE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_unsigned, /* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 AARCH64_R_STR (MOVW_UABS_G2), /* name */
508 FALSE, /* partial_inplace */
509 0xffff, /* src_mask */
510 0xffff, /* dst_mask */
511 FALSE), /* pcrel_offset */
512
513 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
514 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
515 32, /* rightshift */
516 2, /* size (0 = byte, 1 = short, 2 = long) */
517 16, /* bitsize */
518 FALSE, /* pc_relative */
519 0, /* bitpos */
520 complain_overflow_dont, /* complain_on_overflow */
521 bfd_elf_generic_reloc, /* special_function */
522 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
523 FALSE, /* partial_inplace */
524 0xffff, /* src_mask */
525 0xffff, /* dst_mask */
526 FALSE), /* pcrel_offset */
527
528 /* MOVZ: ((S+A) >> 48) & 0xffff */
529 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
530 48, /* rightshift */
531 2, /* size (0 = byte, 1 = short, 2 = long) */
532 16, /* bitsize */
533 FALSE, /* pc_relative */
534 0, /* bitpos */
535 complain_overflow_unsigned, /* complain_on_overflow */
536 bfd_elf_generic_reloc, /* special_function */
537 AARCH64_R_STR (MOVW_UABS_G3), /* name */
538 FALSE, /* partial_inplace */
539 0xffff, /* src_mask */
540 0xffff, /* dst_mask */
541 FALSE), /* pcrel_offset */
542
543 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
544 signed data or abs address inline. Will change instruction
545 to MOVN or MOVZ depending on sign of calculated value. */
546
547 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
548 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
549 0, /* rightshift */
550 2, /* size (0 = byte, 1 = short, 2 = long) */
551 16, /* bitsize */
552 FALSE, /* pc_relative */
553 0, /* bitpos */
554 complain_overflow_signed, /* complain_on_overflow */
555 bfd_elf_generic_reloc, /* special_function */
556 AARCH64_R_STR (MOVW_SABS_G0), /* name */
557 FALSE, /* partial_inplace */
558 0xffff, /* src_mask */
559 0xffff, /* dst_mask */
560 FALSE), /* pcrel_offset */
561
562 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
563 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
564 16, /* rightshift */
565 2, /* size (0 = byte, 1 = short, 2 = long) */
566 16, /* bitsize */
567 FALSE, /* pc_relative */
568 0, /* bitpos */
569 complain_overflow_signed, /* complain_on_overflow */
570 bfd_elf_generic_reloc, /* special_function */
571 AARCH64_R_STR (MOVW_SABS_G1), /* name */
572 FALSE, /* partial_inplace */
573 0xffff, /* src_mask */
574 0xffff, /* dst_mask */
575 FALSE), /* pcrel_offset */
576
577 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
578 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
579 32, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 16, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (MOVW_SABS_G2), /* name */
587 FALSE, /* partial_inplace */
588 0xffff, /* src_mask */
589 0xffff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
593 addresses: PG(x) is (x & ~0xfff). */
594
595 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
596 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
597 2, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 19, /* bitsize */
600 TRUE, /* pc_relative */
601 0, /* bitpos */
602 complain_overflow_signed, /* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 AARCH64_R_STR (LD_PREL_LO19), /* name */
605 FALSE, /* partial_inplace */
606 0x7ffff, /* src_mask */
607 0x7ffff, /* dst_mask */
608 TRUE), /* pcrel_offset */
609
610 /* ADR: (S+A-P) & 0x1fffff */
611 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 21, /* bitsize */
615 TRUE, /* pc_relative */
616 0, /* bitpos */
617 complain_overflow_signed, /* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 AARCH64_R_STR (ADR_PREL_LO21), /* name */
620 FALSE, /* partial_inplace */
621 0x1fffff, /* src_mask */
622 0x1fffff, /* dst_mask */
623 TRUE), /* pcrel_offset */
624
625 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
626 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
627 12, /* rightshift */
628 2, /* size (0 = byte, 1 = short, 2 = long) */
629 21, /* bitsize */
630 TRUE, /* pc_relative */
631 0, /* bitpos */
632 complain_overflow_signed, /* complain_on_overflow */
633 bfd_elf_generic_reloc, /* special_function */
634 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
635 FALSE, /* partial_inplace */
636 0x1fffff, /* src_mask */
637 0x1fffff, /* dst_mask */
638 TRUE), /* pcrel_offset */
639
640 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
641 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
642 12, /* rightshift */
643 2, /* size (0 = byte, 1 = short, 2 = long) */
644 21, /* bitsize */
645 TRUE, /* pc_relative */
646 0, /* bitpos */
647 complain_overflow_dont, /* complain_on_overflow */
648 bfd_elf_generic_reloc, /* special_function */
649 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
650 FALSE, /* partial_inplace */
651 0x1fffff, /* src_mask */
652 0x1fffff, /* dst_mask */
653 TRUE), /* pcrel_offset */
654
655 /* ADD: (S+A) & 0xfff [no overflow check] */
656 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
657 0, /* rightshift */
658 2, /* size (0 = byte, 1 = short, 2 = long) */
659 12, /* bitsize */
660 FALSE, /* pc_relative */
661 10, /* bitpos */
662 complain_overflow_dont, /* complain_on_overflow */
663 bfd_elf_generic_reloc, /* special_function */
664 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
665 FALSE, /* partial_inplace */
666 0x3ffc00, /* src_mask */
667 0x3ffc00, /* dst_mask */
668 FALSE), /* pcrel_offset */
669
670 /* LD/ST8: (S+A) & 0xfff */
671 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
672 0, /* rightshift */
673 2, /* size (0 = byte, 1 = short, 2 = long) */
674 12, /* bitsize */
675 FALSE, /* pc_relative */
676 0, /* bitpos */
677 complain_overflow_dont, /* complain_on_overflow */
678 bfd_elf_generic_reloc, /* special_function */
679 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
680 FALSE, /* partial_inplace */
681 0xfff, /* src_mask */
682 0xfff, /* dst_mask */
683 FALSE), /* pcrel_offset */
684
685 /* Relocations for control-flow instructions. */
686
687 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
688 HOWTO (AARCH64_R (TSTBR14), /* type */
689 2, /* rightshift */
690 2, /* size (0 = byte, 1 = short, 2 = long) */
691 14, /* bitsize */
692 TRUE, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (TSTBR14), /* name */
697 FALSE, /* partial_inplace */
698 0x3fff, /* src_mask */
699 0x3fff, /* dst_mask */
700 TRUE), /* pcrel_offset */
701
702 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
703 HOWTO (AARCH64_R (CONDBR19), /* type */
704 2, /* rightshift */
705 2, /* size (0 = byte, 1 = short, 2 = long) */
706 19, /* bitsize */
707 TRUE, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (CONDBR19), /* name */
712 FALSE, /* partial_inplace */
713 0x7ffff, /* src_mask */
714 0x7ffff, /* dst_mask */
715 TRUE), /* pcrel_offset */
716
717 /* B: ((S+A-P) >> 2) & 0x3ffffff */
718 HOWTO (AARCH64_R (JUMP26), /* type */
719 2, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 26, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_signed, /* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 AARCH64_R_STR (JUMP26), /* name */
727 FALSE, /* partial_inplace */
728 0x3ffffff, /* src_mask */
729 0x3ffffff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
733 HOWTO (AARCH64_R (CALL26), /* type */
734 2, /* rightshift */
735 2, /* size (0 = byte, 1 = short, 2 = long) */
736 26, /* bitsize */
737 TRUE, /* pc_relative */
738 0, /* bitpos */
739 complain_overflow_signed, /* complain_on_overflow */
740 bfd_elf_generic_reloc, /* special_function */
741 AARCH64_R_STR (CALL26), /* name */
742 FALSE, /* partial_inplace */
743 0x3ffffff, /* src_mask */
744 0x3ffffff, /* dst_mask */
745 TRUE), /* pcrel_offset */
746
747 /* LD/ST16: (S+A) & 0xffe */
748 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
749 1, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 12, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont, /* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
757 FALSE, /* partial_inplace */
758 0xffe, /* src_mask */
759 0xffe, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 /* LD/ST32: (S+A) & 0xffc */
763 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
764 2, /* rightshift */
765 2, /* size (0 = byte, 1 = short, 2 = long) */
766 12, /* bitsize */
767 FALSE, /* pc_relative */
768 0, /* bitpos */
769 complain_overflow_dont, /* complain_on_overflow */
770 bfd_elf_generic_reloc, /* special_function */
771 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
772 FALSE, /* partial_inplace */
773 0xffc, /* src_mask */
774 0xffc, /* dst_mask */
775 FALSE), /* pcrel_offset */
776
777 /* LD/ST64: (S+A) & 0xff8 */
778 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
779 3, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 12, /* bitsize */
782 FALSE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont, /* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
787 FALSE, /* partial_inplace */
788 0xff8, /* src_mask */
789 0xff8, /* dst_mask */
790 FALSE), /* pcrel_offset */
791
792 /* LD/ST128: (S+A) & 0xff0 */
793 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
794 4, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 12, /* bitsize */
797 FALSE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_dont, /* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
802 FALSE, /* partial_inplace */
803 0xff0, /* src_mask */
804 0xff0, /* dst_mask */
805 FALSE), /* pcrel_offset */
806
807 /* Set a load-literal immediate field to bits
808 0x1FFFFC of G(S)-P */
809 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
810 2, /* rightshift */
811 2, /* size (0 = byte,1 = short,2 = long) */
812 19, /* bitsize */
813 TRUE, /* pc_relative */
814 0, /* bitpos */
815 complain_overflow_signed, /* complain_on_overflow */
816 bfd_elf_generic_reloc, /* special_function */
817 AARCH64_R_STR (GOT_LD_PREL19), /* name */
818 FALSE, /* partial_inplace */
819 0xffffe0, /* src_mask */
820 0xffffe0, /* dst_mask */
821 TRUE), /* pcrel_offset */
822
823 /* Get to the page for the GOT entry for the symbol
824 (G(S) - P) using an ADRP instruction. */
825 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
826 12, /* rightshift */
827 2, /* size (0 = byte, 1 = short, 2 = long) */
828 21, /* bitsize */
829 TRUE, /* pc_relative */
830 0, /* bitpos */
831 complain_overflow_dont, /* complain_on_overflow */
832 bfd_elf_generic_reloc, /* special_function */
833 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
834 FALSE, /* partial_inplace */
835 0x1fffff, /* src_mask */
836 0x1fffff, /* dst_mask */
837 TRUE), /* pcrel_offset */
838
839 /* LD64: GOT offset G(S) & 0xff8 */
840 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
841 3, /* rightshift */
842 2, /* size (0 = byte, 1 = short, 2 = long) */
843 12, /* bitsize */
844 FALSE, /* pc_relative */
845 0, /* bitpos */
846 complain_overflow_dont, /* complain_on_overflow */
847 bfd_elf_generic_reloc, /* special_function */
848 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
849 FALSE, /* partial_inplace */
850 0xff8, /* src_mask */
851 0xff8, /* dst_mask */
852 FALSE), /* pcrel_offset */
853
854 /* LD32: GOT offset G(S) & 0xffc */
855 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
856 2, /* rightshift */
857 2, /* size (0 = byte, 1 = short, 2 = long) */
858 12, /* bitsize */
859 FALSE, /* pc_relative */
860 0, /* bitpos */
861 complain_overflow_dont, /* complain_on_overflow */
862 bfd_elf_generic_reloc, /* special_function */
863 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
864 FALSE, /* partial_inplace */
865 0xffc, /* src_mask */
866 0xffc, /* dst_mask */
867 FALSE), /* pcrel_offset */
868
869 /* LD64: GOT offset for the symbol. */
870 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
871 3, /* rightshift */
872 2, /* size (0 = byte, 1 = short, 2 = long) */
873 12, /* bitsize */
874 FALSE, /* pc_relative */
875 0, /* bitpos */
876 complain_overflow_unsigned, /* complain_on_overflow */
877 bfd_elf_generic_reloc, /* special_function */
878 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
879 FALSE, /* partial_inplace */
880 0x7ff8, /* src_mask */
881 0x7ff8, /* dst_mask */
882 FALSE), /* pcrel_offset */
883
884 /* LD32: GOT offset to the page address of GOT table.
885 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
886 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
887 2, /* rightshift */
888 2, /* size (0 = byte, 1 = short, 2 = long) */
889 12, /* bitsize */
890 FALSE, /* pc_relative */
891 0, /* bitpos */
892 complain_overflow_unsigned, /* complain_on_overflow */
893 bfd_elf_generic_reloc, /* special_function */
894 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
895 FALSE, /* partial_inplace */
896 0x5ffc, /* src_mask */
897 0x5ffc, /* dst_mask */
898 FALSE), /* pcrel_offset */
899
900 /* LD64: GOT offset to the page address of GOT table.
901 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
902 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
903 3, /* rightshift */
904 2, /* size (0 = byte, 1 = short, 2 = long) */
905 12, /* bitsize */
906 FALSE, /* pc_relative */
907 0, /* bitpos */
908 complain_overflow_unsigned, /* complain_on_overflow */
909 bfd_elf_generic_reloc, /* special_function */
910 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
911 FALSE, /* partial_inplace */
912 0x7ff8, /* src_mask */
913 0x7ff8, /* dst_mask */
914 FALSE), /* pcrel_offset */
915
916 /* Get to the page for the GOT entry for the symbol
917 (G(S) - P) using an ADRP instruction. */
918 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
919 12, /* rightshift */
920 2, /* size (0 = byte, 1 = short, 2 = long) */
921 21, /* bitsize */
922 TRUE, /* pc_relative */
923 0, /* bitpos */
924 complain_overflow_dont, /* complain_on_overflow */
925 bfd_elf_generic_reloc, /* special_function */
926 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
927 FALSE, /* partial_inplace */
928 0x1fffff, /* src_mask */
929 0x1fffff, /* dst_mask */
930 TRUE), /* pcrel_offset */
931
932 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
933 0, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
 946   /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
947 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
956 FALSE, /* partial_inplace */
957 0xfff, /* src_mask */
958 0xfff, /* dst_mask */
959 FALSE), /* pcrel_offset */
960
961 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
962 16, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 16, /* bitsize */
965 FALSE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont, /* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
970 FALSE, /* partial_inplace */
971 0xffff, /* src_mask */
972 0xffff, /* dst_mask */
973 FALSE), /* pcrel_offset */
974
975 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 16, /* bitsize */
979 FALSE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont, /* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
984 FALSE, /* partial_inplace */
985 0xffff, /* src_mask */
986 0xffff, /* dst_mask */
987 FALSE), /* pcrel_offset */
988
989 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
990 12, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 21, /* bitsize */
993 FALSE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont, /* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
998 FALSE, /* partial_inplace */
999 0x1fffff, /* src_mask */
1000 0x1fffff, /* dst_mask */
1001 FALSE), /* pcrel_offset */
1002
1003 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1004 3, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 12, /* bitsize */
1007 FALSE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont, /* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1012 FALSE, /* partial_inplace */
1013 0xff8, /* src_mask */
1014 0xff8, /* dst_mask */
1015 FALSE), /* pcrel_offset */
1016
1017 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1018 2, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 12, /* bitsize */
1021 FALSE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont, /* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1026 FALSE, /* partial_inplace */
1027 0xffc, /* src_mask */
1028 0xffc, /* dst_mask */
1029 FALSE), /* pcrel_offset */
1030
1031 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1032 2, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 19, /* bitsize */
1035 FALSE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont, /* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1040 FALSE, /* partial_inplace */
1041 0x1ffffc, /* src_mask */
1042 0x1ffffc, /* dst_mask */
1043 FALSE), /* pcrel_offset */
1044
1045 /* Unsigned 12 bit byte offset to module TLS base address. */
1046 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1047 0, /* rightshift */
1048 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 12, /* bitsize */
1050 FALSE, /* pc_relative */
1051 0, /* bitpos */
1052 complain_overflow_unsigned, /* complain_on_overflow */
1053 bfd_elf_generic_reloc, /* special_function */
1054 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1055 FALSE, /* partial_inplace */
1056 0xfff, /* src_mask */
1057 0xfff, /* dst_mask */
1058 FALSE), /* pcrel_offset */
1059
1060   /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
1061 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 12, /* bitsize */
1065 FALSE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont, /* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1070 FALSE, /* partial_inplace */
1071 0xfff, /* src_mask */
1072 0xfff, /* dst_mask */
1073 FALSE), /* pcrel_offset */
1074
1075 /* Get to the page for the GOT entry for the symbol
1076 (G(S) - P) using an ADRP instruction. */
1077 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1078 12, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 21, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_signed, /* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1086 FALSE, /* partial_inplace */
1087 0x1fffff, /* src_mask */
1088 0x1fffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 21, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_signed, /* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1100 FALSE, /* partial_inplace */
1101 0x1fffff, /* src_mask */
1102 0x1fffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1106 32, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 16, /* bitsize */
1109 FALSE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_unsigned, /* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1114 FALSE, /* partial_inplace */
1115 0xffff, /* src_mask */
1116 0xffff, /* dst_mask */
1117 FALSE), /* pcrel_offset */
1118
1119 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1120 16, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 16, /* bitsize */
1123 FALSE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont, /* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1128 FALSE, /* partial_inplace */
1129 0xffff, /* src_mask */
1130 0xffff, /* dst_mask */
1131 FALSE), /* pcrel_offset */
1132
1133 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1134 16, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 16, /* bitsize */
1137 FALSE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1142 FALSE, /* partial_inplace */
1143 0xffff, /* src_mask */
1144 0xffff, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1146
1147 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 16, /* bitsize */
1151 FALSE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont, /* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1156 FALSE, /* partial_inplace */
1157 0xffff, /* src_mask */
1158 0xffff, /* dst_mask */
1159 FALSE), /* pcrel_offset */
1160
1161 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 16, /* bitsize */
1165 FALSE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont, /* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1170 FALSE, /* partial_inplace */
1171 0xffff, /* src_mask */
1172 0xffff, /* dst_mask */
1173 FALSE), /* pcrel_offset */
1174
1175 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1176 12, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 12, /* bitsize */
1179 FALSE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_unsigned, /* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1184 FALSE, /* partial_inplace */
1185 0xfff, /* src_mask */
1186 0xfff, /* dst_mask */
1187 FALSE), /* pcrel_offset */
1188
1189 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 12, /* bitsize */
1193 FALSE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_unsigned, /* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1198 FALSE, /* partial_inplace */
1199 0xfff, /* src_mask */
1200 0xfff, /* dst_mask */
1201 FALSE), /* pcrel_offset */
1202
1203 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 12, /* bitsize */
1207 FALSE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont, /* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1212 FALSE, /* partial_inplace */
1213 0xfff, /* src_mask */
1214 0xfff, /* dst_mask */
1215 FALSE), /* pcrel_offset */
1216
1217 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1218 2, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 19, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont, /* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1226 FALSE, /* partial_inplace */
1227 0x0ffffe0, /* src_mask */
1228 0x0ffffe0, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 21, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont, /* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1240 FALSE, /* partial_inplace */
1241 0x1fffff, /* src_mask */
1242 0x1fffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 /* Get to the page for the GOT entry for the symbol
1246 (G(S) - P) using an ADRP instruction. */
1247 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1248 12, /* rightshift */
1249 2, /* size (0 = byte, 1 = short, 2 = long) */
1250 21, /* bitsize */
1251 TRUE, /* pc_relative */
1252 0, /* bitpos */
1253 complain_overflow_dont, /* complain_on_overflow */
1254 bfd_elf_generic_reloc, /* special_function */
1255 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1256 FALSE, /* partial_inplace */
1257 0x1fffff, /* src_mask */
1258 0x1fffff, /* dst_mask */
1259 TRUE), /* pcrel_offset */
1260
1261 /* LD64: GOT offset G(S) & 0xff8. */
1262 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1263 3, /* rightshift */
1264 2, /* size (0 = byte, 1 = short, 2 = long) */
1265 12, /* bitsize */
1266 FALSE, /* pc_relative */
1267 0, /* bitpos */
1268 complain_overflow_dont, /* complain_on_overflow */
1269 bfd_elf_generic_reloc, /* special_function */
1270 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1271 FALSE, /* partial_inplace */
1272 0xff8, /* src_mask */
1273 0xff8, /* dst_mask */
1274 FALSE), /* pcrel_offset */
1275
1276 /* LD32: GOT offset G(S) & 0xffc. */
1277 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1278 2, /* rightshift */
1279 2, /* size (0 = byte, 1 = short, 2 = long) */
1280 12, /* bitsize */
1281 FALSE, /* pc_relative */
1282 0, /* bitpos */
1283 complain_overflow_dont, /* complain_on_overflow */
1284 bfd_elf_generic_reloc, /* special_function */
1285 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1286 FALSE, /* partial_inplace */
1287 0xffc, /* src_mask */
1288 0xffc, /* dst_mask */
1289 FALSE), /* pcrel_offset */
1290
1291 /* ADD: GOT offset G(S) & 0xfff. */
1292 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1293 0, /* rightshift */
1294 2, /* size (0 = byte, 1 = short, 2 = long) */
1295 12, /* bitsize */
1296 FALSE, /* pc_relative */
1297 0, /* bitpos */
1298 complain_overflow_dont, /* complain_on_overflow */
1299 bfd_elf_generic_reloc, /* special_function */
1300 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1301 FALSE, /* partial_inplace */
1302 0xfff, /* src_mask */
1303 0xfff, /* dst_mask */
1304 FALSE), /* pcrel_offset */
1305
1306 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1307 16, /* rightshift */
1308 2, /* size (0 = byte, 1 = short, 2 = long) */
1309 12, /* bitsize */
1310 FALSE, /* pc_relative */
1311 0, /* bitpos */
1312 complain_overflow_dont, /* complain_on_overflow */
1313 bfd_elf_generic_reloc, /* special_function */
1314 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1315 FALSE, /* partial_inplace */
1316 0xffff, /* src_mask */
1317 0xffff, /* dst_mask */
1318 FALSE), /* pcrel_offset */
1319
1320 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1321 0, /* rightshift */
1322 2, /* size (0 = byte, 1 = short, 2 = long) */
1323 12, /* bitsize */
1324 FALSE, /* pc_relative */
1325 0, /* bitpos */
1326 complain_overflow_dont, /* complain_on_overflow */
1327 bfd_elf_generic_reloc, /* special_function */
1328 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1329 FALSE, /* partial_inplace */
1330 0xffff, /* src_mask */
1331 0xffff, /* dst_mask */
1332 FALSE), /* pcrel_offset */
1333
1334 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1335 0, /* rightshift */
1336 2, /* size (0 = byte, 1 = short, 2 = long) */
1337 12, /* bitsize */
1338 FALSE, /* pc_relative */
1339 0, /* bitpos */
1340 complain_overflow_dont, /* complain_on_overflow */
1341 bfd_elf_generic_reloc, /* special_function */
1342 AARCH64_R_STR (TLSDESC_LDR), /* name */
1343 FALSE, /* partial_inplace */
1344 0x0, /* src_mask */
1345 0x0, /* dst_mask */
1346 FALSE), /* pcrel_offset */
1347
1348 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1349 0, /* rightshift */
1350 2, /* size (0 = byte, 1 = short, 2 = long) */
1351 12, /* bitsize */
1352 FALSE, /* pc_relative */
1353 0, /* bitpos */
1354 complain_overflow_dont, /* complain_on_overflow */
1355 bfd_elf_generic_reloc, /* special_function */
1356 AARCH64_R_STR (TLSDESC_ADD), /* name */
1357 FALSE, /* partial_inplace */
1358 0x0, /* src_mask */
1359 0x0, /* dst_mask */
1360 FALSE), /* pcrel_offset */
1361
1362 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 0, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont, /* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 AARCH64_R_STR (TLSDESC_CALL), /* name */
1371 FALSE, /* partial_inplace */
1372 0x0, /* src_mask */
1373 0x0, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (AARCH64_R (COPY), /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 64, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_bitfield, /* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 AARCH64_R_STR (COPY), /* name */
1385 TRUE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 64, /* bitsize */
1394 FALSE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_bitfield, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 AARCH64_R_STR (GLOB_DAT), /* name */
1399 TRUE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 FALSE), /* pcrel_offset */
1403
1404 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 64, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield, /* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 AARCH64_R_STR (JUMP_SLOT), /* name */
1413 TRUE, /* partial_inplace */
1414 0xffffffff, /* src_mask */
1415 0xffffffff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (AARCH64_R (RELATIVE), /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 64, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield, /* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 AARCH64_R_STR (RELATIVE), /* name */
1427 TRUE, /* partial_inplace */
1428 ALL_ONES, /* src_mask */
1429 ALL_ONES, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1433 0, /* rightshift */
1434 2, /* size (0 = byte, 1 = short, 2 = long) */
1435 64, /* bitsize */
1436 FALSE, /* pc_relative */
1437 0, /* bitpos */
1438 complain_overflow_dont, /* complain_on_overflow */
1439 bfd_elf_generic_reloc, /* special_function */
1440 #if ARCH_SIZE == 64
1441 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1442 #else
1443 AARCH64_R_STR (TLS_DTPMOD), /* name */
1444 #endif
1445 FALSE, /* partial_inplace */
1446 0, /* src_mask */
1447 ALL_ONES, /* dst_mask */
1448 	 FALSE),		/* pcrel_offset */
1449
1450 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 64, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 bfd_elf_generic_reloc, /* special_function */
1458 #if ARCH_SIZE == 64
1459 AARCH64_R_STR (TLS_DTPREL64), /* name */
1460 #else
1461 AARCH64_R_STR (TLS_DTPREL), /* name */
1462 #endif
1463 FALSE, /* partial_inplace */
1464 0, /* src_mask */
1465 ALL_ONES, /* dst_mask */
1466 FALSE), /* pcrel_offset */
1467
1468 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1469 0, /* rightshift */
1470 2, /* size (0 = byte, 1 = short, 2 = long) */
1471 64, /* bitsize */
1472 FALSE, /* pc_relative */
1473 0, /* bitpos */
1474 complain_overflow_dont, /* complain_on_overflow */
1475 bfd_elf_generic_reloc, /* special_function */
1476 #if ARCH_SIZE == 64
1477 AARCH64_R_STR (TLS_TPREL64), /* name */
1478 #else
1479 AARCH64_R_STR (TLS_TPREL), /* name */
1480 #endif
1481 FALSE, /* partial_inplace */
1482 0, /* src_mask */
1483 ALL_ONES, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 HOWTO (AARCH64_R (TLSDESC), /* type */
1487 0, /* rightshift */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1489 64, /* bitsize */
1490 FALSE, /* pc_relative */
1491 0, /* bitpos */
1492 complain_overflow_dont, /* complain_on_overflow */
1493 bfd_elf_generic_reloc, /* special_function */
1494 AARCH64_R_STR (TLSDESC), /* name */
1495 FALSE, /* partial_inplace */
1496 0, /* src_mask */
1497 ALL_ONES, /* dst_mask */
1498 FALSE), /* pcrel_offset */
1499
1500 HOWTO (AARCH64_R (IRELATIVE), /* type */
1501 0, /* rightshift */
1502 2, /* size (0 = byte, 1 = short, 2 = long) */
1503 64, /* bitsize */
1504 FALSE, /* pc_relative */
1505 0, /* bitpos */
1506 complain_overflow_bitfield, /* complain_on_overflow */
1507 bfd_elf_generic_reloc, /* special_function */
1508 AARCH64_R_STR (IRELATIVE), /* name */
1509 FALSE, /* partial_inplace */
1510 0, /* src_mask */
1511 ALL_ONES, /* dst_mask */
1512 FALSE), /* pcrel_offset */
1513
1514 EMPTY_HOWTO (0),
1515 };
1516
1517 static reloc_howto_type elfNN_aarch64_howto_none =
1518 HOWTO (R_AARCH64_NONE, /* type */
1519 0, /* rightshift */
1520 3, /* size (0 = byte, 1 = short, 2 = long) */
1521 0, /* bitsize */
1522 FALSE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_dont,/* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_AARCH64_NONE", /* name */
1527 FALSE, /* partial_inplace */
1528 0, /* src_mask */
1529 0, /* dst_mask */
1530 FALSE); /* pcrel_offset */
1531
1532 /* Given HOWTO, return the bfd internal relocation enumerator. */
1533
1534 static bfd_reloc_code_real_type
1535 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1536 {
1537 const int size
1538 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1539 const ptrdiff_t offset
1540 = howto - elfNN_aarch64_howto_table;
1541
1542 if (offset > 0 && offset < size - 1)
1543 return BFD_RELOC_AARCH64_RELOC_START + offset;
1544
1545 if (howto == &elfNN_aarch64_howto_none)
1546 return BFD_RELOC_AARCH64_NONE;
1547
1548 return BFD_RELOC_AARCH64_RELOC_START;
1549 }
1550
1551 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1552
1553 static bfd_reloc_code_real_type
1554 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1555 {
1556 static bfd_boolean initialized_p = FALSE;
1557 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1558 static unsigned int offsets[R_AARCH64_end];
1559
1560 if (initialized_p == FALSE)
1561 {
1562 unsigned int i;
1563
1564 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1565 if (elfNN_aarch64_howto_table[i].type != 0)
1566 offsets[elfNN_aarch64_howto_table[i].type] = i;
1567
1568 initialized_p = TRUE;
1569 }
1570
1571 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1572 return BFD_RELOC_AARCH64_NONE;
1573
1574 /* PR 17512: file: b371e70a. */
1575 if (r_type >= R_AARCH64_end)
1576 {
1577 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1578 bfd_set_error (bfd_error_bad_value);
1579 return BFD_RELOC_AARCH64_NONE;
1580 }
1581
1582 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1583 }
1584
1585 struct elf_aarch64_reloc_map
1586 {
1587 bfd_reloc_code_real_type from;
1588 bfd_reloc_code_real_type to;
1589 };
1590
1591 /* Map bfd generic reloc to AArch64-specific reloc. */
1592 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1593 {
1594 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1595
1596 /* Basic data relocations. */
1597 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1598 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1599 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1600 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1601 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1602 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1603 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1604 };
1605
1606 /* Given the bfd internal relocation enumerator in CODE, return the
1607 corresponding howto entry. */
1608
1609 static reloc_howto_type *
1610 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1611 {
1612 unsigned int i;
1613
1614 /* Convert bfd generic reloc to AArch64-specific reloc. */
1615 if (code < BFD_RELOC_AARCH64_RELOC_START
1616 || code > BFD_RELOC_AARCH64_RELOC_END)
1617 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1618 if (elf_aarch64_reloc_map[i].from == code)
1619 {
1620 code = elf_aarch64_reloc_map[i].to;
1621 break;
1622 }
1623
1624 if (code > BFD_RELOC_AARCH64_RELOC_START
1625 && code < BFD_RELOC_AARCH64_RELOC_END)
1626 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1627 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1628
1629 if (code == BFD_RELOC_AARCH64_NONE)
1630 return &elfNN_aarch64_howto_none;
1631
1632 return NULL;
1633 }
1634
1635 static reloc_howto_type *
1636 elfNN_aarch64_howto_from_type (unsigned int r_type)
1637 {
1638 bfd_reloc_code_real_type val;
1639 reloc_howto_type *howto;
1640
1641 #if ARCH_SIZE == 32
1642 if (r_type > 256)
1643 {
1644 bfd_set_error (bfd_error_bad_value);
1645 return NULL;
1646 }
1647 #endif
1648
1649 if (r_type == R_AARCH64_NONE)
1650 return &elfNN_aarch64_howto_none;
1651
1652 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1653 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1654
1655 if (howto != NULL)
1656 return howto;
1657
1658 bfd_set_error (bfd_error_bad_value);
1659 return NULL;
1660 }
1661
1662 static void
1663 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1664 Elf_Internal_Rela *elf_reloc)
1665 {
1666 unsigned int r_type;
1667
1668 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1669 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1670 }
1671
1672 static reloc_howto_type *
1673 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1674 bfd_reloc_code_real_type code)
1675 {
1676 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1677
1678 if (howto != NULL)
1679 return howto;
1680
1681 bfd_set_error (bfd_error_bad_value);
1682 return NULL;
1683 }
1684
1685 static reloc_howto_type *
1686 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1687 const char *r_name)
1688 {
1689 unsigned int i;
1690
1691 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1692 if (elfNN_aarch64_howto_table[i].name != NULL
1693 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1694 return &elfNN_aarch64_howto_table[i];
1695
1696 return NULL;
1697 }
1698
1699 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1700 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1701 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1702 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1703
1704 /* The linker script knows the section names for placement.
1705 The entry_names are used to do simple name mangling on the stubs.
1706 Given a function name, and its type, the stub can be found. The
1707 name can be changed. The only requirement is that the %s be present. */
1708 #define STUB_ENTRY_NAME "__%s_veneer"
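/* For example, under the format above a veneer generated for a call to a
   function "foo" would be given a local symbol name of the form
   "__foo_veneer" (the function name here is purely illustrative).  */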
1709
1710 /* The name of the dynamic interpreter. This is put in the .interp
1711 section. */
1712 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1713
1714 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1715 (((1 << 25) - 1) << 2)
1716 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1717 (-((1 << 25) << 2))
1718
1719 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1720 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
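/* For illustration: a B/BL immediate is a signed 26-bit word offset, so the
   forward limit above works out to ((1 << 25) - 1) * 4 = 0x7fffffc bytes
   (just under 128MiB) and the backward limit to -(1 << 25) * 4 = -0x8000000
   bytes.  An ADRP immediate is a signed 21-bit count of 4KiB pages, i.e.
   a reach of roughly +/-4GiB from the page containing the instruction.  */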
1721
1722 static int
1723 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1724 {
1725 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1726 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1727 }
1728
1729 static int
1730 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1731 {
1732 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1733 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1734 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1735 }
1736
1737 static const uint32_t aarch64_adrp_branch_stub [] =
1738 {
1739 0x90000010, /* adrp ip0, X */
1740 /* R_AARCH64_ADR_HI21_PCREL(X) */
1741 0x91000210, /* add ip0, ip0, :lo12:X */
1742 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1743 0xd61f0200, /* br ip0 */
1744 };
1745
1746 static const uint32_t aarch64_long_branch_stub[] =
1747 {
1748 #if ARCH_SIZE == 64
1749 0x58000090, /* ldr ip0, 1f */
1750 #else
1751 0x18000090, /* ldr wip0, 1f */
1752 #endif
1753 0x10000011, /* adr ip1, #0 */
1754 0x8b110210, /* add ip0, ip0, ip1 */
1755 0xd61f0200, /* br ip0 */
1756 0x00000000, /* 1: .xword or .word
1757 R_AARCH64_PRELNN(X) + 12
1758 */
1759 0x00000000,
1760 };
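/* A sketch of how aarch64_long_branch_stub reaches its target: the trailing
   literal is fixed up with an R_AARCH64_PRELNN relocation against the target
   plus 12, which makes it hold target - address_of_the_adr_instruction.  At
   run time "adr ip1, #0" materialises that address in ip1, the add then
   recovers the absolute target in ip0, and the br jumps to it.  */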
1761
1762 static const uint32_t aarch64_erratum_835769_stub[] =
1763 {
1764 0x00000000, /* Placeholder for multiply accumulate. */
1765 0x14000000, /* b <label> */
1766 };
1767
1768 static const uint32_t aarch64_erratum_843419_stub[] =
1769 {
1770 0x00000000, /* Placeholder for LDR instruction. */
1771 0x14000000, /* b <label> */
1772 };
1773
1774 /* Section name for stubs is the associated section name plus this
1775 string. */
1776 #define STUB_SUFFIX ".stub"
1777
1778 enum elf_aarch64_stub_type
1779 {
1780 aarch64_stub_none,
1781 aarch64_stub_adrp_branch,
1782 aarch64_stub_long_branch,
1783 aarch64_stub_erratum_835769_veneer,
1784 aarch64_stub_erratum_843419_veneer,
1785 };
1786
1787 struct elf_aarch64_stub_hash_entry
1788 {
1789 /* Base hash table entry structure. */
1790 struct bfd_hash_entry root;
1791
1792 /* The stub section. */
1793 asection *stub_sec;
1794
1795 /* Offset within stub_sec of the beginning of this stub. */
1796 bfd_vma stub_offset;
1797
1798 /* Given the symbol's value and its section we can determine its final
1799 value when building the stubs (so the stub knows where to jump). */
1800 bfd_vma target_value;
1801 asection *target_section;
1802
1803 enum elf_aarch64_stub_type stub_type;
1804
1805 /* The symbol table entry, if any, that this was derived from. */
1806 struct elf_aarch64_link_hash_entry *h;
1807
1808 /* Destination symbol type */
1809 unsigned char st_type;
1810
1811 /* Where this stub is being called from, or, in the case of combined
1812 stub sections, the first input section in the group. */
1813 asection *id_sec;
1814
1815 /* The name for the local symbol at the start of this stub. The
1816 stub name in the hash table has to be unique; this does not, so
1817 it can be friendlier. */
1818 char *output_name;
1819
1820 /* The instruction which caused this stub to be generated (only valid for
1821 erratum 835769 workaround stubs at present). */
1822 uint32_t veneered_insn;
1823
1824 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1825 bfd_vma adrp_offset;
1826 };
1827
1828 /* Used to build a map of a section. This is required for mixed-endian
1829 code/data. */
1830
1831 typedef struct elf_elf_section_map
1832 {
1833 bfd_vma vma;
1834 char type;
1835 }
1836 elf_aarch64_section_map;
1837
1838
1839 typedef struct _aarch64_elf_section_data
1840 {
1841 struct bfd_elf_section_data elf;
1842 unsigned int mapcount;
1843 unsigned int mapsize;
1844 elf_aarch64_section_map *map;
1845 }
1846 _aarch64_elf_section_data;
1847
1848 #define elf_aarch64_section_data(sec) \
1849 ((_aarch64_elf_section_data *) elf_section_data (sec))
1850
1851 /* The size of the thread control block which is defined to be two pointers. */
1852 #define TCB_SIZE (ARCH_SIZE/8)*2
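/* For example, this evaluates to 16 bytes for ELF64 and 8 bytes for ELF32.  */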
1853
1854 struct elf_aarch64_local_symbol
1855 {
1856 unsigned int got_type;
1857 bfd_signed_vma got_refcount;
1858 bfd_vma got_offset;
1859
1860 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1861 offset is from the end of the jump table and reserved entries
1862 within the PLTGOT.
1863
1864 The magic value (bfd_vma) -1 indicates that an offset has not been
1865 allocated. */
1866 bfd_vma tlsdesc_got_jump_table_offset;
1867 };
1868
1869 struct elf_aarch64_obj_tdata
1870 {
1871 struct elf_obj_tdata root;
1872
1873 /* local symbol descriptors */
1874 struct elf_aarch64_local_symbol *locals;
1875
1876 /* Zero to warn when linking objects with incompatible enum sizes. */
1877 int no_enum_size_warning;
1878
1879 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1880 int no_wchar_size_warning;
1881 };
1882
1883 #define elf_aarch64_tdata(bfd) \
1884 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1885
1886 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1887
1888 #define is_aarch64_elf(bfd) \
1889 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1890 && elf_tdata (bfd) != NULL \
1891 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1892
1893 static bfd_boolean
1894 elfNN_aarch64_mkobject (bfd *abfd)
1895 {
1896 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1897 AARCH64_ELF_DATA);
1898 }
1899
1900 #define elf_aarch64_hash_entry(ent) \
1901 ((struct elf_aarch64_link_hash_entry *)(ent))
1902
1903 #define GOT_UNKNOWN 0
1904 #define GOT_NORMAL 1
1905 #define GOT_TLS_GD 2
1906 #define GOT_TLS_IE 4
1907 #define GOT_TLSDESC_GD 8
1908
1909 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
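/* These GOT_* values are combined as a bit mask; for example, a symbol
   (hypothetically) referenced through both a TLS descriptor and a
   traditional general dynamic sequence would have
   got_type == (GOT_TLSDESC_GD | GOT_TLS_GD), for which GOT_TLS_GD_ANY_P
   is true.  */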
1910
1911 /* AArch64 ELF linker hash entry. */
1912 struct elf_aarch64_link_hash_entry
1913 {
1914 struct elf_link_hash_entry root;
1915
1916 /* Track dynamic relocs copied for this symbol. */
1917 struct elf_dyn_relocs *dyn_relocs;
1918
1919 /* Since PLT entries have variable size, we need to record the
1920 index into .got.plt instead of recomputing it from the PLT
1921 offset. */
1922 bfd_signed_vma plt_got_offset;
1923
1924 /* Bit mask representing the type of GOT entry(s) if any required by
1925 this symbol. */
1926 unsigned int got_type;
1927
1928 /* A pointer to the most recently used stub hash entry against this
1929 symbol. */
1930 struct elf_aarch64_stub_hash_entry *stub_cache;
1931
1932 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1933 is from the end of the jump table and reserved entries within the PLTGOT.
1934
1935 The magic value (bfd_vma) -1 indicates that an offset has not
1936 been allocated. */
1937 bfd_vma tlsdesc_got_jump_table_offset;
1938 };
1939
1940 static unsigned int
1941 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1942 bfd *abfd,
1943 unsigned long r_symndx)
1944 {
1945 if (h)
1946 return elf_aarch64_hash_entry (h)->got_type;
1947
1948 if (! elf_aarch64_locals (abfd))
1949 return GOT_UNKNOWN;
1950
1951 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1952 }
1953
1954 /* Get the AArch64 elf linker hash table from a link_info structure. */
1955 #define elf_aarch64_hash_table(info) \
1956 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1957
1958 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1959 ((struct elf_aarch64_stub_hash_entry *) \
1960 bfd_hash_lookup ((table), (string), (create), (copy)))
1961
1962 /* AArch64 ELF linker hash table. */
1963 struct elf_aarch64_link_hash_table
1964 {
1965 /* The main hash table. */
1966 struct elf_link_hash_table root;
1967
1968 /* Nonzero to force PIC branch veneers. */
1969 int pic_veneer;
1970
1971 /* Fix erratum 835769. */
1972 int fix_erratum_835769;
1973
1974 /* Fix erratum 843419. */
1975 int fix_erratum_843419;
1976
1977 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1978 int fix_erratum_843419_adr;
1979
1980 /* The number of bytes in the initial entry in the PLT. */
1981 bfd_size_type plt_header_size;
1982
1983 /* The number of bytes in the subsequent PLT entries. */
1984 bfd_size_type plt_entry_size;
1985
1986 /* Short-cuts to get to dynamic linker sections. */
1987 asection *sdynbss;
1988 asection *srelbss;
1989
1990 /* Small local sym cache. */
1991 struct sym_cache sym_cache;
1992
1993 /* For convenience in allocate_dynrelocs. */
1994 bfd *obfd;
1995
1996 /* The amount of space used by the reserved portion of the sgotplt
1997 section, plus whatever space is used by the jump slots. */
1998 bfd_vma sgotplt_jump_table_size;
1999
2000 /* The stub hash table. */
2001 struct bfd_hash_table stub_hash_table;
2002
2003 /* Linker stub bfd. */
2004 bfd *stub_bfd;
2005
2006 /* Linker call-backs. */
2007 asection *(*add_stub_section) (const char *, asection *);
2008 void (*layout_sections_again) (void);
2009
2010 /* Array to keep track of which stub sections have been created, and
2011 information on stub grouping. */
2012 struct map_stub
2013 {
2014 /* This is the section to which stubs in the group will be
2015 attached. */
2016 asection *link_sec;
2017 /* The stub section. */
2018 asection *stub_sec;
2019 } *stub_group;
2020
2021 /* Assorted information used by elfNN_aarch64_size_stubs. */
2022 unsigned int bfd_count;
2023 int top_index;
2024 asection **input_list;
2025
2026 /* The offset into splt of the PLT entry for the TLS descriptor
2027 resolver. Special values are 0, if not necessary (or not found
2028 to be necessary yet), and -1 if needed but not determined
2029 yet. */
2030 bfd_vma tlsdesc_plt;
2031
2032 /* The GOT offset for the lazy trampoline. Communicated to the
2033 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2034 indicates an offset is not allocated. */
2035 bfd_vma dt_tlsdesc_got;
2036
2037 /* Used by local STT_GNU_IFUNC symbols. */
2038 htab_t loc_hash_table;
2039 void * loc_hash_memory;
2040 };
2041
2042 /* Create an entry in an AArch64 ELF linker hash table. */
2043
2044 static struct bfd_hash_entry *
2045 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2046 struct bfd_hash_table *table,
2047 const char *string)
2048 {
2049 struct elf_aarch64_link_hash_entry *ret =
2050 (struct elf_aarch64_link_hash_entry *) entry;
2051
2052 /* Allocate the structure if it has not already been allocated by a
2053 subclass. */
2054 if (ret == NULL)
2055 ret = bfd_hash_allocate (table,
2056 sizeof (struct elf_aarch64_link_hash_entry));
2057 if (ret == NULL)
2058 return (struct bfd_hash_entry *) ret;
2059
2060 /* Call the allocation method of the superclass. */
2061 ret = ((struct elf_aarch64_link_hash_entry *)
2062 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2063 table, string));
2064 if (ret != NULL)
2065 {
2066 ret->dyn_relocs = NULL;
2067 ret->got_type = GOT_UNKNOWN;
2068 ret->plt_got_offset = (bfd_vma) - 1;
2069 ret->stub_cache = NULL;
2070 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2071 }
2072
2073 return (struct bfd_hash_entry *) ret;
2074 }
2075
2076 /* Initialize an entry in the stub hash table. */
2077
2078 static struct bfd_hash_entry *
2079 stub_hash_newfunc (struct bfd_hash_entry *entry,
2080 struct bfd_hash_table *table, const char *string)
2081 {
2082 /* Allocate the structure if it has not already been allocated by a
2083 subclass. */
2084 if (entry == NULL)
2085 {
2086 entry = bfd_hash_allocate (table,
2087 sizeof (struct
2088 elf_aarch64_stub_hash_entry));
2089 if (entry == NULL)
2090 return entry;
2091 }
2092
2093 /* Call the allocation method of the superclass. */
2094 entry = bfd_hash_newfunc (entry, table, string);
2095 if (entry != NULL)
2096 {
2097 struct elf_aarch64_stub_hash_entry *eh;
2098
2099 /* Initialize the local fields. */
2100 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2101 eh->adrp_offset = 0;
2102 eh->stub_sec = NULL;
2103 eh->stub_offset = 0;
2104 eh->target_value = 0;
2105 eh->target_section = NULL;
2106 eh->stub_type = aarch64_stub_none;
2107 eh->h = NULL;
2108 eh->id_sec = NULL;
2109 }
2110
2111 return entry;
2112 }
2113
2114 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2115 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2116 as global symbols. We reuse indx and dynstr_index for the local
2117 symbol hash since they aren't used by global symbols in this backend. */
2118
2119 static hashval_t
2120 elfNN_aarch64_local_htab_hash (const void *ptr)
2121 {
2122 struct elf_link_hash_entry *h
2123 = (struct elf_link_hash_entry *) ptr;
2124 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2125 }
2126
2127 /* Compare local hash entries. */
2128
2129 static int
2130 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2131 {
2132 struct elf_link_hash_entry *h1
2133 = (struct elf_link_hash_entry *) ptr1;
2134 struct elf_link_hash_entry *h2
2135 = (struct elf_link_hash_entry *) ptr2;
2136
2137 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2138 }
2139
2140 /* Find and/or create a hash entry for a local symbol. */
2141
2142 static struct elf_link_hash_entry *
2143 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2144 bfd *abfd, const Elf_Internal_Rela *rel,
2145 bfd_boolean create)
2146 {
2147 struct elf_aarch64_link_hash_entry e, *ret;
2148 asection *sec = abfd->sections;
2149 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2150 ELFNN_R_SYM (rel->r_info));
2151 void **slot;
2152
2153 e.root.indx = sec->id;
2154 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2155 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2156 create ? INSERT : NO_INSERT);
2157
2158 if (!slot)
2159 return NULL;
2160
2161 if (*slot)
2162 {
2163 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2164 return &ret->root;
2165 }
2166
2167 ret = (struct elf_aarch64_link_hash_entry *)
2168 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2169 sizeof (struct elf_aarch64_link_hash_entry));
2170 if (ret)
2171 {
2172 memset (ret, 0, sizeof (*ret));
2173 ret->root.indx = sec->id;
2174 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2175 ret->root.dynindx = -1;
2176 *slot = ret;
2177 }
2178 return &ret->root;
2179 }
2180
2181 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2182
2183 static void
2184 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2185 struct elf_link_hash_entry *dir,
2186 struct elf_link_hash_entry *ind)
2187 {
2188 struct elf_aarch64_link_hash_entry *edir, *eind;
2189
2190 edir = (struct elf_aarch64_link_hash_entry *) dir;
2191 eind = (struct elf_aarch64_link_hash_entry *) ind;
2192
2193 if (eind->dyn_relocs != NULL)
2194 {
2195 if (edir->dyn_relocs != NULL)
2196 {
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 /* Add reloc counts against the indirect sym to the direct sym
2201 list. Merge any entries against the same section. */
2202 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2203 {
2204 struct elf_dyn_relocs *q;
2205
2206 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2207 if (q->sec == p->sec)
2208 {
2209 q->pc_count += p->pc_count;
2210 q->count += p->count;
2211 *pp = p->next;
2212 break;
2213 }
2214 if (q == NULL)
2215 pp = &p->next;
2216 }
2217 *pp = edir->dyn_relocs;
2218 }
2219
2220 edir->dyn_relocs = eind->dyn_relocs;
2221 eind->dyn_relocs = NULL;
2222 }
2223
2224 if (ind->root.type == bfd_link_hash_indirect)
2225 {
2226 /* Copy over PLT info. */
2227 if (dir->got.refcount <= 0)
2228 {
2229 edir->got_type = eind->got_type;
2230 eind->got_type = GOT_UNKNOWN;
2231 }
2232 }
2233
2234 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2235 }
2236
2237 /* Destroy an AArch64 elf linker hash table. */
2238
2239 static void
2240 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2241 {
2242 struct elf_aarch64_link_hash_table *ret
2243 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2244
2245 if (ret->loc_hash_table)
2246 htab_delete (ret->loc_hash_table);
2247 if (ret->loc_hash_memory)
2248 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2249
2250 bfd_hash_table_free (&ret->stub_hash_table);
2251 _bfd_elf_link_hash_table_free (obfd);
2252 }
2253
2254 /* Create an AArch64 elf linker hash table. */
2255
2256 static struct bfd_link_hash_table *
2257 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2258 {
2259 struct elf_aarch64_link_hash_table *ret;
2260 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2261
2262 ret = bfd_zmalloc (amt);
2263 if (ret == NULL)
2264 return NULL;
2265
2266 if (!_bfd_elf_link_hash_table_init
2267 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2268 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2269 {
2270 free (ret);
2271 return NULL;
2272 }
2273
2274 ret->plt_header_size = PLT_ENTRY_SIZE;
2275 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2276 ret->obfd = abfd;
2277 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2278
2279 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2280 sizeof (struct elf_aarch64_stub_hash_entry)))
2281 {
2282 _bfd_elf_link_hash_table_free (abfd);
2283 return NULL;
2284 }
2285
2286 ret->loc_hash_table = htab_try_create (1024,
2287 elfNN_aarch64_local_htab_hash,
2288 elfNN_aarch64_local_htab_eq,
2289 NULL);
2290 ret->loc_hash_memory = objalloc_create ();
2291 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2292 {
2293 elfNN_aarch64_link_hash_table_free (abfd);
2294 return NULL;
2295 }
2296 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2297
2298 return &ret->root.root;
2299 }
2300
2301 static bfd_boolean
2302 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2303 bfd_vma offset, bfd_vma value)
2304 {
2305 reloc_howto_type *howto;
2306 bfd_vma place;
2307
2308 howto = elfNN_aarch64_howto_from_type (r_type);
2309 place = (input_section->output_section->vma + input_section->output_offset
2310 + offset);
2311
2312 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2313 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2314 return _bfd_aarch64_elf_put_addend (input_bfd,
2315 input_section->contents + offset, r_type,
2316 howto, value);
2317 }
2318
2319 static enum elf_aarch64_stub_type
2320 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2321 {
2322 if (aarch64_valid_for_adrp_p (value, place))
2323 return aarch64_stub_adrp_branch;
2324 return aarch64_stub_long_branch;
2325 }
2326
2327 /* Determine the type of stub needed, if any, for a call. */
2328
2329 static enum elf_aarch64_stub_type
2330 aarch64_type_of_stub (struct bfd_link_info *info,
2331 asection *input_sec,
2332 const Elf_Internal_Rela *rel,
2333 asection *sym_sec,
2334 unsigned char st_type,
2335 struct elf_aarch64_link_hash_entry *hash,
2336 bfd_vma destination)
2337 {
2338 bfd_vma location;
2339 bfd_signed_vma branch_offset;
2340 unsigned int r_type;
2341 struct elf_aarch64_link_hash_table *globals;
2342 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2343 bfd_boolean via_plt_p;
2344
2345 if (st_type != STT_FUNC
2346 && (sym_sec != bfd_abs_section_ptr))
2347 return stub_type;
2348
2349 globals = elf_aarch64_hash_table (info);
2350 via_plt_p = (globals->root.splt != NULL && hash != NULL
2351 && hash->root.plt.offset != (bfd_vma) - 1);
2352 /* Make sure the call to the PLT stub fits within the branch range. */
2353 if (via_plt_p)
2354 destination = (globals->root.splt->output_section->vma
2355 + globals->root.splt->output_offset
2356 + hash->root.plt.offset);
2357
2358 /* Determine where the call point is. */
2359 location = (input_sec->output_offset
2360 + input_sec->output_section->vma + rel->r_offset);
2361
2362 branch_offset = (bfd_signed_vma) (destination - location);
2363
2364 r_type = ELFNN_R_TYPE (rel->r_info);
2365
2366 /* We don't want to redirect any old unconditional jump in this way,
2367 only one which is being used for a sibcall, where it is
2368 acceptable for the IP0 and IP1 registers to be clobbered. */
2369 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2370 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2371 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2372 {
2373 stub_type = aarch64_stub_long_branch;
2374 }
2375
2376 return stub_type;
2377 }
2378
2379 /* Build a name for an entry in the stub hash table. */
2380
2381 static char *
2382 elfNN_aarch64_stub_name (const asection *input_section,
2383 const asection *sym_sec,
2384 const struct elf_aarch64_link_hash_entry *hash,
2385 const Elf_Internal_Rela *rel)
2386 {
2387 char *stub_name;
2388 bfd_size_type len;
2389
2390 if (hash)
2391 {
2392 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2393 stub_name = bfd_malloc (len);
2394 if (stub_name != NULL)
2395 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2396 (unsigned int) input_section->id,
2397 hash->root.root.root.string,
2398 rel->r_addend);
2399 }
2400 else
2401 {
2402 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2403 stub_name = bfd_malloc (len);
2404 if (stub_name != NULL)
2405 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2406 (unsigned int) input_section->id,
2407 (unsigned int) sym_sec->id,
2408 (unsigned int) ELFNN_R_SYM (rel->r_info),
2409 rel->r_addend);
2410 }
2411
2412 return stub_name;
2413 }
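/* For illustration (with made-up section ids and addends), the formats above
   yield names such as "0000002a_printf+0" for a stub against the global
   symbol printf, or "0000002a_17:3+0" for a stub against local symbol
   index 3 in the section with id 0x17.  */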
2414
2415 /* Look up an entry in the stub hash. Stub entries are cached because
2416 creating the stub name takes a bit of time. */
2417
2418 static struct elf_aarch64_stub_hash_entry *
2419 elfNN_aarch64_get_stub_entry (const asection *input_section,
2420 const asection *sym_sec,
2421 struct elf_link_hash_entry *hash,
2422 const Elf_Internal_Rela *rel,
2423 struct elf_aarch64_link_hash_table *htab)
2424 {
2425 struct elf_aarch64_stub_hash_entry *stub_entry;
2426 struct elf_aarch64_link_hash_entry *h =
2427 (struct elf_aarch64_link_hash_entry *) hash;
2428 const asection *id_sec;
2429
2430 if ((input_section->flags & SEC_CODE) == 0)
2431 return NULL;
2432
2433 /* If this input section is part of a group of sections sharing one
2434 stub section, then use the id of the first section in the group.
2435 Stub names need to include a section id, as there may well be
2436 more than one stub used to reach, say, printf, and we need to
2437 distinguish between them. */
2438 id_sec = htab->stub_group[input_section->id].link_sec;
2439
2440 if (h != NULL && h->stub_cache != NULL
2441 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2442 {
2443 stub_entry = h->stub_cache;
2444 }
2445 else
2446 {
2447 char *stub_name;
2448
2449 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2450 if (stub_name == NULL)
2451 return NULL;
2452
2453 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2454 stub_name, FALSE, FALSE);
2455 if (h != NULL)
2456 h->stub_cache = stub_entry;
2457
2458 free (stub_name);
2459 }
2460
2461 return stub_entry;
2462 }
2463
2464
2465 /* Create a stub section. */
2466
2467 static asection *
2468 _bfd_aarch64_create_stub_section (asection *section,
2469 struct elf_aarch64_link_hash_table *htab)
2470 {
2471 size_t namelen;
2472 bfd_size_type len;
2473 char *s_name;
2474
2475 namelen = strlen (section->name);
2476 len = namelen + sizeof (STUB_SUFFIX);
2477 s_name = bfd_alloc (htab->stub_bfd, len);
2478 if (s_name == NULL)
2479 return NULL;
2480
2481 memcpy (s_name, section->name, namelen);
2482 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2483 return (*htab->add_stub_section) (s_name, section);
2484 }
2485
2486
2487 /* Find or create a stub section for a link section.
2488
2489 Find or create the stub section used to collect stubs attached to
2490 the specified link section. */
2491
2492 static asection *
2493 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2494 struct elf_aarch64_link_hash_table *htab)
2495 {
2496 if (htab->stub_group[link_section->id].stub_sec == NULL)
2497 htab->stub_group[link_section->id].stub_sec
2498 = _bfd_aarch64_create_stub_section (link_section, htab);
2499 return htab->stub_group[link_section->id].stub_sec;
2500 }
2501
2502
2503 /* Find or create a stub section in the stub group for an input
2504 section. */
2505
2506 static asection *
2507 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2508 struct elf_aarch64_link_hash_table *htab)
2509 {
2510 asection *link_sec = htab->stub_group[section->id].link_sec;
2511 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2512 }
2513
2514
2515 /* Add a new stub entry in the stub group associated with an input
2516 section to the stub hash. Not all fields of the new stub entry are
2517 initialised. */
2518
2519 static struct elf_aarch64_stub_hash_entry *
2520 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2521 asection *section,
2522 struct elf_aarch64_link_hash_table *htab)
2523 {
2524 asection *link_sec;
2525 asection *stub_sec;
2526 struct elf_aarch64_stub_hash_entry *stub_entry;
2527
2528 link_sec = htab->stub_group[section->id].link_sec;
2529 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2530
2531 /* Enter this entry into the linker stub hash table. */
2532 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2533 TRUE, FALSE);
2534 if (stub_entry == NULL)
2535 {
2536 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2537 section->owner, stub_name);
2538 return NULL;
2539 }
2540
2541 stub_entry->stub_sec = stub_sec;
2542 stub_entry->stub_offset = 0;
2543 stub_entry->id_sec = link_sec;
2544
2545 return stub_entry;
2546 }
2547
2548 /* Add a new stub entry in the final stub section to the stub hash.
2549 Not all fields of the new stub entry are initialised. */
2550
2551 static struct elf_aarch64_stub_hash_entry *
2552 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2553 asection *link_section,
2554 struct elf_aarch64_link_hash_table *htab)
2555 {
2556 asection *stub_sec;
2557 struct elf_aarch64_stub_hash_entry *stub_entry;
2558
2559 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2560 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2561 TRUE, FALSE);
2562 if (stub_entry == NULL)
2563 {
2564 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2565 return NULL;
2566 }
2567
2568 stub_entry->stub_sec = stub_sec;
2569 stub_entry->stub_offset = 0;
2570 stub_entry->id_sec = link_section;
2571
2572 return stub_entry;
2573 }
2574
2575
2576 static bfd_boolean
2577 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2578 void *in_arg ATTRIBUTE_UNUSED)
2579 {
2580 struct elf_aarch64_stub_hash_entry *stub_entry;
2581 asection *stub_sec;
2582 bfd *stub_bfd;
2583 bfd_byte *loc;
2584 bfd_vma sym_value;
2585 bfd_vma veneered_insn_loc;
2586 bfd_vma veneer_entry_loc;
2587 bfd_signed_vma branch_offset = 0;
2588 unsigned int template_size;
2589 const uint32_t *template;
2590 unsigned int i;
2591
2592 /* Massage our args to the form they really have. */
2593 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2594
2595 stub_sec = stub_entry->stub_sec;
2596
2597 /* Make a note of the offset within the stubs for this entry. */
2598 stub_entry->stub_offset = stub_sec->size;
2599 loc = stub_sec->contents + stub_entry->stub_offset;
2600
2601 stub_bfd = stub_sec->owner;
2602
2603 /* This is the address of the stub destination. */
2604 sym_value = (stub_entry->target_value
2605 + stub_entry->target_section->output_offset
2606 + stub_entry->target_section->output_section->vma);
2607
2608 if (stub_entry->stub_type == aarch64_stub_long_branch)
2609 {
2610 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2611 + stub_sec->output_offset);
2612
2613 /* See if we can relax the stub. */
2614 if (aarch64_valid_for_adrp_p (sym_value, place))
2615 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2616 }
2617
2618 switch (stub_entry->stub_type)
2619 {
2620 case aarch64_stub_adrp_branch:
2621 template = aarch64_adrp_branch_stub;
2622 template_size = sizeof (aarch64_adrp_branch_stub);
2623 break;
2624 case aarch64_stub_long_branch:
2625 template = aarch64_long_branch_stub;
2626 template_size = sizeof (aarch64_long_branch_stub);
2627 break;
2628 case aarch64_stub_erratum_835769_veneer:
2629 template = aarch64_erratum_835769_stub;
2630 template_size = sizeof (aarch64_erratum_835769_stub);
2631 break;
2632 case aarch64_stub_erratum_843419_veneer:
2633 template = aarch64_erratum_843419_stub;
2634 template_size = sizeof (aarch64_erratum_843419_stub);
2635 break;
2636 default:
2637 abort ();
2638 }
2639
2640 for (i = 0; i < (template_size / sizeof template[0]); i++)
2641 {
2642 bfd_putl32 (template[i], loc);
2643 loc += 4;
2644 }
2645
2646 template_size = (template_size + 7) & ~7;
2647 stub_sec->size += template_size;
2648
2649 switch (stub_entry->stub_type)
2650 {
2651 case aarch64_stub_adrp_branch:
2652 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2653 stub_entry->stub_offset, sym_value))
2654 /* The stub would not have been relaxed if the offset was out
2655 of range. */
2656 BFD_FAIL ();
2657
2658 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2659 stub_entry->stub_offset + 4, sym_value))
2660 BFD_FAIL ();
2661 break;
2662
2663 case aarch64_stub_long_branch:
2664 /* We want the value relative to the address 12 bytes back from the
2665 value itself. */
2666 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2667 stub_entry->stub_offset + 16, sym_value + 12))
2668 BFD_FAIL ();
2669 break;
2670
2671 case aarch64_stub_erratum_835769_veneer:
2672 veneered_insn_loc = stub_entry->target_section->output_section->vma
2673 + stub_entry->target_section->output_offset
2674 + stub_entry->target_value;
2675 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2676 + stub_entry->stub_sec->output_offset
2677 + stub_entry->stub_offset;
2678 branch_offset = veneered_insn_loc - veneer_entry_loc;
2679 branch_offset >>= 2;
2680 branch_offset &= 0x3ffffff;
2681 bfd_putl32 (stub_entry->veneered_insn,
2682 stub_sec->contents + stub_entry->stub_offset);
2683 bfd_putl32 (template[1] | branch_offset,
2684 stub_sec->contents + stub_entry->stub_offset + 4);
2685 break;
2686
2687 case aarch64_stub_erratum_843419_veneer:
2688 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2689 stub_entry->stub_offset + 4, sym_value + 4))
2690 BFD_FAIL ();
2691 break;
2692
2693 default:
2694 abort ();
2695 }
2696
2697 return TRUE;
2698 }
2699
2700 /* As above, but don't actually build the stub. Just bump offset so
2701 we know stub section sizes. */
2702
2703 static bfd_boolean
2704 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2705 void *in_arg ATTRIBUTE_UNUSED)
2706 {
2707 struct elf_aarch64_stub_hash_entry *stub_entry;
2708 int size;
2709
2710 /* Massage our args to the form they really have. */
2711 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2712
2713 switch (stub_entry->stub_type)
2714 {
2715 case aarch64_stub_adrp_branch:
2716 size = sizeof (aarch64_adrp_branch_stub);
2717 break;
2718 case aarch64_stub_long_branch:
2719 size = sizeof (aarch64_long_branch_stub);
2720 break;
2721 case aarch64_stub_erratum_835769_veneer:
2722 size = sizeof (aarch64_erratum_835769_stub);
2723 break;
2724 case aarch64_stub_erratum_843419_veneer:
2725 size = sizeof (aarch64_erratum_843419_stub);
2726 break;
2727 default:
2728 abort ();
2729 }
2730
2731 size = (size + 7) & ~7;
2732 stub_entry->stub_sec->size += size;
2733 return TRUE;
2734 }
2735
2736 /* External entry points for sizing and building linker stubs. */
2737
2738 /* Set up various things so that we can make a list of input sections
2739 for each output section included in the link. Returns -1 on error,
2740 0 when no stubs will be needed, and 1 on success. */
2741
2742 int
2743 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2744 struct bfd_link_info *info)
2745 {
2746 bfd *input_bfd;
2747 unsigned int bfd_count;
2748 int top_id, top_index;
2749 asection *section;
2750 asection **input_list, **list;
2751 bfd_size_type amt;
2752 struct elf_aarch64_link_hash_table *htab =
2753 elf_aarch64_hash_table (info);
2754
2755 if (!is_elf_hash_table (htab))
2756 return 0;
2757
2758 /* Count the number of input BFDs and find the top input section id. */
2759 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2760 input_bfd != NULL; input_bfd = input_bfd->link.next)
2761 {
2762 bfd_count += 1;
2763 for (section = input_bfd->sections;
2764 section != NULL; section = section->next)
2765 {
2766 if (top_id < section->id)
2767 top_id = section->id;
2768 }
2769 }
2770 htab->bfd_count = bfd_count;
2771
2772 amt = sizeof (struct map_stub) * (top_id + 1);
2773 htab->stub_group = bfd_zmalloc (amt);
2774 if (htab->stub_group == NULL)
2775 return -1;
2776
2777 /* We can't use output_bfd->section_count here to find the top output
2778 section index as some sections may have been removed, and
2779 _bfd_strip_section_from_output doesn't renumber the indices. */
2780 for (section = output_bfd->sections, top_index = 0;
2781 section != NULL; section = section->next)
2782 {
2783 if (top_index < section->index)
2784 top_index = section->index;
2785 }
2786
2787 htab->top_index = top_index;
2788 amt = sizeof (asection *) * (top_index + 1);
2789 input_list = bfd_malloc (amt);
2790 htab->input_list = input_list;
2791 if (input_list == NULL)
2792 return -1;
2793
2794 /* For sections we aren't interested in, mark their entries with a
2795 value we can check later. */
2796 list = input_list + top_index;
2797 do
2798 *list = bfd_abs_section_ptr;
2799 while (list-- != input_list);
2800
2801 for (section = output_bfd->sections;
2802 section != NULL; section = section->next)
2803 {
2804 if ((section->flags & SEC_CODE) != 0)
2805 input_list[section->index] = NULL;
2806 }
2807
2808 return 1;
2809 }
2810
2811 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2812 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2813
2814 /* The linker repeatedly calls this function for each input section,
2815 in the order that input sections are linked into output sections.
2816 Build lists of input sections to determine groupings between which
2817 we may insert linker stubs. */
2818
2819 void
2820 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2821 {
2822 struct elf_aarch64_link_hash_table *htab =
2823 elf_aarch64_hash_table (info);
2824
2825 if (isec->output_section->index <= htab->top_index)
2826 {
2827 asection **list = htab->input_list + isec->output_section->index;
2828
2829 if (*list != bfd_abs_section_ptr)
2830 {
2831 /* Steal the link_sec pointer for our list. */
2832 /* This happens to make the list in reverse order,
2833 which is what we want. */
2834 PREV_SEC (isec) = *list;
2835 *list = isec;
2836 }
2837 }
2838 }
2839
2840 /* See whether we can group stub sections together. Grouping stub
2841 sections may result in fewer stubs. More importantly, we need to
2842 put all .init* and .fini* stubs at the beginning of the .init or
2843 .fini output sections respectively, because glibc splits the
2844 _init and _fini functions into multiple parts. Putting a stub in
2845 the middle of a function is not a good idea. */
2846
2847 static void
2848 group_sections (struct elf_aarch64_link_hash_table *htab,
2849 bfd_size_type stub_group_size,
2850 bfd_boolean stubs_always_before_branch)
2851 {
2852 asection **list = htab->input_list + htab->top_index;
2853
2854 do
2855 {
2856 asection *tail = *list;
2857
2858 if (tail == bfd_abs_section_ptr)
2859 continue;
2860
2861 while (tail != NULL)
2862 {
2863 asection *curr;
2864 asection *prev;
2865 bfd_size_type total;
2866
2867 curr = tail;
2868 total = tail->size;
2869 while ((prev = PREV_SEC (curr)) != NULL
2870 && ((total += curr->output_offset - prev->output_offset)
2871 < stub_group_size))
2872 curr = prev;
2873
2874 /* OK, the size from the start of CURR to the end is less
2875 than stub_group_size and thus can be handled by one stub
2876 section. (Or the tail section is itself larger than
2877 stub_group_size, in which case we may be toast.)
2878 We should really be keeping track of the total size of
2879 stubs added here, as stubs contribute to the final output
2880 section size. */
2881 do
2882 {
2883 prev = PREV_SEC (tail);
2884 /* Set up this stub group. */
2885 htab->stub_group[tail->id].link_sec = curr;
2886 }
2887 while (tail != curr && (tail = prev) != NULL);
2888
2889 /* But wait, there's more! Input sections up to stub_group_size
2890 bytes before the stub section can be handled by it too. */
2891 if (!stubs_always_before_branch)
2892 {
2893 total = 0;
2894 while (prev != NULL
2895 && ((total += tail->output_offset - prev->output_offset)
2896 < stub_group_size))
2897 {
2898 tail = prev;
2899 prev = PREV_SEC (tail);
2900 htab->stub_group[tail->id].link_sec = curr;
2901 }
2902 }
2903 tail = prev;
2904 }
2905 }
2906 while (list-- != htab->input_list);
2907
2908 free (htab->input_list);
2909 }
2910
2911 #undef PREV_SEC
2912
2913 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2914
2915 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2916 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2917 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2918 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2919 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2920 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2921
2922 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2923 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2924 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2925 #define AARCH64_ZR 0x1f
2926
2927 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding spaces for
2928 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2929
2930 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2931 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2932 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2933 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2934 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2935 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2936 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2937 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2938 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2939 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2940 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2941 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2942 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2943 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2944 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2945 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2946 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2947 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2948
2949 /* Classify INSN as a load/store, if it is indeed one.
2950 
2951 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2952 
2953 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2954 is set equal to RT.
2955 
2956 For LD/ST pair instructions PAIR is TRUE, and both RT and RT2 are
2957 returned.
2958 */
2959
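/* Illustrative examples: for "ldp x0, x1, [x2]" this returns PAIR = TRUE,
   LOAD = TRUE, RT = 0 and RT2 = 1; for "str x5, [x6]" it returns
   PAIR = FALSE, LOAD = FALSE and RT = RT2 = 5.  */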
2960 static bfd_boolean
2961 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2962 bfd_boolean *pair, bfd_boolean *load)
2963 {
2964 uint32_t opcode;
2965 unsigned int r;
2966 uint32_t opc = 0;
2967 uint32_t v = 0;
2968 uint32_t opc_v = 0;
2969
2970 /* Bail out quickly if INSN doesn't fall into the load-store
2971 encoding space. */
2972 if (!AARCH64_LDST (insn))
2973 return FALSE;
2974
2975 *pair = FALSE;
2976 *load = FALSE;
2977 if (AARCH64_LDST_EX (insn))
2978 {
2979 *rt = AARCH64_RT (insn);
2980 *rt2 = *rt;
2981 if (AARCH64_BIT (insn, 21) == 1)
2982 {
2983 *pair = TRUE;
2984 *rt2 = AARCH64_RT2 (insn);
2985 }
2986 *load = AARCH64_LD (insn);
2987 return TRUE;
2988 }
2989 else if (AARCH64_LDST_NAP (insn)
2990 || AARCH64_LDSTP_PI (insn)
2991 || AARCH64_LDSTP_O (insn)
2992 || AARCH64_LDSTP_PRE (insn))
2993 {
2994 *pair = TRUE;
2995 *rt = AARCH64_RT (insn);
2996 *rt2 = AARCH64_RT2 (insn);
2997 *load = AARCH64_LD (insn);
2998 return TRUE;
2999 }
3000 else if (AARCH64_LDST_PCREL (insn)
3001 || AARCH64_LDST_UI (insn)
3002 || AARCH64_LDST_PIIMM (insn)
3003 || AARCH64_LDST_U (insn)
3004 || AARCH64_LDST_PREIMM (insn)
3005 || AARCH64_LDST_RO (insn)
3006 || AARCH64_LDST_UIMM (insn))
3007 {
3008 *rt = AARCH64_RT (insn);
3009 *rt2 = *rt;
3010 if (AARCH64_LDST_PCREL (insn))
3011 *load = TRUE;
3012 opc = AARCH64_BITS (insn, 22, 2);
3013 v = AARCH64_BIT (insn, 26);
3014 opc_v = opc | (v << 2);
3015 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3016 || opc_v == 5 || opc_v == 7);
3017 return TRUE;
3018 }
3019 else if (AARCH64_LDST_SIMD_M (insn)
3020 || AARCH64_LDST_SIMD_M_PI (insn))
3021 {
3022 *rt = AARCH64_RT (insn);
3023 *load = AARCH64_BIT (insn, 22);
3024 opcode = (insn >> 12) & 0xf;
3025 switch (opcode)
3026 {
3027 case 0:
3028 case 2:
3029 *rt2 = *rt + 3;
3030 break;
3031
3032 case 4:
3033 case 6:
3034 *rt2 = *rt + 2;
3035 break;
3036
3037 case 7:
3038 *rt2 = *rt;
3039 break;
3040
3041 case 8:
3042 case 10:
3043 *rt2 = *rt + 1;
3044 break;
3045
3046 default:
3047 return FALSE;
3048 }
3049 return TRUE;
3050 }
3051 else if (AARCH64_LDST_SIMD_S (insn)
3052 || AARCH64_LDST_SIMD_S_PI (insn))
3053 {
3054 *rt = AARCH64_RT (insn);
3055 r = (insn >> 21) & 1;
3056 *load = AARCH64_BIT (insn, 22);
3057 opcode = (insn >> 13) & 0x7;
3058 switch (opcode)
3059 {
3060 case 0:
3061 case 2:
3062 case 4:
3063 *rt2 = *rt + r;
3064 break;
3065
3066 case 1:
3067 case 3:
3068 case 5:
3069 *rt2 = *rt + (r == 0 ? 2 : 3);
3070 break;
3071
3072 case 6:
3073 *rt2 = *rt + r;
3074 break;
3075
3076 case 7:
3077 *rt2 = *rt + (r == 0 ? 2 : 3);
3078 break;
3079
3080 default:
3081 return FALSE;
3082 }
3083 return TRUE;
3084 }
3085
3086 return FALSE;
3087 }
3088
3089 /* Return TRUE if INSN is multiply-accumulate. */
3090
3091 static bfd_boolean
3092 aarch64_mlxl_p (uint32_t insn)
3093 {
3094 uint32_t op31 = AARCH64_OP31 (insn);
3095
3096 if (AARCH64_MAC (insn)
3097 && (op31 == 0 || op31 == 1 || op31 == 5)
3098 /* Exclude MUL instructions which are encoded as a multiply-accumulate
3099 with RA = XZR. */
3100 && AARCH64_RA (insn) != AARCH64_ZR)
3101 return TRUE;
3102
3103 return FALSE;
3104 }
3105
3106 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3107 it is possible for a 64-bit multiply-accumulate instruction to generate an
3108 incorrect result. The details are quite complex and hard to
3109 determine statically, since branches in the code may exist in some
3110 circumstances, but all cases end with a memory (load, store, or
3111 prefetch) instruction followed immediately by the multiply-accumulate
3112 operation. We employ a linker patching technique, by moving the potentially
3113 affected multiply-accumulate instruction into a patch region and replacing
3114 the original instruction with a branch to the patch. This function checks
3115 if INSN_1 is the memory operation followed by a multiply-accumulate
3116 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3117 if INSN_1 and INSN_2 are safe. */
3118
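/* A hypothetical instance of the sequence checked for below:

     ldr  x10, [x2]          <- 64-bit memory operation (INSN_1)
     madd x0, x1, x4, x5     <- 64-bit multiply-accumulate (INSN_2)

   The multiply-accumulate does not consume the loaded value, so there is no
   RAW dependency and the pair is reported as an erratum sequence; the MADD
   will later be moved into an aarch64_erratum_835769_stub veneer and
   replaced by a branch to that veneer.  */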
3119 static bfd_boolean
3120 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3121 {
3122 uint32_t rt;
3123 uint32_t rt2;
3124 uint32_t rn;
3125 uint32_t rm;
3126 uint32_t ra;
3127 bfd_boolean pair;
3128 bfd_boolean load;
3129
3130 if (aarch64_mlxl_p (insn_2)
3131 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3132 {
3133 /* Any SIMD memory op is independent of the subsequent MLA
3134 by definition of the erratum. */
3135 if (AARCH64_BIT (insn_1, 26))
3136 return TRUE;
3137
3138 /* If not SIMD, check for integer memory ops and MLA relationship. */
3139 rn = AARCH64_RN (insn_2);
3140 ra = AARCH64_RA (insn_2);
3141 rm = AARCH64_RM (insn_2);
3142
3143 /* If this is a load and there's a true (RAW) dependency, we are safe
3144 and this is not an erratum sequence. */
3145 if (load &&
3146 (rt == rn || rt == rm || rt == ra
3147 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3148 return FALSE;
3149
3150 /* We conservatively put out stubs for all other cases (including
3151 writebacks). */
3152 return TRUE;
3153 }
3154
3155 return FALSE;
3156 }
3157
3158 /* Used to order a list of mapping symbols by address. */
3159
3160 static int
3161 elf_aarch64_compare_mapping (const void *a, const void *b)
3162 {
3163 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3164 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3165
3166 if (amap->vma > bmap->vma)
3167 return 1;
3168 else if (amap->vma < bmap->vma)
3169 return -1;
3170 else if (amap->type > bmap->type)
3171 /* Ensure results do not depend on the host qsort for objects with
3172 multiple mapping symbols at the same address by sorting on type
3173 after vma. */
3174 return 1;
3175 else if (amap->type < bmap->type)
3176 return -1;
3177 else
3178 return 0;
3179 }
3180
3181
3182 static char *
3183 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3184 {
3185 char *stub_name = (char *) bfd_malloc
3186 (strlen ("__erratum_835769_veneer_") + 16);
3187 if (stub_name != NULL)
     sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3188 return stub_name;
3189 }
3190
3191 /* Scan for Cortex-A53 erratum 835769 sequence.
3192
3193 Return TRUE on success, FALSE on abnormal termination. */
3194
3195 static bfd_boolean
3196 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3197 struct bfd_link_info *info,
3198 unsigned int *num_fixes_p)
3199 {
3200 asection *section;
3201 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3202 unsigned int num_fixes = *num_fixes_p;
3203
3204 if (htab == NULL)
3205 return TRUE;
3206
3207 for (section = input_bfd->sections;
3208 section != NULL;
3209 section = section->next)
3210 {
3211 bfd_byte *contents = NULL;
3212 struct _aarch64_elf_section_data *sec_data;
3213 unsigned int span;
3214
3215 if (elf_section_type (section) != SHT_PROGBITS
3216 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3217 || (section->flags & SEC_EXCLUDE) != 0
3218 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3219 || (section->output_section == bfd_abs_section_ptr))
3220 continue;
3221
3222 if (elf_section_data (section)->this_hdr.contents != NULL)
3223 contents = elf_section_data (section)->this_hdr.contents;
3224 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3225 return FALSE;
3226
3227 sec_data = elf_aarch64_section_data (section);
3228
3229 qsort (sec_data->map, sec_data->mapcount,
3230 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3231
3232 for (span = 0; span < sec_data->mapcount; span++)
3233 {
3234 unsigned int span_start = sec_data->map[span].vma;
3235 unsigned int span_end = ((span == sec_data->mapcount - 1)
3236 ? sec_data->map[0].vma + section->size
3237 : sec_data->map[span + 1].vma);
3238 unsigned int i;
3239 char span_type = sec_data->map[span].type;
3240
3241 if (span_type == 'd')
3242 continue;
3243
3244 for (i = span_start; i + 4 < span_end; i += 4)
3245 {
3246 uint32_t insn_1 = bfd_getl32 (contents + i);
3247 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3248
3249 if (aarch64_erratum_sequence (insn_1, insn_2))
3250 {
3251 struct elf_aarch64_stub_hash_entry *stub_entry;
3252 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3253 if (! stub_name)
3254 return FALSE;
3255
3256 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3257 section,
3258 htab);
3259 if (! stub_entry)
3260 return FALSE;
3261
3262 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3263 stub_entry->target_section = section;
3264 stub_entry->target_value = i + 4;
3265 stub_entry->veneered_insn = insn_2;
3266 stub_entry->output_name = stub_name;
3267 num_fixes++;
3268 }
3269 }
3270 }
3271 if (elf_section_data (section)->this_hdr.contents == NULL)
3272 free (contents);
3273 }
3274
3275 *num_fixes_p = num_fixes;
3276
3277 return TRUE;
3278 }
3279
3280
3281 /* Test if instruction INSN is ADRP. */
3282
3283 static bfd_boolean
3284 _bfd_aarch64_adrp_p (uint32_t insn)
3285 {
3286 return ((insn & 0x9f000000) == 0x90000000);
3287 }
3288
3289
3290 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3291
3292 static bfd_boolean
3293 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3294 uint32_t insn_3)
3295 {
3296 uint32_t rt;
3297 uint32_t rt2;
3298 bfd_boolean pair;
3299 bfd_boolean load;
3300
3301 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3302 && (!pair
3303 || (pair && !load))
3304 && AARCH64_LDST_UIMM (insn_3)
3305 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3306 }
3307
3308
3309 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3310
3311 Return TRUE if section CONTENTS at offset I contains one of the
3312 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3313 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3314 instruction in the sequence.
3315 */
3316
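/* A hypothetical example of an affected sequence, assuming the ADRP lands at
   an address whose low 12 bits are 0xff8 or 0xffc:

     adrp x0, sym                <- INSN_1
     ldr  x1, [x3]               <- INSN_2, any ld/st that is not a load-pair
     ldr  x2, [x0, #:lo12:sym]   <- INSN_3, unsigned-immediate ld/st whose
                                    base register is the ADRP destination

   The four-instruction variant checked below is the same except that one
   unrelated instruction may sit between INSN_2 and the final load/store.  */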
3317 static bfd_boolean
3318 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3319 bfd_vma i, bfd_vma span_end,
3320 bfd_vma *p_veneer_i)
3321 {
3322 uint32_t insn_1 = bfd_getl32 (contents + i);
3323
3324 if (!_bfd_aarch64_adrp_p (insn_1))
3325 return FALSE;
3326
3327 if (span_end < i + 12)
3328 return FALSE;
3329
3330 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3331 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3332
3333 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3334 return FALSE;
3335
3336 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3337 {
3338 *p_veneer_i = i + 8;
3339 return TRUE;
3340 }
3341
3342 if (span_end < i + 16)
3343 return FALSE;
3344
3345 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3346
3347 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3348 {
3349 *p_veneer_i = i + 12;
3350 return TRUE;
3351 }
3352
3353 return FALSE;
3354 }
3355
3356
3357 /* Resize all stub sections. */
3358
3359 static void
3360 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3361 {
3362 asection *section;
3363
3364 /* OK, we've added some stubs. Find out the new size of the
3365 stub sections. */
3366 for (section = htab->stub_bfd->sections;
3367 section != NULL; section = section->next)
3368 {
3369 /* Ignore non-stub sections. */
3370 if (!strstr (section->name, STUB_SUFFIX))
3371 continue;
3372 section->size = 0;
3373 }
3374
3375 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3376
3377 for (section = htab->stub_bfd->sections;
3378 section != NULL; section = section->next)
3379 {
3380 if (!strstr (section->name, STUB_SUFFIX))
3381 continue;
3382
3383 if (section->size)
3384 section->size += 4;
3385
3386 /* Ensure all stub sections have a size which is a multiple of
3387 4096. This is important in order to ensure that the insertion
3388 of stub sections does not in itself move existing code around
3389 in such a way that new errata sequences are created. */
3390 if (htab->fix_erratum_843419)
3391 if (section->size)
3392 section->size = BFD_ALIGN (section->size, 0x1000);
3393 }
3394 }
3395
3396
3397 /* Construct an erratum 843419 workaround stub name.
3398 */
3399
3400 static char *
3401 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3402 bfd_vma offset)
3403 {
3404 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3405 char *stub_name = bfd_malloc (len);
3406
3407 if (stub_name != NULL)
3408 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3409 input_section->owner->id,
3410 input_section->id,
3411 offset);
3412 return stub_name;
3413 }
3414
3415 /* Build a stub_entry structure describing an 843419 fixup.
3416
3417 The stub_entry constructed is populated with the bit pattern INSN
3418 of the instruction located at LDST_OFFSET within input SECTION.
3419
3420 Returns TRUE on success. */
3421
3422 static bfd_boolean
3423 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3424 bfd_vma adrp_offset,
3425 bfd_vma ldst_offset,
3426 asection *section,
3427 struct bfd_link_info *info)
3428 {
3429 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3430 char *stub_name;
3431 struct elf_aarch64_stub_hash_entry *stub_entry;
3432
3433 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3434 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3435 FALSE, FALSE);
3436 if (stub_entry)
3437 {
3438 free (stub_name);
3439 return TRUE;
3440 }
3441
3442 /* We always place an 843419 workaround veneer in the stub section
3443 attached to the input section in which an erratum sequence has
3444 been found. This ensures that later in the link process (in
3445 elfNN_aarch64_write_section) when we copy the veneered
3446 instruction from the input section into the stub section the
3447 copied instruction will have had any relocations applied to it.
3448 If we placed workaround veneers in any other stub section then we
3449 could not assume that all relocations have been processed on the
3450 corresponding input section at the point we output the stub
3451 section.
3452 */
3453
3454 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3455 if (stub_entry == NULL)
3456 {
3457 free (stub_name);
3458 return FALSE;
3459 }
3460
3461 stub_entry->adrp_offset = adrp_offset;
3462 stub_entry->target_value = ldst_offset;
3463 stub_entry->target_section = section;
3464 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3465 stub_entry->veneered_insn = insn;
3466 stub_entry->output_name = stub_name;
3467
3468 return TRUE;
3469 }
3470
3471
3472 /* Scan an input section looking for the signature of erratum 843419.
3473
3474 Scans input SECTION in INPUT_BFD looking for erratum 843419
3475 signatures, for each signature found a stub_entry is created
3476 describing the location of the erratum for subsequent fixup.
3477
3478 Return TRUE on successful scan, FALSE on failure to scan.
3479 */
3480
3481 static bfd_boolean
3482 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3483 struct bfd_link_info *info)
3484 {
3485 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3486
3487 if (htab == NULL)
3488 return TRUE;
3489
3490 if (elf_section_type (section) != SHT_PROGBITS
3491 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3492 || (section->flags & SEC_EXCLUDE) != 0
3493 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3494 || (section->output_section == bfd_abs_section_ptr))
3495 return TRUE;
3496
3497 do
3498 {
3499 bfd_byte *contents = NULL;
3500 struct _aarch64_elf_section_data *sec_data;
3501 unsigned int span;
3502
3503 if (elf_section_data (section)->this_hdr.contents != NULL)
3504 contents = elf_section_data (section)->this_hdr.contents;
3505 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3506 return FALSE;
3507
3508 sec_data = elf_aarch64_section_data (section);
3509
3510 qsort (sec_data->map, sec_data->mapcount,
3511 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3512
3513 for (span = 0; span < sec_data->mapcount; span++)
3514 {
3515 unsigned int span_start = sec_data->map[span].vma;
3516 unsigned int span_end = ((span == sec_data->mapcount - 1)
3517 ? sec_data->map[0].vma + section->size
3518 : sec_data->map[span + 1].vma);
3519 unsigned int i;
3520 char span_type = sec_data->map[span].type;
3521
3522 if (span_type == 'd')
3523 continue;
3524
3525 for (i = span_start; i + 8 < span_end; i += 4)
3526 {
3527 bfd_vma vma = (section->output_section->vma
3528 + section->output_offset
3529 + i);
3530 bfd_vma veneer_i;
3531
3532 if (_bfd_aarch64_erratum_843419_p
3533 (contents, vma, i, span_end, &veneer_i))
3534 {
3535 uint32_t insn = bfd_getl32 (contents + veneer_i);
3536
3537 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3538 section, info))
3539 return FALSE;
3540 }
3541 }
3542 }
3543
3544 if (elf_section_data (section)->this_hdr.contents == NULL)
3545 free (contents);
3546 }
3547 while (0);
3548
3549 return TRUE;
3550 }
3551
3552
3553 /* Determine and set the size of the stub section for a final link.
3554
3555 The basic idea here is to examine all the relocations looking for
3556 PC-relative calls to a target that is unreachable with a "bl"
3557 instruction. */
3558
3559 bfd_boolean
3560 elfNN_aarch64_size_stubs (bfd *output_bfd,
3561 bfd *stub_bfd,
3562 struct bfd_link_info *info,
3563 bfd_signed_vma group_size,
3564 asection * (*add_stub_section) (const char *,
3565 asection *),
3566 void (*layout_sections_again) (void))
3567 {
3568 bfd_size_type stub_group_size;
3569 bfd_boolean stubs_always_before_branch;
3570 bfd_boolean stub_changed = FALSE;
3571 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3572 unsigned int num_erratum_835769_fixes = 0;
3573
3574 /* Propagate mach to stub bfd, because it may not have been
3575 finalized when we created stub_bfd. */
3576 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3577 bfd_get_mach (output_bfd));
3578
3579 /* Stash our params away. */
3580 htab->stub_bfd = stub_bfd;
3581 htab->add_stub_section = add_stub_section;
3582 htab->layout_sections_again = layout_sections_again;
3583 stubs_always_before_branch = group_size < 0;
3584 if (group_size < 0)
3585 stub_group_size = -group_size;
3586 else
3587 stub_group_size = group_size;
3588
3589 if (stub_group_size == 1)
3590 {
3591 /* Default values. */
3592 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3593 stub_group_size = 127 * 1024 * 1024;
3594 }
3595
3596 group_sections (htab, stub_group_size, stubs_always_before_branch);
3597
3598 (*htab->layout_sections_again) ();
3599
3600 if (htab->fix_erratum_835769)
3601 {
3602 bfd *input_bfd;
3603
3604 for (input_bfd = info->input_bfds;
3605 input_bfd != NULL; input_bfd = input_bfd->link.next)
3606 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3607 &num_erratum_835769_fixes))
3608 return FALSE;
3609
3610 _bfd_aarch64_resize_stubs (htab);
3611 (*htab->layout_sections_again) ();
3612 }
3613
3614 if (htab->fix_erratum_843419)
3615 {
3616 bfd *input_bfd;
3617
3618 for (input_bfd = info->input_bfds;
3619 input_bfd != NULL;
3620 input_bfd = input_bfd->link.next)
3621 {
3622 asection *section;
3623
3624 for (section = input_bfd->sections;
3625 section != NULL;
3626 section = section->next)
3627 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3628 return FALSE;
3629 }
3630
3631 _bfd_aarch64_resize_stubs (htab);
3632 (*htab->layout_sections_again) ();
3633 }
3634
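/* Scan relocations and add branch stubs repeatedly: inserting stubs
   and resizing the stub sections can move code and push other
   branches out of range, so iterate until a pass adds no new stub
   entries. */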
3635 while (1)
3636 {
3637 bfd *input_bfd;
3638
3639 for (input_bfd = info->input_bfds;
3640 input_bfd != NULL; input_bfd = input_bfd->link.next)
3641 {
3642 Elf_Internal_Shdr *symtab_hdr;
3643 asection *section;
3644 Elf_Internal_Sym *local_syms = NULL;
3645
3646 /* We'll need the symbol table in a second. */
3647 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3648 if (symtab_hdr->sh_info == 0)
3649 continue;
3650
3651 /* Walk over each section attached to the input bfd. */
3652 for (section = input_bfd->sections;
3653 section != NULL; section = section->next)
3654 {
3655 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3656
3657 /* If there aren't any relocs, then there's nothing more
3658 to do. */
3659 if ((section->flags & SEC_RELOC) == 0
3660 || section->reloc_count == 0
3661 || (section->flags & SEC_CODE) == 0)
3662 continue;
3663
3664 /* If this section is a link-once section that will be
3665 discarded, then don't create any stubs. */
3666 if (section->output_section == NULL
3667 || section->output_section->owner != output_bfd)
3668 continue;
3669
3670 /* Get the relocs. */
3671 internal_relocs
3672 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3673 NULL, info->keep_memory);
3674 if (internal_relocs == NULL)
3675 goto error_ret_free_local;
3676
3677 /* Now examine each relocation. */
3678 irela = internal_relocs;
3679 irelaend = irela + section->reloc_count;
3680 for (; irela < irelaend; irela++)
3681 {
3682 unsigned int r_type, r_indx;
3683 enum elf_aarch64_stub_type stub_type;
3684 struct elf_aarch64_stub_hash_entry *stub_entry;
3685 asection *sym_sec;
3686 bfd_vma sym_value;
3687 bfd_vma destination;
3688 struct elf_aarch64_link_hash_entry *hash;
3689 const char *sym_name;
3690 char *stub_name;
3691 const asection *id_sec;
3692 unsigned char st_type;
3693 bfd_size_type len;
3694
3695 r_type = ELFNN_R_TYPE (irela->r_info);
3696 r_indx = ELFNN_R_SYM (irela->r_info);
3697
3698 if (r_type >= (unsigned int) R_AARCH64_end)
3699 {
3700 bfd_set_error (bfd_error_bad_value);
3701 error_ret_free_internal:
3702 if (elf_section_data (section)->relocs == NULL)
3703 free (internal_relocs);
3704 goto error_ret_free_local;
3705 }
3706
3707 /* Only look for stubs on unconditional branch and
3708 branch and link instructions. */
3709 if (r_type != (unsigned int) AARCH64_R (CALL26)
3710 && r_type != (unsigned int) AARCH64_R (JUMP26))
3711 continue;
3712
3713 /* Now determine the call target, its name, value,
3714 section. */
3715 sym_sec = NULL;
3716 sym_value = 0;
3717 destination = 0;
3718 hash = NULL;
3719 sym_name = NULL;
3720 if (r_indx < symtab_hdr->sh_info)
3721 {
3722 /* It's a local symbol. */
3723 Elf_Internal_Sym *sym;
3724 Elf_Internal_Shdr *hdr;
3725
3726 if (local_syms == NULL)
3727 {
3728 local_syms
3729 = (Elf_Internal_Sym *) symtab_hdr->contents;
3730 if (local_syms == NULL)
3731 local_syms
3732 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3733 symtab_hdr->sh_info, 0,
3734 NULL, NULL, NULL);
3735 if (local_syms == NULL)
3736 goto error_ret_free_internal;
3737 }
3738
3739 sym = local_syms + r_indx;
3740 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3741 sym_sec = hdr->bfd_section;
3742 if (!sym_sec)
3743 /* This is an undefined symbol. It can never
3744 be resolved. */
3745 continue;
3746
3747 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3748 sym_value = sym->st_value;
3749 destination = (sym_value + irela->r_addend
3750 + sym_sec->output_offset
3751 + sym_sec->output_section->vma);
3752 st_type = ELF_ST_TYPE (sym->st_info);
3753 sym_name
3754 = bfd_elf_string_from_elf_section (input_bfd,
3755 symtab_hdr->sh_link,
3756 sym->st_name);
3757 }
3758 else
3759 {
3760 int e_indx;
3761
3762 e_indx = r_indx - symtab_hdr->sh_info;
3763 hash = ((struct elf_aarch64_link_hash_entry *)
3764 elf_sym_hashes (input_bfd)[e_indx]);
3765
3766 while (hash->root.root.type == bfd_link_hash_indirect
3767 || hash->root.root.type == bfd_link_hash_warning)
3768 hash = ((struct elf_aarch64_link_hash_entry *)
3769 hash->root.root.u.i.link);
3770
3771 if (hash->root.root.type == bfd_link_hash_defined
3772 || hash->root.root.type == bfd_link_hash_defweak)
3773 {
3774 struct elf_aarch64_link_hash_table *globals =
3775 elf_aarch64_hash_table (info);
3776 sym_sec = hash->root.root.u.def.section;
3777 sym_value = hash->root.root.u.def.value;
3778 /* For a destination in a shared library,
3779 use the PLT stub as target address to
3780 decide whether a branch stub is
3781 needed. */
3782 if (globals->root.splt != NULL && hash != NULL
3783 && hash->root.plt.offset != (bfd_vma) - 1)
3784 {
3785 sym_sec = globals->root.splt;
3786 sym_value = hash->root.plt.offset;
3787 if (sym_sec->output_section != NULL)
3788 destination = (sym_value
3789 + sym_sec->output_offset
3790 +
3791 sym_sec->output_section->vma);
3792 }
3793 else if (sym_sec->output_section != NULL)
3794 destination = (sym_value + irela->r_addend
3795 + sym_sec->output_offset
3796 + sym_sec->output_section->vma);
3797 }
3798 else if (hash->root.root.type == bfd_link_hash_undefined
3799 || (hash->root.root.type
3800 == bfd_link_hash_undefweak))
3801 {
3802 /* For a shared library, use the PLT stub as
3803 target address to decide whether a long
3804 branch stub is needed.
3805 For absolute code, such branches cannot be handled. */
3806 struct elf_aarch64_link_hash_table *globals =
3807 elf_aarch64_hash_table (info);
3808
3809 if (globals->root.splt != NULL && hash != NULL
3810 && hash->root.plt.offset != (bfd_vma) - 1)
3811 {
3812 sym_sec = globals->root.splt;
3813 sym_value = hash->root.plt.offset;
3814 if (sym_sec->output_section != NULL)
3815 destination = (sym_value
3816 + sym_sec->output_offset
3817 +
3818 sym_sec->output_section->vma);
3819 }
3820 else
3821 continue;
3822 }
3823 else
3824 {
3825 bfd_set_error (bfd_error_bad_value);
3826 goto error_ret_free_internal;
3827 }
3828 st_type = ELF_ST_TYPE (hash->root.type);
3829 sym_name = hash->root.root.root.string;
3830 }
3831
3832 /* Determine what (if any) linker stub is needed. */
3833 stub_type = aarch64_type_of_stub
3834 (info, section, irela, sym_sec, st_type, hash, destination);
3835 if (stub_type == aarch64_stub_none)
3836 continue;
3837
3838 /* Support for grouping stub sections. */
3839 id_sec = htab->stub_group[section->id].link_sec;
3840
3841 /* Get the name of this stub. */
3842 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3843 irela);
3844 if (!stub_name)
3845 goto error_ret_free_internal;
3846
3847 stub_entry =
3848 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3849 stub_name, FALSE, FALSE);
3850 if (stub_entry != NULL)
3851 {
3852 /* The proper stub has already been created. */
3853 free (stub_name);
3854 continue;
3855 }
3856
3857 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3858 (stub_name, section, htab);
3859 if (stub_entry == NULL)
3860 {
3861 free (stub_name);
3862 goto error_ret_free_internal;
3863 }
3864
3865 stub_entry->target_value = sym_value;
3866 stub_entry->target_section = sym_sec;
3867 stub_entry->stub_type = stub_type;
3868 stub_entry->h = hash;
3869 stub_entry->st_type = st_type;
3870
3871 if (sym_name == NULL)
3872 sym_name = "unnamed";
3873 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3874 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3875 if (stub_entry->output_name == NULL)
3876 {
3877 free (stub_name);
3878 goto error_ret_free_internal;
3879 }
3880
3881 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3882 sym_name);
3883
3884 stub_changed = TRUE;
3885 }
3886
3887 /* We're done with the internal relocs, free them. */
3888 if (elf_section_data (section)->relocs == NULL)
3889 free (internal_relocs);
3890 }
3891 }
3892
3893 if (!stub_changed)
3894 break;
3895
3896 _bfd_aarch64_resize_stubs (htab);
3897
3898 /* Ask the linker to do its stuff. */
3899 (*htab->layout_sections_again) ();
3900 stub_changed = FALSE;
3901 }
3902
3903 return TRUE;
3904
3905 error_ret_free_local:
3906 return FALSE;
3907 }
3908
3909 /* Build all the stubs associated with the current output file. The
3910 stubs are kept in a hash table attached to the main linker hash
3911 table. We also set up the .plt entries for statically linked PIC
3912 functions here. This function is called via aarch64_elf_finish in the
3913 linker. */
3914
3915 bfd_boolean
3916 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3917 {
3918 asection *stub_sec;
3919 struct bfd_hash_table *table;
3920 struct elf_aarch64_link_hash_table *htab;
3921
3922 htab = elf_aarch64_hash_table (info);
3923
3924 for (stub_sec = htab->stub_bfd->sections;
3925 stub_sec != NULL; stub_sec = stub_sec->next)
3926 {
3927 bfd_size_type size;
3928
3929 /* Ignore non-stub sections. */
3930 if (!strstr (stub_sec->name, STUB_SUFFIX))
3931 continue;
3932
3933 /* Allocate memory to hold the linker stubs. */
3934 size = stub_sec->size;
3935 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3936 if (stub_sec->contents == NULL && size != 0)
3937 return FALSE;
3938 stub_sec->size = 0;
3939
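/* Emit an unconditional branch (B) over the body of the stub
   section: 0x14000000 is the B opcode and its imm26 field holds the
   branch distance in words, here size / 4. */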
3940 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3941 stub_sec->size += 4;
3942 }
3943
3944 /* Build the stubs as directed by the stub hash table. */
3945 table = &htab->stub_hash_table;
3946 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3947
3948 return TRUE;
3949 }
3950
3951
3952 /* Add an entry to the code/data map for section SEC. */
3953
3954 static void
3955 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3956 {
3957 struct _aarch64_elf_section_data *sec_data =
3958 elf_aarch64_section_data (sec);
3959 unsigned int newidx;
3960
3961 if (sec_data->map == NULL)
3962 {
3963 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3964 sec_data->mapcount = 0;
3965 sec_data->mapsize = 1;
3966 }
3967
3968 newidx = sec_data->mapcount++;
3969
3970 if (sec_data->mapcount > sec_data->mapsize)
3971 {
3972 sec_data->mapsize *= 2;
3973 sec_data->map = bfd_realloc_or_free
3974 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3975 }
3976
3977 if (sec_data->map)
3978 {
3979 sec_data->map[newidx].vma = vma;
3980 sec_data->map[newidx].type = type;
3981 }
3982 }
3983
3984
3985 /* Initialise maps of insn/data for input BFDs. */
3986 void
3987 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3988 {
3989 Elf_Internal_Sym *isymbuf;
3990 Elf_Internal_Shdr *hdr;
3991 unsigned int i, localsyms;
3992
3993 /* Make sure that we are dealing with an AArch64 elf binary. */
3994 if (!is_aarch64_elf (abfd))
3995 return;
3996
3997 if ((abfd->flags & DYNAMIC) != 0)
3998 return;
3999
4000 hdr = &elf_symtab_hdr (abfd);
4001 localsyms = hdr->sh_info;
4002
4003 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4004 should contain the number of local symbols, which should come before any
4005 global symbols. Mapping symbols are always local. */
4006 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4007
4008 /* No internal symbols read? Skip this BFD. */
4009 if (isymbuf == NULL)
4010 return;
4011
4012 for (i = 0; i < localsyms; i++)
4013 {
4014 Elf_Internal_Sym *isym = &isymbuf[i];
4015 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4016 const char *name;
4017
4018 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4019 {
4020 name = bfd_elf_string_from_elf_section (abfd,
4021 hdr->sh_link,
4022 isym->st_name);
4023
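/* AArch64 mapping symbols are named "$x" (code) or "$d" (data); the
   character after the '$' is recorded as the span type in the
   section map. */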
4024 if (bfd_is_aarch64_special_symbol_name
4025 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4026 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4027 }
4028 }
4029 }
4030
4031 /* Set option values needed during linking. */
4032 void
4033 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4034 struct bfd_link_info *link_info,
4035 int no_enum_warn,
4036 int no_wchar_warn, int pic_veneer,
4037 int fix_erratum_835769,
4038 int fix_erratum_843419)
4039 {
4040 struct elf_aarch64_link_hash_table *globals;
4041
4042 globals = elf_aarch64_hash_table (link_info);
4043 globals->pic_veneer = pic_veneer;
4044 globals->fix_erratum_835769 = fix_erratum_835769;
4045 globals->fix_erratum_843419 = fix_erratum_843419;
4046 globals->fix_erratum_843419_adr = TRUE;
4047
4048 BFD_ASSERT (is_aarch64_elf (output_bfd));
4049 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4050 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4051 }
4052
4053 static bfd_vma
4054 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4055 struct elf_aarch64_link_hash_table
4056 *globals, struct bfd_link_info *info,
4057 bfd_vma value, bfd *output_bfd,
4058 bfd_boolean *unresolved_reloc_p)
4059 {
4060 bfd_vma off = (bfd_vma) - 1;
4061 asection *basegot = globals->root.sgot;
4062 bfd_boolean dyn = globals->root.dynamic_sections_created;
4063
4064 if (h != NULL)
4065 {
4066 BFD_ASSERT (basegot != NULL);
4067 off = h->got.offset;
4068 BFD_ASSERT (off != (bfd_vma) - 1);
4069 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4070 || (bfd_link_pic (info)
4071 && SYMBOL_REFERENCES_LOCAL (info, h))
4072 || (ELF_ST_VISIBILITY (h->other)
4073 && h->root.type == bfd_link_hash_undefweak))
4074 {
4075 /* This is actually a static link, or it is a -Bsymbolic link
4076 and the symbol is defined locally. We must initialize this
4077 entry in the global offset table. Since the offset must
4078 always be a multiple of 8 (4 in the case of ILP32), we use
4079 the least significant bit to record whether we have
4080 initialized it already.
4081 When doing a dynamic link, we create a .rel(a).got relocation
4082 entry to initialize the value. This is done in the
4083 finish_dynamic_symbol routine. */
4084 if ((off & 1) != 0)
4085 off &= ~1;
4086 else
4087 {
4088 bfd_put_NN (output_bfd, value, basegot->contents + off);
4089 h->got.offset |= 1;
4090 }
4091 }
4092 else
4093 *unresolved_reloc_p = FALSE;
4094
4095 off = off + basegot->output_section->vma + basegot->output_offset;
4096 }
4097
4098 return off;
4099 }
4100
4101 /* Change R_TYPE to a more efficient access model where possible,
4102 return the new reloc type. */
4103
4104 static bfd_reloc_code_real_type
4105 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4106 struct elf_link_hash_entry *h)
4107 {
4108 bfd_boolean is_local = h == NULL;
4109
4110 switch (r_type)
4111 {
4112 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4113 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4114 return (is_local
4115 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4116 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4117
4118 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4119 return (is_local
4120 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4121 : r_type);
4122
4123 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4124 return (is_local
4125 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4126 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4127
4128 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4129 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4130 return (is_local
4131 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4132 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4133
4134 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4135 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4136
4137 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4138 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4139
4140 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4141 return r_type;
4142
4143 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4144 return (is_local
4145 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4146 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4147
4148 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4149 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4150 /* Instructions with these relocations will become NOPs. */
4151 return BFD_RELOC_AARCH64_NONE;
4152
4153 default:
4154 break;
4155 }
4156
4157 return r_type;
4158 }
4159
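/* Return the class of GOT entry (GOT_NORMAL, GOT_TLS_GD,
   GOT_TLSDESC_GD, GOT_TLS_IE or GOT_UNKNOWN) required by relocation
   R_TYPE. */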
4160 static unsigned int
4161 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4162 {
4163 switch (r_type)
4164 {
4165 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4166 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4167 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4168 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4169 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4170 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4171 return GOT_NORMAL;
4172
4173 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4174 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4175 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4176 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4177 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4178 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4179 return GOT_TLS_GD;
4180
4181 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4182 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4183 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4184 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4185 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4186 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4187 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4188 return GOT_TLSDESC_GD;
4189
4190 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4191 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4192 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4193 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4194 return GOT_TLS_IE;
4195
4196 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
4197 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4198 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4199 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4200 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4201 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4202 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4203 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4204 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4205 return GOT_UNKNOWN;
4206
4207 default:
4208 break;
4209 }
4210 return GOT_UNKNOWN;
4211 }
4212
4213 static bfd_boolean
4214 aarch64_can_relax_tls (bfd *input_bfd,
4215 struct bfd_link_info *info,
4216 bfd_reloc_code_real_type r_type,
4217 struct elf_link_hash_entry *h,
4218 unsigned long r_symndx)
4219 {
4220 unsigned int symbol_got_type;
4221 unsigned int reloc_got_type;
4222
4223 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
4224 return FALSE;
4225
4226 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4227 reloc_got_type = aarch64_reloc_got_type (r_type);
4228
4229 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4230 return TRUE;
4231
4232 if (bfd_link_pic (info))
4233 return FALSE;
4234
4235 if (h && h->root.type == bfd_link_hash_undefweak)
4236 return FALSE;
4237
4238 return TRUE;
4239 }
4240
4241 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4242 enumerator. */
4243
4244 static bfd_reloc_code_real_type
4245 aarch64_tls_transition (bfd *input_bfd,
4246 struct bfd_link_info *info,
4247 unsigned int r_type,
4248 struct elf_link_hash_entry *h,
4249 unsigned long r_symndx)
4250 {
4251 bfd_reloc_code_real_type bfd_r_type
4252 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4253
4254 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4255 return bfd_r_type;
4256
4257 return aarch64_tls_transition_without_check (bfd_r_type, h);
4258 }
4259
4260 /* Return the base VMA address which should be subtracted from real addresses
4261 when resolving R_AARCH64_TLS_DTPREL relocation. */
4262
4263 static bfd_vma
4264 dtpoff_base (struct bfd_link_info *info)
4265 {
4266 /* If tls_sec is NULL, we should have signalled an error already. */
4267 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4268 return elf_hash_table (info)->tls_sec->vma;
4269 }
4270
4271 /* Return the base VMA address which should be subtracted from real addresses
4272 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4273
4274 static bfd_vma
4275 tpoff_base (struct bfd_link_info *info)
4276 {
4277 struct elf_link_hash_table *htab = elf_hash_table (info);
4278
4279 /* If tls_sec is NULL, we should have signalled an error already. */
4280 BFD_ASSERT (htab->tls_sec != NULL);
4281
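/* AArch64 uses TLS variant 1: the thread pointer points at the TCB
   and the TLS block starts at TP + align (TCB_SIZE, alignment), so a
   symbol's TP-relative offset is its VMA minus the value returned
   here. */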
4282 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4283 htab->tls_sec->alignment_power);
4284 return htab->tls_sec->vma - base;
4285 }
4286
4287 static bfd_vma *
4288 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4289 unsigned long r_symndx)
4290 {
4291 /* Return a pointer to the recorded GOT offset for the symbol
4292 referred to by H, or for the local symbol with index R_SYMNDX. */
4293 if (h != NULL)
4294 return &h->got.offset;
4295 else
4296 {
4297 /* local symbol */
4298 struct elf_aarch64_local_symbol *l;
4299
4300 l = elf_aarch64_locals (input_bfd);
4301 return &l[r_symndx].got_offset;
4302 }
4303 }
4304
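/* GOT offsets are always a multiple of the entry size, so the least
   significant bit is free to act as an "entry already initialised"
   flag: symbol_got_offset_mark sets it, symbol_got_offset_mark_p
   tests it and symbol_got_offset strips it. */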
4305 static void
4306 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4307 unsigned long r_symndx)
4308 {
4309 bfd_vma *p;
4310 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4311 *p |= 1;
4312 }
4313
4314 static int
4315 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4316 unsigned long r_symndx)
4317 {
4318 bfd_vma value;
4319 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4320 return value & 1;
4321 }
4322
4323 static bfd_vma
4324 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4325 unsigned long r_symndx)
4326 {
4327 bfd_vma value;
4328 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4329 value &= ~1;
4330 return value;
4331 }
4332
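/* The _tlsdesc_ variants below record the TLS descriptor GOT slot
   and use the same low-bit marking convention as the plain GOT
   offsets above. */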
4333 static bfd_vma *
4334 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4335 unsigned long r_symndx)
4336 {
4337 /* Return a pointer to the recorded TLS descriptor GOT offset for the
4338 symbol referred to by H, or for the local symbol R_SYMNDX. */
4339 if (h != NULL)
4340 {
4341 struct elf_aarch64_link_hash_entry *eh;
4342 eh = (struct elf_aarch64_link_hash_entry *) h;
4343 return &eh->tlsdesc_got_jump_table_offset;
4344 }
4345 else
4346 {
4347 /* local symbol */
4348 struct elf_aarch64_local_symbol *l;
4349
4350 l = elf_aarch64_locals (input_bfd);
4351 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4352 }
4353 }
4354
4355 static void
4356 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4357 unsigned long r_symndx)
4358 {
4359 bfd_vma *p;
4360 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4361 *p |= 1;
4362 }
4363
4364 static int
4365 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4366 struct elf_link_hash_entry *h,
4367 unsigned long r_symndx)
4368 {
4369 bfd_vma value;
4370 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4371 return value & 1;
4372 }
4373
4374 static bfd_vma
4375 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4376 unsigned long r_symndx)
4377 {
4378 bfd_vma value;
4379 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4380 value &= ~1;
4381 return value;
4382 }
4383
4384 /* Data for make_branch_to_erratum_835769_stub(). */
4385
4386 struct erratum_835769_branch_to_stub_data
4387 {
4388 struct bfd_link_info *info;
4389 asection *output_section;
4390 bfd_byte *contents;
4391 };
4392
4393 /* Helper to insert branches to erratum 835769 stubs in the right
4394 places for a particular section. */
4395
4396 static bfd_boolean
4397 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4398 void *in_arg)
4399 {
4400 struct elf_aarch64_stub_hash_entry *stub_entry;
4401 struct erratum_835769_branch_to_stub_data *data;
4402 bfd_byte *contents;
4403 unsigned long branch_insn = 0;
4404 bfd_vma veneered_insn_loc, veneer_entry_loc;
4405 bfd_signed_vma branch_offset;
4406 unsigned int target;
4407 bfd *abfd;
4408
4409 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4410 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4411
4412 if (stub_entry->target_section != data->output_section
4413 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4414 return TRUE;
4415
4416 contents = data->contents;
4417 veneered_insn_loc = stub_entry->target_section->output_section->vma
4418 + stub_entry->target_section->output_offset
4419 + stub_entry->target_value;
4420 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4421 + stub_entry->stub_sec->output_offset
4422 + stub_entry->stub_offset;
4423 branch_offset = veneer_entry_loc - veneered_insn_loc;
4424
4425 abfd = stub_entry->target_section->owner;
4426 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4427 (*_bfd_error_handler)
4428 (_("%B: error: Erratum 835769 stub out "
4429 "of range (input file too large)"), abfd);
4430
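/* Overwrite the veneered instruction with an unconditional branch (B)
   to the stub: 0x14000000 is the B opcode and imm26 holds the signed
   branch offset in words. */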
4431 target = stub_entry->target_value;
4432 branch_insn = 0x14000000;
4433 branch_offset >>= 2;
4434 branch_offset &= 0x3ffffff;
4435 branch_insn |= branch_offset;
4436 bfd_putl32 (branch_insn, &contents[target]);
4437
4438 return TRUE;
4439 }
4440
4441
4442 static bfd_boolean
4443 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4444 void *in_arg)
4445 {
4446 struct elf_aarch64_stub_hash_entry *stub_entry
4447 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4448 struct erratum_835769_branch_to_stub_data *data
4449 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4450 struct bfd_link_info *info;
4451 struct elf_aarch64_link_hash_table *htab;
4452 bfd_byte *contents;
4453 asection *section;
4454 bfd *abfd;
4455 bfd_vma place;
4456 uint32_t insn;
4457
4458 info = data->info;
4459 contents = data->contents;
4460 section = data->output_section;
4461
4462 htab = elf_aarch64_hash_table (info);
4463
4464 if (stub_entry->target_section != section
4465 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4466 return TRUE;
4467
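/* Copy the veneered instruction, with its relocations already
   applied, from the input section into its slot in the stub. */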
4468 insn = bfd_getl32 (contents + stub_entry->target_value);
4469 bfd_putl32 (insn,
4470 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4471
4472 place = (section->output_section->vma + section->output_offset
4473 + stub_entry->adrp_offset);
4474 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4475
4476 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4477 abort ();
4478
4479 bfd_signed_vma imm =
4480 (_bfd_aarch64_sign_extend
4481 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4482 - (place & 0xfff));
4483
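/* Where permitted and the target is in range, rewrite the offending
   ADRP as an ADR computing the same address, which avoids the
   erratum without branching to the veneer; otherwise fall back to a
   branch to the veneer below. */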
4484 if (htab->fix_erratum_843419_adr
4485 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4486 {
4487 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4488 | AARCH64_RT (insn));
4489 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4490 }
4491 else
4492 {
4493 bfd_vma veneered_insn_loc;
4494 bfd_vma veneer_entry_loc;
4495 bfd_signed_vma branch_offset;
4496 uint32_t branch_insn;
4497
4498 veneered_insn_loc = stub_entry->target_section->output_section->vma
4499 + stub_entry->target_section->output_offset
4500 + stub_entry->target_value;
4501 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4502 + stub_entry->stub_sec->output_offset
4503 + stub_entry->stub_offset;
4504 branch_offset = veneer_entry_loc - veneered_insn_loc;
4505
4506 abfd = stub_entry->target_section->owner;
4507 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4508 (*_bfd_error_handler)
4509 (_("%B: error: Erratum 843419 stub out "
4510 "of range (input file too large)"), abfd);
4511
4512 branch_insn = 0x14000000;
4513 branch_offset >>= 2;
4514 branch_offset &= 0x3ffffff;
4515 branch_insn |= branch_offset;
4516 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4517 }
4518 return TRUE;
4519 }
4520
4521
4522 static bfd_boolean
4523 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4524 struct bfd_link_info *link_info,
4525 asection *sec,
4526 bfd_byte *contents)
4527
4528 {
4529 struct elf_aarch64_link_hash_table *globals =
4530 elf_aarch64_hash_table (link_info);
4531
4532 if (globals == NULL)
4533 return FALSE;
4534
4535 /* Fix code to point to erratum 835769 stubs. */
4536 if (globals->fix_erratum_835769)
4537 {
4538 struct erratum_835769_branch_to_stub_data data;
4539
4540 data.info = link_info;
4541 data.output_section = sec;
4542 data.contents = contents;
4543 bfd_hash_traverse (&globals->stub_hash_table,
4544 make_branch_to_erratum_835769_stub, &data);
4545 }
4546
4547 if (globals->fix_erratum_843419)
4548 {
4549 struct erratum_835769_branch_to_stub_data data;
4550
4551 data.info = link_info;
4552 data.output_section = sec;
4553 data.contents = contents;
4554 bfd_hash_traverse (&globals->stub_hash_table,
4555 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4556 }
4557
4558 return FALSE;
4559 }
4560
4561 /* Perform a relocation as part of a final link. */
4562 static bfd_reloc_status_type
4563 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4564 bfd *input_bfd,
4565 bfd *output_bfd,
4566 asection *input_section,
4567 bfd_byte *contents,
4568 Elf_Internal_Rela *rel,
4569 bfd_vma value,
4570 struct bfd_link_info *info,
4571 asection *sym_sec,
4572 struct elf_link_hash_entry *h,
4573 bfd_boolean *unresolved_reloc_p,
4574 bfd_boolean save_addend,
4575 bfd_vma *saved_addend,
4576 Elf_Internal_Sym *sym)
4577 {
4578 Elf_Internal_Shdr *symtab_hdr;
4579 unsigned int r_type = howto->type;
4580 bfd_reloc_code_real_type bfd_r_type
4581 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4582 bfd_reloc_code_real_type new_bfd_r_type;
4583 unsigned long r_symndx;
4584 bfd_byte *hit_data = contents + rel->r_offset;
4585 bfd_vma place, off;
4586 bfd_signed_vma signed_addend;
4587 struct elf_aarch64_link_hash_table *globals;
4588 bfd_boolean weak_undef_p;
4589 asection *base_got;
4590
4591 globals = elf_aarch64_hash_table (info);
4592
4593 symtab_hdr = &elf_symtab_hdr (input_bfd);
4594
4595 BFD_ASSERT (is_aarch64_elf (input_bfd));
4596
4597 r_symndx = ELFNN_R_SYM (rel->r_info);
4598
4599 /* It is possible to have linker relaxations on some TLS access
4600 models. Update our information here. */
4601 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4602 if (new_bfd_r_type != bfd_r_type)
4603 {
4604 bfd_r_type = new_bfd_r_type;
4605 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4606 BFD_ASSERT (howto != NULL);
4607 r_type = howto->type;
4608 }
4609
4610 place = input_section->output_section->vma
4611 + input_section->output_offset + rel->r_offset;
4612
4613 /* Get addend, accumulating the addend for consecutive relocs
4614 which refer to the same offset. */
4615 signed_addend = saved_addend ? *saved_addend : 0;
4616 signed_addend += rel->r_addend;
4617
4618 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4619 : bfd_is_und_section (sym_sec));
4620
4621 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4622 it here if it is defined in a non-shared object. */
4623 if (h != NULL
4624 && h->type == STT_GNU_IFUNC
4625 && h->def_regular)
4626 {
4627 asection *plt;
4628 const char *name;
4629 bfd_vma addend = 0;
4630
4631 if ((input_section->flags & SEC_ALLOC) == 0
4632 || h->plt.offset == (bfd_vma) -1)
4633 abort ();
4634
4635 /* STT_GNU_IFUNC symbol must go through PLT. */
4636 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4637 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4638
4639 switch (bfd_r_type)
4640 {
4641 default:
4642 if (h->root.root.string)
4643 name = h->root.root.string;
4644 else
4645 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4646 NULL);
4647 (*_bfd_error_handler)
4648 (_("%B: relocation %s against STT_GNU_IFUNC "
4649 "symbol `%s' isn't handled by %s"), input_bfd,
4650 howto->name, name, __FUNCTION__);
4651 bfd_set_error (bfd_error_bad_value);
4652 return FALSE;
4653
4654 case BFD_RELOC_AARCH64_NN:
4655 if (rel->r_addend != 0)
4656 {
4657 if (h->root.root.string)
4658 name = h->root.root.string;
4659 else
4660 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4661 sym, NULL);
4662 (*_bfd_error_handler)
4663 (_("%B: relocation %s against STT_GNU_IFUNC "
4664 "symbol `%s' has non-zero addend: %d"),
4665 input_bfd, howto->name, name, rel->r_addend);
4666 bfd_set_error (bfd_error_bad_value);
4667 return FALSE;
4668 }
4669
4670 /* Generate dynamic relocation only when there is a
4671 non-GOT reference in a shared object. */
4672 if (bfd_link_pic (info) && h->non_got_ref)
4673 {
4674 Elf_Internal_Rela outrel;
4675 asection *sreloc;
4676
4677 /* Need a dynamic relocation to get the real function
4678 address. */
4679 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4680 info,
4681 input_section,
4682 rel->r_offset);
4683 if (outrel.r_offset == (bfd_vma) -1
4684 || outrel.r_offset == (bfd_vma) -2)
4685 abort ();
4686
4687 outrel.r_offset += (input_section->output_section->vma
4688 + input_section->output_offset);
4689
4690 if (h->dynindx == -1
4691 || h->forced_local
4692 || bfd_link_executable (info))
4693 {
4694 /* This symbol is resolved locally. */
4695 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4696 outrel.r_addend = (h->root.u.def.value
4697 + h->root.u.def.section->output_section->vma
4698 + h->root.u.def.section->output_offset);
4699 }
4700 else
4701 {
4702 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4703 outrel.r_addend = 0;
4704 }
4705
4706 sreloc = globals->root.irelifunc;
4707 elf_append_rela (output_bfd, sreloc, &outrel);
4708
4709 /* If this reloc is against an external symbol, we
4710 do not want to fiddle with the addend. Otherwise,
4711 we need to include the symbol value so that it
4712 becomes an addend for the dynamic reloc. For an
4713 internal symbol, we have already updated the addend. */
4714 return bfd_reloc_ok;
4715 }
4716 /* FALLTHROUGH */
4717 case BFD_RELOC_AARCH64_CALL26:
4718 case BFD_RELOC_AARCH64_JUMP26:
4719 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4720 signed_addend,
4721 weak_undef_p);
4722 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4723 howto, value);
4724 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4725 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4726 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4727 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4728 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4729 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4730 base_got = globals->root.sgot;
4731 off = h->got.offset;
4732
4733 if (base_got == NULL)
4734 abort ();
4735
4736 if (off == (bfd_vma) -1)
4737 {
4738 bfd_vma plt_index;
4739
4740 /* We can't use h->got.offset here to save state, or
4741 even just remember the offset, as finish_dynamic_symbol
4742 would use that as offset into .got. */
4743
4744 if (globals->root.splt != NULL)
4745 {
4746 plt_index = ((h->plt.offset - globals->plt_header_size) /
4747 globals->plt_entry_size);
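/* Skip the reserved entries at the start of .got.plt (hence
   the + 3). */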
4748 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4749 base_got = globals->root.sgotplt;
4750 }
4751 else
4752 {
4753 plt_index = h->plt.offset / globals->plt_entry_size;
4754 off = plt_index * GOT_ENTRY_SIZE;
4755 base_got = globals->root.igotplt;
4756 }
4757
4758 if (h->dynindx == -1
4759 || h->forced_local
4760 || info->symbolic)
4761 {
4762 /* This references the local definition. We must
4763 initialize this entry in the global offset table.
4764 Since the offset must always be a multiple of 8,
4765 we use the least significant bit to record
4766 whether we have initialized it already.
4767
4768 When doing a dynamic link, we create a .rela.got
4769 relocation entry to initialize the value. This
4770 is done in the finish_dynamic_symbol routine. */
4771 if ((off & 1) != 0)
4772 off &= ~1;
4773 else
4774 {
4775 bfd_put_NN (output_bfd, value,
4776 base_got->contents + off);
4777 /* Note that this is harmless as -1 | 1 still is -1. */
4778 h->got.offset |= 1;
4779 }
4780 }
4781 value = (base_got->output_section->vma
4782 + base_got->output_offset + off);
4783 }
4784 else
4785 value = aarch64_calculate_got_entry_vma (h, globals, info,
4786 value, output_bfd,
4787 unresolved_reloc_p);
4788 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4789 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4790 addend = (globals->root.sgot->output_section->vma
4791 + globals->root.sgot->output_offset);
4792 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4793 addend, weak_undef_p);
4794 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4795 case BFD_RELOC_AARCH64_ADD_LO12:
4796 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4797 break;
4798 }
4799 }
4800
4801 switch (bfd_r_type)
4802 {
4803 case BFD_RELOC_AARCH64_NONE:
4804 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4805 *unresolved_reloc_p = FALSE;
4806 return bfd_reloc_ok;
4807
4808 case BFD_RELOC_AARCH64_NN:
4809
4810 /* When generating a shared object or relocatable executable, these
4811 relocations are copied into the output file to be resolved at
4812 run time. */
4813 if (((bfd_link_pic (info) == TRUE)
4814 || globals->root.is_relocatable_executable)
4815 && (input_section->flags & SEC_ALLOC)
4816 && (h == NULL
4817 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4818 || h->root.type != bfd_link_hash_undefweak))
4819 {
4820 Elf_Internal_Rela outrel;
4821 bfd_byte *loc;
4822 bfd_boolean skip, relocate;
4823 asection *sreloc;
4824
4825 *unresolved_reloc_p = FALSE;
4826
4827 skip = FALSE;
4828 relocate = FALSE;
4829
4830 outrel.r_addend = signed_addend;
4831 outrel.r_offset =
4832 _bfd_elf_section_offset (output_bfd, info, input_section,
4833 rel->r_offset);
4834 if (outrel.r_offset == (bfd_vma) - 1)
4835 skip = TRUE;
4836 else if (outrel.r_offset == (bfd_vma) - 2)
4837 {
4838 skip = TRUE;
4839 relocate = TRUE;
4840 }
4841
4842 outrel.r_offset += (input_section->output_section->vma
4843 + input_section->output_offset);
4844
4845 if (skip)
4846 memset (&outrel, 0, sizeof outrel);
4847 else if (h != NULL
4848 && h->dynindx != -1
4849 && (!bfd_link_pic (info)
4850 || !SYMBOLIC_BIND (info, h)
4851 || !h->def_regular))
4852 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4853 else
4854 {
4855 int symbol;
4856
4857 /* On SVR4-ish systems, the dynamic loader cannot
4858 relocate the text and data segments independently,
4859 so the symbol does not matter. */
4860 symbol = 0;
4861 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4862 outrel.r_addend += value;
4863 }
4864
4865 sreloc = elf_section_data (input_section)->sreloc;
4866 if (sreloc == NULL || sreloc->contents == NULL)
4867 return bfd_reloc_notsupported;
4868
4869 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4870 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4871
4872 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4873 {
4874 /* Sanity check that we have previously allocated
4875 sufficient space in the relocation section for the
4876 number of relocations we actually want to emit. */
4877 abort ();
4878 }
4879
4880 /* If this reloc is against an external symbol, we do not want to
4881 fiddle with the addend. Otherwise, we need to include the symbol
4882 value so that it becomes an addend for the dynamic reloc. */
4883 if (!relocate)
4884 return bfd_reloc_ok;
4885
4886 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4887 contents, rel->r_offset, value,
4888 signed_addend);
4889 }
4890 else
4891 value += signed_addend;
4892 break;
4893
4894 case BFD_RELOC_AARCH64_CALL26:
4895 case BFD_RELOC_AARCH64_JUMP26:
4896 {
4897 asection *splt = globals->root.splt;
4898 bfd_boolean via_plt_p =
4899 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4900
4901 /* A call to an undefined weak symbol is converted to a jump to
4902 the next instruction unless a PLT entry will be created.
4903 The jump to the next instruction is optimized as a NOP.
4904 Do the same for local undefined symbols. */
4905 if (weak_undef_p && ! via_plt_p)
4906 {
4907 bfd_putl32 (INSN_NOP, hit_data);
4908 return bfd_reloc_ok;
4909 }
4910
4911 /* If the call goes through a PLT entry, make sure to
4912 check distance to the right destination address. */
4913 if (via_plt_p)
4914 value = (splt->output_section->vma
4915 + splt->output_offset + h->plt.offset);
4916
4917 /* Check if a stub has to be inserted because the destination
4918 is too far away. */
4919 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
4920 if (! aarch64_valid_branch_p (value, place))
4921 /* The target is out of reach, so redirect the branch to
4922 the local stub for this function. */
4923 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
4924 rel, globals);
4925 if (stub_entry != NULL)
4926 value = (stub_entry->stub_offset
4927 + stub_entry->stub_sec->output_offset
4928 + stub_entry->stub_sec->output_section->vma);
4929 }
4930 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4931 signed_addend, weak_undef_p);
4932 *unresolved_reloc_p = FALSE;
4933 break;
4934
4935 case BFD_RELOC_AARCH64_16_PCREL:
4936 case BFD_RELOC_AARCH64_32_PCREL:
4937 case BFD_RELOC_AARCH64_64_PCREL:
4938 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4939 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4940 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4941 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4942 if (bfd_link_pic (info)
4943 && (input_section->flags & SEC_ALLOC) != 0
4944 && (input_section->flags & SEC_READONLY) != 0
4945 && h != NULL
4946 && !h->def_regular)
4947 {
4948 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4949
4950 (*_bfd_error_handler)
4951 (_("%B: relocation %s against external symbol `%s' can not be used"
4952 " when making a shared object; recompile with -fPIC"),
4953 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4954 h->root.root.string);
4955 bfd_set_error (bfd_error_bad_value);
4956 return FALSE;
4957 }
4958
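/* FALLTHROUGH */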
4959 case BFD_RELOC_AARCH64_16:
4960 #if ARCH_SIZE == 64
4961 case BFD_RELOC_AARCH64_32:
4962 #endif
4963 case BFD_RELOC_AARCH64_ADD_LO12:
4964 case BFD_RELOC_AARCH64_BRANCH19:
4965 case BFD_RELOC_AARCH64_LDST128_LO12:
4966 case BFD_RELOC_AARCH64_LDST16_LO12:
4967 case BFD_RELOC_AARCH64_LDST32_LO12:
4968 case BFD_RELOC_AARCH64_LDST64_LO12:
4969 case BFD_RELOC_AARCH64_LDST8_LO12:
4970 case BFD_RELOC_AARCH64_MOVW_G0:
4971 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4972 case BFD_RELOC_AARCH64_MOVW_G0_S:
4973 case BFD_RELOC_AARCH64_MOVW_G1:
4974 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4975 case BFD_RELOC_AARCH64_MOVW_G1_S:
4976 case BFD_RELOC_AARCH64_MOVW_G2:
4977 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4978 case BFD_RELOC_AARCH64_MOVW_G2_S:
4979 case BFD_RELOC_AARCH64_MOVW_G3:
4980 case BFD_RELOC_AARCH64_TSTBR14:
4981 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4982 signed_addend, weak_undef_p);
4983 break;
4984
4985 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4986 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4987 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4988 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4989 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4990 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4991 if (globals->root.sgot == NULL)
4992 BFD_ASSERT (h != NULL);
4993
4994 if (h != NULL)
4995 {
4996 bfd_vma addend = 0;
4997 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4998 output_bfd,
4999 unresolved_reloc_p);
5000 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5001 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5002 addend = (globals->root.sgot->output_section->vma
5003 + globals->root.sgot->output_offset);
5004 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5005 addend, weak_undef_p);
5006 }
5007 else
5008 {
5009 bfd_vma addend = 0;
5010 struct elf_aarch64_local_symbol *locals
5011 = elf_aarch64_locals (input_bfd);
5012
5013 if (locals == NULL)
5014 {
5015 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5016 (*_bfd_error_handler)
5017 (_("%B: Local symbol descriptor table be NULL when applying "
5018 "relocation %s against local symbol"),
5019 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5020 abort ();
5021 }
5022
5023 off = symbol_got_offset (input_bfd, h, r_symndx);
5024 base_got = globals->root.sgot;
5025 bfd_vma got_entry_addr = (base_got->output_section->vma
5026 + base_got->output_offset + off);
5027
5028 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5029 {
5030 bfd_put_64 (output_bfd, value, base_got->contents + off);
5031
5032 if (bfd_link_pic (info))
5033 {
5034 asection *s;
5035 Elf_Internal_Rela outrel;
5036
5037 /* For a local symbol, we have done the absolute relocation at
5038 static link time.  For a shared library, the GOT entry contents
5039 must instead be adjusted by the shared object's load base
5040 address, so we generate an R_AARCH64_RELATIVE reloc for the
5041 dynamic linker. */
5042 s = globals->root.srelgot;
5043 if (s == NULL)
5044 abort ();
5045
5046 outrel.r_offset = got_entry_addr;
5047 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5048 outrel.r_addend = value;
5049 elf_append_rela (output_bfd, s, &outrel);
5050 }
5051
5052 symbol_got_offset_mark (input_bfd, h, r_symndx);
5053 }
5054
5055 /* Update the relocation value to the GOT entry address, since we have
5056 transformed the direct data access into an indirect access through the GOT. */
5057 value = got_entry_addr;
5058
5059 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5060 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5061 addend = base_got->output_section->vma + base_got->output_offset;
5062
5063 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5064 addend, weak_undef_p);
5065 }
5066
5067 break;
5068
5069 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5070 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5071 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5072 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5073 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5074 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5075 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5076 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5077 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5078 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5079 if (globals->root.sgot == NULL)
5080 return bfd_reloc_notsupported;
5081
5082 value = (symbol_got_offset (input_bfd, h, r_symndx)
5083 + globals->root.sgot->output_section->vma
5084 + globals->root.sgot->output_offset);
5085
5086 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5087 0, weak_undef_p);
5088 *unresolved_reloc_p = FALSE;
5089 break;
5090
5091 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5092 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5093 signed_addend - dtpoff_base (info),
5094 weak_undef_p);
5095 break;
5096
5097 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5098 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5099 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5100 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5101 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5102 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5103 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5104 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5105 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5106 signed_addend - tpoff_base (info),
5107 weak_undef_p);
5108 *unresolved_reloc_p = FALSE;
5109 break;
5110
5111 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5112 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5113 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5114 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5115 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5116 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5117 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5118 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5119 if (globals->root.sgot == NULL)
5120 return bfd_reloc_notsupported;
5121 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5122 + globals->root.sgotplt->output_section->vma
5123 + globals->root.sgotplt->output_offset
5124 + globals->sgotplt_jump_table_size);
5125
5126 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5127 0, weak_undef_p);
5128 *unresolved_reloc_p = FALSE;
5129 break;
5130
5131 default:
5132 return bfd_reloc_notsupported;
5133 }
5134
5135 if (saved_addend)
5136 *saved_addend = value;
5137
5138 /* Only apply the final relocation in a sequence. */
5139 if (save_addend)
5140 return bfd_reloc_continue;
5141
5142 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5143 howto, value);
5144 }
5145
5146 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5147 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5148 link.
5149
5150 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5151 is to then call final_link_relocate. Return other values in the
5152 case of error. */
5153
5154 static bfd_reloc_status_type
5155 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5156 bfd *input_bfd, bfd_byte *contents,
5157 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5158 {
5159 bfd_boolean is_local = h == NULL;
5160 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5161 unsigned long insn;
5162
5163 BFD_ASSERT (globals && input_bfd && contents && rel);
5164
5165 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5166 {
5167 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5168 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5169 if (is_local)
5170 {
5171 /* GD->LE relaxation:
5172 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5173 or
5174 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5175 */
5176 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5177 return bfd_reloc_continue;
5178 }
5179 else
5180 {
5181 /* GD->IE relaxation:
5182 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5183 or
5184 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5185 */
5186 return bfd_reloc_continue;
5187 }
5188
5189 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5190 BFD_ASSERT (0);
5191 break;
5192
5193 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5194 if (is_local)
5195 {
5196 /* Tiny TLSDESC->LE relaxation:
5197 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5198 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5199 .tlsdesccall var
5200 blr x1 => nop
5201 */
5202 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5203 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5204
5205 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5206 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5207 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5208
5209 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5210 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5211 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5212 return bfd_reloc_continue;
5213 }
5214 else
5215 {
5216 /* Tiny TLSDESC->IE relaxation:
5217 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5218 adr x0, :tlsdesc:var => nop
5219 .tlsdesccall var
5220 blr x1 => nop
5221 */
5222 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5223 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5224
5225 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5226 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5227
5228 bfd_putl32 (0x58000000, contents + rel->r_offset);
5229 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5230 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5231 return bfd_reloc_continue;
5232 }
5233
5234 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5235 if (is_local)
5236 {
5237 /* Tiny GD->LE relaxation:
5238 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5239 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5240 nop => add x0, x0, #:tprel_lo12_nc:x
5241 */
5242
5243 /* First kill the tls_get_addr reloc on the bl instruction. */
5244 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5245
5246 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5247 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5248 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5249
5250 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5251 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5252 rel[1].r_offset = rel->r_offset + 8;
5253
5254 /* Move the current relocation to the second instruction in
5255 the sequence. */
5256 rel->r_offset += 4;
5257 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5258 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5259 return bfd_reloc_continue;
5260 }
5261 else
5262 {
5263 /* Tiny GD->IE relaxation:
5264 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5265 bl __tls_get_addr => mrs x1, tpidr_el0
5266 nop => add x0, x0, x1
5267 */
5268
5269 /* First kill the tls_get_addr reloc on the bl instruction. */
5270 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5271 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5272
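/* 0x58000000 is ldr x0, <literal>, 0xd53bd041 is mrs x1, tpidr_el0 and
   0x8b000020 is add x0, x1, x0; the literal offset to the :gottprel:
   GOT slot is filled in by the relaxed relocation. */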
5273 bfd_putl32 (0x58000000, contents + rel->r_offset);
5274 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5275 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5276 return bfd_reloc_continue;
5277 }
5278
5279 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5280 return bfd_reloc_continue;
5281
5282 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5283 if (is_local)
5284 {
5285 /* GD->LE relaxation:
5286 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5287 */
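/* 0xf2800000 is movk x0, #0; the :tprel_g0_nc: immediate is filled in
   by the relaxed relocation. */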
5288 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5289 return bfd_reloc_continue;
5290 }
5291 else
5292 {
5293 /* GD->IE relaxation:
5294 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5295 */
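/* Clear the destination register field (Rt, bits [4:0]) so that the
   load targets x0, keeping the rest of the original LDR encoding. */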
5296 insn = bfd_getl32 (contents + rel->r_offset);
5297 insn &= 0xffffffe0;
5298 bfd_putl32 (insn, contents + rel->r_offset);
5299 return bfd_reloc_continue;
5300 }
5301
5302 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5303 if (is_local)
5304 {
5305 /* GD->LE relaxation
5306 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5307 bl __tls_get_addr => mrs x1, tpidr_el0
5308 nop => add x0, x1, x0
5309 */
5310
5311 /* First kill the tls_get_addr reloc on the bl instruction. */
5312 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5313 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5314
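/* 0xf2800000 is movk x0, #0, 0xd53bd041 is mrs x1, tpidr_el0 and
   0x8b000020 is add x0, x1, x0. */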
5315 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5316 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5317 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5318 return bfd_reloc_continue;
5319 }
5320 else
5321 {
5322 /* GD->IE relaxation
5323 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5324 BL __tls_get_addr => mrs x1, tpidr_el0
5325 R_AARCH64_CALL26
5326 NOP => add x0, x1, x0
5327 */
5328
5329 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5330
5331 /* Remove the relocation on the BL instruction. */
5332 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5333
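/* 0xf9400000 is ldr x0, [x0, #0]; the :gottprel_lo12: offset is filled
   in by the relaxed relocation. */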
5334 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5335
5336 /* We choose to fix up the BL and NOP instructions using the
5337 offset from the second relocation to allow flexibility in
5338 scheduling instructions between the ADD and BL. */
5339 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5340 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5341 return bfd_reloc_continue;
5342 }
5343
5344 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5345 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5346 /* GD->IE/LE relaxation:
5347 add x0, x0, #:tlsdesc_lo12:var => nop
5348 blr xd => nop
5349 */
5350 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5351 return bfd_reloc_ok;
5352
5353 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5354 /* IE->LE relaxation:
5355 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5356 */
5357 if (is_local)
5358 {
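/* Preserve the destination register (Rd, bits [4:0]) of the original
   ADRP while rewriting the rest of the instruction as a MOVZ. */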
5359 insn = bfd_getl32 (contents + rel->r_offset);
5360 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5361 }
5362 return bfd_reloc_continue;
5363
5364 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5365 /* IE->LE relaxation:
5366 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5367 */
5368 if (is_local)
5369 {
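/* Keep the destination register (Rd, bits [4:0]) and rewrite the rest
   of the instruction as movk xd, #0 (0xf2800000). */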
5370 insn = bfd_getl32 (contents + rel->r_offset);
5371 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5372 }
5373 return bfd_reloc_continue;
5374
5375 default:
5376 return bfd_reloc_continue;
5377 }
5378
5379 return bfd_reloc_ok;
5380 }
5381
5382 /* Relocate an AArch64 ELF section. */
5383
5384 static bfd_boolean
5385 elfNN_aarch64_relocate_section (bfd *output_bfd,
5386 struct bfd_link_info *info,
5387 bfd *input_bfd,
5388 asection *input_section,
5389 bfd_byte *contents,
5390 Elf_Internal_Rela *relocs,
5391 Elf_Internal_Sym *local_syms,
5392 asection **local_sections)
5393 {
5394 Elf_Internal_Shdr *symtab_hdr;
5395 struct elf_link_hash_entry **sym_hashes;
5396 Elf_Internal_Rela *rel;
5397 Elf_Internal_Rela *relend;
5398 const char *name;
5399 struct elf_aarch64_link_hash_table *globals;
5400 bfd_boolean save_addend = FALSE;
5401 bfd_vma addend = 0;
5402
5403 globals = elf_aarch64_hash_table (info);
5404
5405 symtab_hdr = &elf_symtab_hdr (input_bfd);
5406 sym_hashes = elf_sym_hashes (input_bfd);
5407
5408 rel = relocs;
5409 relend = relocs + input_section->reloc_count;
5410 for (; rel < relend; rel++)
5411 {
5412 unsigned int r_type;
5413 bfd_reloc_code_real_type bfd_r_type;
5414 bfd_reloc_code_real_type relaxed_bfd_r_type;
5415 reloc_howto_type *howto;
5416 unsigned long r_symndx;
5417 Elf_Internal_Sym *sym;
5418 asection *sec;
5419 struct elf_link_hash_entry *h;
5420 bfd_vma relocation;
5421 bfd_reloc_status_type r;
5422 arelent bfd_reloc;
5423 char sym_type;
5424 bfd_boolean unresolved_reloc = FALSE;
5425 char *error_message = NULL;
5426
5427 r_symndx = ELFNN_R_SYM (rel->r_info);
5428 r_type = ELFNN_R_TYPE (rel->r_info);
5429
5430 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5431 howto = bfd_reloc.howto;
5432
5433 if (howto == NULL)
5434 {
5435 (*_bfd_error_handler)
5436 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5437 input_bfd, input_section, r_type);
5438 return FALSE;
5439 }
5440 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5441
5442 h = NULL;
5443 sym = NULL;
5444 sec = NULL;
5445
5446 if (r_symndx < symtab_hdr->sh_info)
5447 {
5448 sym = local_syms + r_symndx;
5449 sym_type = ELFNN_ST_TYPE (sym->st_info);
5450 sec = local_sections[r_symndx];
5451
5452 /* An object file might have a reference to a local
5453 undefined symbol. This is a daft object file, but we
5454 should at least do something about it. */
5455 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5456 && bfd_is_und_section (sec)
5457 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5458 {
5459 if (!info->callbacks->undefined_symbol
5460 (info, bfd_elf_string_from_elf_section
5461 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5462 input_bfd, input_section, rel->r_offset, TRUE))
5463 return FALSE;
5464 }
5465
5466 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5467
5468 /* Relocate against local STT_GNU_IFUNC symbol. */
5469 if (!bfd_link_relocatable (info)
5470 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5471 {
5472 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5473 rel, FALSE);
5474 if (h == NULL)
5475 abort ();
5476
5477 /* Set STT_GNU_IFUNC symbol value. */
5478 h->root.u.def.value = sym->st_value;
5479 h->root.u.def.section = sec;
5480 }
5481 }
5482 else
5483 {
5484 bfd_boolean warned, ignored;
5485
5486 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5487 r_symndx, symtab_hdr, sym_hashes,
5488 h, sec, relocation,
5489 unresolved_reloc, warned, ignored);
5490
5491 sym_type = h->type;
5492 }
5493
5494 if (sec != NULL && discarded_section (sec))
5495 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5496 rel, 1, relend, howto, 0, contents);
5497
5498 if (bfd_link_relocatable (info))
5499 continue;
5500
5501 if (h != NULL)
5502 name = h->root.root.string;
5503 else
5504 {
5505 name = (bfd_elf_string_from_elf_section
5506 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5507 if (name == NULL || *name == '\0')
5508 name = bfd_section_name (input_bfd, sec);
5509 }
5510
5511 if (r_symndx != 0
5512 && r_type != R_AARCH64_NONE
5513 && r_type != R_AARCH64_NULL
5514 && (h == NULL
5515 || h->root.type == bfd_link_hash_defined
5516 || h->root.type == bfd_link_hash_defweak)
5517 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5518 {
5519 (*_bfd_error_handler)
5520 ((sym_type == STT_TLS
5521 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5522 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5523 input_bfd,
5524 input_section, (long) rel->r_offset, howto->name, name);
5525 }
5526
5527 /* We relax only if we can see that there can be a valid transition
5528 from one reloc type to another.
5529 We call elfNN_aarch64_final_link_relocate unless we're completely
5530 done, i.e., the relaxation produced the final output we want. */
5531
5532 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5533 h, r_symndx);
5534 if (relaxed_bfd_r_type != bfd_r_type)
5535 {
5536 bfd_r_type = relaxed_bfd_r_type;
5537 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5538 BFD_ASSERT (howto != NULL);
5539 r_type = howto->type;
5540 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5541 unresolved_reloc = 0;
5542 }
5543 else
5544 r = bfd_reloc_continue;
5545
5546 /* There may be multiple consecutive relocations for the
5547 same offset. In that case we are supposed to treat the
5548 output of each relocation as the addend for the next. */
5549 if (rel + 1 < relend
5550 && rel->r_offset == rel[1].r_offset
5551 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5552 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5553 save_addend = TRUE;
5554 else
5555 save_addend = FALSE;
5556
5557 if (r == bfd_reloc_continue)
5558 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5559 input_section, contents, rel,
5560 relocation, info, sec,
5561 h, &unresolved_reloc,
5562 save_addend, &addend, sym);
5563
5564 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5565 {
5566 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5567 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5568 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5569 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5570 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5571 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5572 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5573 {
5574 bfd_boolean need_relocs = FALSE;
5575 bfd_byte *loc;
5576 int indx;
5577 bfd_vma off;
5578
5579 off = symbol_got_offset (input_bfd, h, r_symndx);
5580 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5581
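/* A GD/LD access uses a pair of GOT slots: the code below places the
   module index (TLS_DTPMOD) in the first slot and the DTPREL value in
   the second, at off + GOT_ENTRY_SIZE. */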
5582 need_relocs =
5583 (bfd_link_pic (info) || indx != 0) &&
5584 (h == NULL
5585 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5586 || h->root.type != bfd_link_hash_undefweak);
5587
5588 BFD_ASSERT (globals->root.srelgot != NULL);
5589
5590 if (need_relocs)
5591 {
5592 Elf_Internal_Rela rela;
5593 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5594 rela.r_addend = 0;
5595 rela.r_offset = globals->root.sgot->output_section->vma +
5596 globals->root.sgot->output_offset + off;
5597
5598
5599 loc = globals->root.srelgot->contents;
5600 loc += globals->root.srelgot->reloc_count++
5601 * RELOC_SIZE (globals);
5602 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5603
5604 bfd_reloc_code_real_type real_type =
5605 elfNN_aarch64_bfd_reloc_from_type (r_type);
5606
5607 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5608 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
5609 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
5610 {
5611 /* For local dynamic, don't generate a DTPREL relocation at all.
5612 Initialize the DTPREL slot to zero, so that we get the module
5613 base address when the runtime TLS resolver is invoked. */
5614 bfd_put_NN (output_bfd, 0,
5615 globals->root.sgot->contents + off
5616 + GOT_ENTRY_SIZE);
5617 }
5618 else if (indx == 0)
5619 {
5620 bfd_put_NN (output_bfd,
5621 relocation - dtpoff_base (info),
5622 globals->root.sgot->contents + off
5623 + GOT_ENTRY_SIZE);
5624 }
5625 else
5626 {
5627 /* This TLS symbol is global. We emit a
5628 relocation to fix up the TLS offset at load
5629 time. */
5630 rela.r_info =
5631 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5632 rela.r_addend = 0;
5633 rela.r_offset =
5634 (globals->root.sgot->output_section->vma
5635 + globals->root.sgot->output_offset + off
5636 + GOT_ENTRY_SIZE);
5637
5638 loc = globals->root.srelgot->contents;
5639 loc += globals->root.srelgot->reloc_count++
5640 * RELOC_SIZE (globals);
5641 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5642 bfd_put_NN (output_bfd, (bfd_vma) 0,
5643 globals->root.sgot->contents + off
5644 + GOT_ENTRY_SIZE);
5645 }
5646 }
5647 else
5648 {
5649 bfd_put_NN (output_bfd, (bfd_vma) 1,
5650 globals->root.sgot->contents + off);
5651 bfd_put_NN (output_bfd,
5652 relocation - dtpoff_base (info),
5653 globals->root.sgot->contents + off
5654 + GOT_ENTRY_SIZE);
5655 }
5656
5657 symbol_got_offset_mark (input_bfd, h, r_symndx);
5658 }
5659 break;
5660
5661 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5662 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5663 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5664 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5665 {
5666 bfd_boolean need_relocs = FALSE;
5667 bfd_byte *loc;
5668 int indx;
5669 bfd_vma off;
5670
5671 off = symbol_got_offset (input_bfd, h, r_symndx);
5672
5673 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5674
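/* An IE access uses a single GOT slot holding the TP-relative offset,
   either fixed up at load time via an R_AARCH64_TLS_TPREL dynamic
   relocation or filled in statically below. */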
5675 need_relocs =
5676 (bfd_link_pic (info) || indx != 0) &&
5677 (h == NULL
5678 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5679 || h->root.type != bfd_link_hash_undefweak);
5680
5681 BFD_ASSERT (globals->root.srelgot != NULL);
5682
5683 if (need_relocs)
5684 {
5685 Elf_Internal_Rela rela;
5686
5687 if (indx == 0)
5688 rela.r_addend = relocation - dtpoff_base (info);
5689 else
5690 rela.r_addend = 0;
5691
5692 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5693 rela.r_offset = globals->root.sgot->output_section->vma +
5694 globals->root.sgot->output_offset + off;
5695
5696 loc = globals->root.srelgot->contents;
5697 loc += globals->root.srelgot->reloc_count++
5698 * RELOC_SIZE (globals);
5699
5700 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5701
5702 bfd_put_NN (output_bfd, rela.r_addend,
5703 globals->root.sgot->contents + off);
5704 }
5705 else
5706 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5707 globals->root.sgot->contents + off);
5708
5709 symbol_got_offset_mark (input_bfd, h, r_symndx);
5710 }
5711 break;
5712
5713 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5714 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5715 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5716 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5717 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5718 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5719 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5720 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5721 break;
5722
5723 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5724 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5725 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5726 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5727 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5728 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5729 {
5730 bfd_boolean need_relocs = FALSE;
5731 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5732 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5733
5734 need_relocs = (h == NULL
5735 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5736 || h->root.type != bfd_link_hash_undefweak);
5737
5738 BFD_ASSERT (globals->root.srelgot != NULL);
5739 BFD_ASSERT (globals->root.sgot != NULL);
5740
5741 if (need_relocs)
5742 {
5743 bfd_byte *loc;
5744 Elf_Internal_Rela rela;
5745 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5746
5747 rela.r_addend = 0;
5748 rela.r_offset = (globals->root.sgotplt->output_section->vma
5749 + globals->root.sgotplt->output_offset
5750 + off + globals->sgotplt_jump_table_size);
5751
5752 if (indx == 0)
5753 rela.r_addend = relocation - dtpoff_base (info);
5754
5755 /* Allocate the next available slot in the PLT reloc
5756 section to hold our R_AARCH64_TLSDESC; the next
5757 available slot is determined from reloc_count,
5758 which we then increment. Note that reloc_count was
5759 artificially moved down while allocating slots for
5760 real PLT relocs, so that all of the PLT relocs
5761 fit above the initial reloc_count and the extra
5762 TLSDESC relocs fit below. */
5763 loc = globals->root.srelplt->contents;
5764 loc += globals->root.srelplt->reloc_count++
5765 * RELOC_SIZE (globals);
5766
5767 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5768
5769 bfd_put_NN (output_bfd, (bfd_vma) 0,
5770 globals->root.sgotplt->contents + off +
5771 globals->sgotplt_jump_table_size);
5772 bfd_put_NN (output_bfd, (bfd_vma) 0,
5773 globals->root.sgotplt->contents + off +
5774 globals->sgotplt_jump_table_size +
5775 GOT_ENTRY_SIZE);
5776 }
5777
5778 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5779 }
5780 break;
5781 default:
5782 break;
5783 }
5784
5785 if (!save_addend)
5786 addend = 0;
5787
5788
5789 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5790 because such sections are not SEC_ALLOC and thus ld.so will
5791 not process them. */
5792 if (unresolved_reloc
5793 && !((input_section->flags & SEC_DEBUGGING) != 0
5794 && h->def_dynamic)
5795 && _bfd_elf_section_offset (output_bfd, info, input_section,
5796 rel->r_offset) != (bfd_vma) - 1)
5797 {
5798 (*_bfd_error_handler)
5799 (_
5800 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5801 input_bfd, input_section, (long) rel->r_offset, howto->name,
5802 h->root.root.string);
5803 return FALSE;
5804 }
5805
5806 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5807 {
5808 bfd_reloc_code_real_type real_r_type
5809 = elfNN_aarch64_bfd_reloc_from_type (r_type);
5810
5811 switch (r)
5812 {
5813 case bfd_reloc_overflow:
5814 if (!(*info->callbacks->reloc_overflow)
5815 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5816 input_bfd, input_section, rel->r_offset))
5817 return FALSE;
5818 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5819 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5820 {
5821 (*info->callbacks->warning)
5822 (info,
5823 _("Too many GOT entries for -fpic, "
5824 "please recompile with -fPIC"),
5825 name, input_bfd, input_section, rel->r_offset);
5826 return FALSE;
5827 }
5828 break;
5829
5830 case bfd_reloc_undefined:
5831 if (!((*info->callbacks->undefined_symbol)
5832 (info, name, input_bfd, input_section,
5833 rel->r_offset, TRUE)))
5834 return FALSE;
5835 break;
5836
5837 case bfd_reloc_outofrange:
5838 error_message = _("out of range");
5839 goto common_error;
5840
5841 case bfd_reloc_notsupported:
5842 error_message = _("unsupported relocation");
5843 goto common_error;
5844
5845 case bfd_reloc_dangerous:
5846 /* error_message should already be set. */
5847 goto common_error;
5848
5849 default:
5850 error_message = _("unknown error");
5851 /* Fall through. */
5852
5853 common_error:
5854 BFD_ASSERT (error_message != NULL);
5855 if (!((*info->callbacks->reloc_dangerous)
5856 (info, error_message, input_bfd, input_section,
5857 rel->r_offset)))
5858 return FALSE;
5859 break;
5860 }
5861 }
5862 }
5863
5864 return TRUE;
5865 }
5866
5867 /* Set the right machine number. */
5868
5869 static bfd_boolean
5870 elfNN_aarch64_object_p (bfd *abfd)
5871 {
5872 #if ARCH_SIZE == 32
5873 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5874 #else
5875 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5876 #endif
5877 return TRUE;
5878 }
5879
5880 /* Function to keep AArch64 specific flags in the ELF header. */
5881
5882 static bfd_boolean
5883 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5884 {
5885 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5886 {
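/* The flags have already been initialised to a different value;
   leave them unchanged. */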
5887 }
5888 else
5889 {
5890 elf_elfheader (abfd)->e_flags = flags;
5891 elf_flags_init (abfd) = TRUE;
5892 }
5893
5894 return TRUE;
5895 }
5896
5897 /* Merge backend specific data from an object file to the output
5898 object file when linking. */
5899
5900 static bfd_boolean
5901 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5902 {
5903 flagword out_flags;
5904 flagword in_flags;
5905 bfd_boolean flags_compatible = TRUE;
5906 asection *sec;
5907
5908 /* Check if we have the same endianness. */
5909 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5910 return FALSE;
5911
5912 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5913 return TRUE;
5914
5915 /* The input BFD must have had its flags initialised. */
5916 /* The following seems bogus to me -- The flags are initialized in
5917 the assembler but I don't think an elf_flags_init field is
5918 written into the object. */
5919 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5920
5921 in_flags = elf_elfheader (ibfd)->e_flags;
5922 out_flags = elf_elfheader (obfd)->e_flags;
5923
5924 if (!elf_flags_init (obfd))
5925 {
5926 /* If the input is the default architecture and had the default
5927 flags then do not bother setting the flags for the output
5928 architecture; instead allow future merges to do this. If no
5929 future merges ever set these flags then they will retain their
5930 uninitialised values, which, surprise surprise, correspond
5931 to the default values. */
5932 if (bfd_get_arch_info (ibfd)->the_default
5933 && elf_elfheader (ibfd)->e_flags == 0)
5934 return TRUE;
5935
5936 elf_flags_init (obfd) = TRUE;
5937 elf_elfheader (obfd)->e_flags = in_flags;
5938
5939 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5940 && bfd_get_arch_info (obfd)->the_default)
5941 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5942 bfd_get_mach (ibfd));
5943
5944 return TRUE;
5945 }
5946
5947 /* Identical flags must be compatible. */
5948 if (in_flags == out_flags)
5949 return TRUE;
5950
5951 /* Check to see if the input BFD actually contains any sections. If
5952 not, its flags may not have been initialised either, but it
5953 cannot actually cause any incompatibility. Do not short-circuit
5954 dynamic objects; their section list may be emptied by
5955 elf_link_add_object_symbols.
5956
5957 Also check to see if there are no code sections in the input.
5958 In this case there is no need to check for code specific flags.
5959 XXX - do we need to worry about floating-point format compatibility
5960 in data sections ? */
5961 if (!(ibfd->flags & DYNAMIC))
5962 {
5963 bfd_boolean null_input_bfd = TRUE;
5964 bfd_boolean only_data_sections = TRUE;
5965
5966 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5967 {
5968 if ((bfd_get_section_flags (ibfd, sec)
5969 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5970 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5971 only_data_sections = FALSE;
5972
5973 null_input_bfd = FALSE;
5974 break;
5975 }
5976
5977 if (null_input_bfd || only_data_sections)
5978 return TRUE;
5979 }
5980
5981 return flags_compatible;
5982 }
5983
5984 /* Display the flags field. */
5985
5986 static bfd_boolean
5987 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5988 {
5989 FILE *file = (FILE *) ptr;
5990 unsigned long flags;
5991
5992 BFD_ASSERT (abfd != NULL && ptr != NULL);
5993
5994 /* Print normal ELF private data. */
5995 _bfd_elf_print_private_bfd_data (abfd, ptr);
5996
5997 flags = elf_elfheader (abfd)->e_flags;
5998 /* Ignore init flag - it may not be set, despite the flags field
5999 containing valid data. */
6000
6001 /* xgettext:c-format */
6002 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
6003
6004 if (flags)
6005 fprintf (file, _("<Unrecognised flag bits set>"));
6006
6007 fputc ('\n', file);
6008
6009 return TRUE;
6010 }
6011
6012 /* Update the got entry reference counts for the section being removed. */
6013
6014 static bfd_boolean
6015 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
6016 struct bfd_link_info *info,
6017 asection *sec,
6018 const Elf_Internal_Rela * relocs)
6019 {
6020 struct elf_aarch64_link_hash_table *htab;
6021 Elf_Internal_Shdr *symtab_hdr;
6022 struct elf_link_hash_entry **sym_hashes;
6023 struct elf_aarch64_local_symbol *locals;
6024 const Elf_Internal_Rela *rel, *relend;
6025
6026 if (bfd_link_relocatable (info))
6027 return TRUE;
6028
6029 htab = elf_aarch64_hash_table (info);
6030
6031 if (htab == NULL)
6032 return FALSE;
6033
6034 elf_section_data (sec)->local_dynrel = NULL;
6035
6036 symtab_hdr = &elf_symtab_hdr (abfd);
6037 sym_hashes = elf_sym_hashes (abfd);
6038
6039 locals = elf_aarch64_locals (abfd);
6040
6041 relend = relocs + sec->reloc_count;
6042 for (rel = relocs; rel < relend; rel++)
6043 {
6044 unsigned long r_symndx;
6045 unsigned int r_type;
6046 struct elf_link_hash_entry *h = NULL;
6047
6048 r_symndx = ELFNN_R_SYM (rel->r_info);
6049
6050 if (r_symndx >= symtab_hdr->sh_info)
6051 {
6052
6053 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6054 while (h->root.type == bfd_link_hash_indirect
6055 || h->root.type == bfd_link_hash_warning)
6056 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6057 }
6058 else
6059 {
6060 Elf_Internal_Sym *isym;
6061
6062 /* A local symbol. */
6063 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6064 abfd, r_symndx);
6065
6066 /* Check relocation against local STT_GNU_IFUNC symbol. */
6067 if (isym != NULL
6068 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6069 {
6070 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6071 if (h == NULL)
6072 abort ();
6073 }
6074 }
6075
6076 if (h)
6077 {
6078 struct elf_aarch64_link_hash_entry *eh;
6079 struct elf_dyn_relocs **pp;
6080 struct elf_dyn_relocs *p;
6081
6082 eh = (struct elf_aarch64_link_hash_entry *) h;
6083
6084 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6085 if (p->sec == sec)
6086 {
6087 /* Everything must go for SEC. */
6088 *pp = p->next;
6089 break;
6090 }
6091 }
6092
6093 r_type = ELFNN_R_TYPE (rel->r_info);
6094 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6095 {
6096 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6097 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6098 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6099 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6100 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6101 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6102 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6103 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6104 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6105 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6106 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6107 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6108 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6109 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6110 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6111 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6112 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6113 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6114 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6115 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6116 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6117 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6118 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6119 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6120 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6121 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6122 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6123 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6124 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6125 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6126 if (h != NULL)
6127 {
6128 if (h->got.refcount > 0)
6129 h->got.refcount -= 1;
6130
6131 if (h->type == STT_GNU_IFUNC)
6132 {
6133 if (h->plt.refcount > 0)
6134 h->plt.refcount -= 1;
6135 }
6136 }
6137 else if (locals != NULL)
6138 {
6139 if (locals[r_symndx].got_refcount > 0)
6140 locals[r_symndx].got_refcount -= 1;
6141 }
6142 break;
6143
6144 case BFD_RELOC_AARCH64_CALL26:
6145 case BFD_RELOC_AARCH64_JUMP26:
6146 /* If this is a local symbol then we resolve it
6147 directly without creating a PLT entry. */
6148 if (h == NULL)
6149 continue;
6150
6151 if (h->plt.refcount > 0)
6152 h->plt.refcount -= 1;
6153 break;
6154
6155 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6156 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6157 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6158 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6159 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6160 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6161 case BFD_RELOC_AARCH64_MOVW_G3:
6162 case BFD_RELOC_AARCH64_NN:
6163 if (h != NULL && bfd_link_executable (info))
6164 {
6165 if (h->plt.refcount > 0)
6166 h->plt.refcount -= 1;
6167 }
6168 break;
6169
6170 default:
6171 break;
6172 }
6173 }
6174
6175 return TRUE;
6176 }
6177
6178 /* Adjust a symbol defined by a dynamic object and referenced by a
6179 regular object. The current definition is in some section of the
6180 dynamic object, but we're not including those sections. We have to
6181 change the definition to something the rest of the link can
6182 understand. */
6183
6184 static bfd_boolean
6185 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6186 struct elf_link_hash_entry *h)
6187 {
6188 struct elf_aarch64_link_hash_table *htab;
6189 asection *s;
6190
6191 /* If this is a function, put it in the procedure linkage table. We
6192 will fill in the contents of the procedure linkage table later,
6193 when we know the address of the .got section. */
6194 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6195 {
6196 if (h->plt.refcount <= 0
6197 || (h->type != STT_GNU_IFUNC
6198 && (SYMBOL_CALLS_LOCAL (info, h)
6199 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6200 && h->root.type == bfd_link_hash_undefweak))))
6201 {
6202 /* This case can occur if we saw a CALL26 reloc in
6203 an input file, but the symbol wasn't referred to
6204 by a dynamic object or all references were
6205 garbage collected. In that case we can end up
6206 resolving the call directly, without a PLT entry. */
6207 h->plt.offset = (bfd_vma) - 1;
6208 h->needs_plt = 0;
6209 }
6210
6211 return TRUE;
6212 }
6213 else
6214 /* Otherwise, reset to -1. */
6215 h->plt.offset = (bfd_vma) - 1;
6216
6217
6218 /* If this is a weak symbol, and there is a real definition, the
6219 processor independent code will have arranged for us to see the
6220 real definition first, and we can just use the same value. */
6221 if (h->u.weakdef != NULL)
6222 {
6223 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6224 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6225 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6226 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6227 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6228 h->non_got_ref = h->u.weakdef->non_got_ref;
6229 return TRUE;
6230 }
6231
6232 /* If we are creating a shared library, we must presume that the
6233 only references to the symbol are via the global offset table.
6234 For such cases we need not do anything here; the relocations will
6235 be handled correctly by relocate_section. */
6236 if (bfd_link_pic (info))
6237 return TRUE;
6238
6239 /* If there are no references to this symbol that do not use the
6240 GOT, we don't need to generate a copy reloc. */
6241 if (!h->non_got_ref)
6242 return TRUE;
6243
6244 /* If -z nocopyreloc was given, we won't generate them either. */
6245 if (info->nocopyreloc)
6246 {
6247 h->non_got_ref = 0;
6248 return TRUE;
6249 }
6250
6251 /* We must allocate the symbol in our .dynbss section, which will
6252 become part of the .bss section of the executable. There will be
6253 an entry for this symbol in the .dynsym section. The dynamic
6254 object will contain position independent code, so all references
6255 from the dynamic object to this symbol will go through the global
6256 offset table. The dynamic linker will use the .dynsym entry to
6257 determine the address it must put in the global offset table, so
6258 both the dynamic object and the regular object will refer to the
6259 same memory location for the variable. */
6260
6261 htab = elf_aarch64_hash_table (info);
6262
6263 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6264 to copy the initial value out of the dynamic object and into the
6265 runtime process image. */
6266 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6267 {
6268 htab->srelbss->size += RELOC_SIZE (htab);
6269 h->needs_copy = 1;
6270 }
6271
6272 s = htab->sdynbss;
6273
6274 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6275
6276 }
6277
6278 static bfd_boolean
6279 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6280 {
6281 struct elf_aarch64_local_symbol *locals;
6282 locals = elf_aarch64_locals (abfd);
6283 if (locals == NULL)
6284 {
6285 locals = (struct elf_aarch64_local_symbol *)
6286 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6287 if (locals == NULL)
6288 return FALSE;
6289 elf_aarch64_locals (abfd) = locals;
6290 }
6291 return TRUE;
6292 }
6293
6294 /* Create the .got section to hold the global offset table. */
6295
6296 static bfd_boolean
6297 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6298 {
6299 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6300 flagword flags;
6301 asection *s;
6302 struct elf_link_hash_entry *h;
6303 struct elf_link_hash_table *htab = elf_hash_table (info);
6304
6305 /* This function may be called more than once. */
6306 s = bfd_get_linker_section (abfd, ".got");
6307 if (s != NULL)
6308 return TRUE;
6309
6310 flags = bed->dynamic_sec_flags;
6311
6312 s = bfd_make_section_anyway_with_flags (abfd,
6313 (bed->rela_plts_and_copies_p
6314 ? ".rela.got" : ".rel.got"),
6315 (bed->dynamic_sec_flags
6316 | SEC_READONLY));
6317 if (s == NULL
6318 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6319 return FALSE;
6320 htab->srelgot = s;
6321
6322 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6323 if (s == NULL
6324 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6325 return FALSE;
6326 htab->sgot = s;
6327 htab->sgot->size += GOT_ENTRY_SIZE;
6328
6329 if (bed->want_got_sym)
6330 {
6331 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6332 (or .got.plt) section. We don't do this in the linker script
6333 because we don't want to define the symbol if we are not creating
6334 a global offset table. */
6335 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6336 "_GLOBAL_OFFSET_TABLE_");
6337 elf_hash_table (info)->hgot = h;
6338 if (h == NULL)
6339 return FALSE;
6340 }
6341
6342 if (bed->want_got_plt)
6343 {
6344 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6345 if (s == NULL
6346 || !bfd_set_section_alignment (abfd, s,
6347 bed->s->log_file_align))
6348 return FALSE;
6349 htab->sgotplt = s;
6350 }
6351
6352 /* The first bit of the global offset table is the header. */
6353 s->size += bed->got_header_size;
6354
6355 return TRUE;
6356 }
6357
6358 /* Look through the relocs for a section during the first phase. */
6359
6360 static bfd_boolean
6361 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6362 asection *sec, const Elf_Internal_Rela *relocs)
6363 {
6364 Elf_Internal_Shdr *symtab_hdr;
6365 struct elf_link_hash_entry **sym_hashes;
6366 const Elf_Internal_Rela *rel;
6367 const Elf_Internal_Rela *rel_end;
6368 asection *sreloc;
6369
6370 struct elf_aarch64_link_hash_table *htab;
6371
6372 if (bfd_link_relocatable (info))
6373 return TRUE;
6374
6375 BFD_ASSERT (is_aarch64_elf (abfd));
6376
6377 htab = elf_aarch64_hash_table (info);
6378 sreloc = NULL;
6379
6380 symtab_hdr = &elf_symtab_hdr (abfd);
6381 sym_hashes = elf_sym_hashes (abfd);
6382
6383 rel_end = relocs + sec->reloc_count;
6384 for (rel = relocs; rel < rel_end; rel++)
6385 {
6386 struct elf_link_hash_entry *h;
6387 unsigned long r_symndx;
6388 unsigned int r_type;
6389 bfd_reloc_code_real_type bfd_r_type;
6390 Elf_Internal_Sym *isym;
6391
6392 r_symndx = ELFNN_R_SYM (rel->r_info);
6393 r_type = ELFNN_R_TYPE (rel->r_info);
6394
6395 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6396 {
6397 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6398 r_symndx);
6399 return FALSE;
6400 }
6401
6402 if (r_symndx < symtab_hdr->sh_info)
6403 {
6404 /* A local symbol. */
6405 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6406 abfd, r_symndx);
6407 if (isym == NULL)
6408 return FALSE;
6409
6410 /* Check relocation against local STT_GNU_IFUNC symbol. */
6411 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6412 {
6413 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6414 TRUE);
6415 if (h == NULL)
6416 return FALSE;
6417
6418 /* Fake a STT_GNU_IFUNC symbol. */
6419 h->type = STT_GNU_IFUNC;
6420 h->def_regular = 1;
6421 h->ref_regular = 1;
6422 h->forced_local = 1;
6423 h->root.type = bfd_link_hash_defined;
6424 }
6425 else
6426 h = NULL;
6427 }
6428 else
6429 {
6430 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6431 while (h->root.type == bfd_link_hash_indirect
6432 || h->root.type == bfd_link_hash_warning)
6433 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6434
6435 /* PR15323, ref flags aren't set for references in the same
6436 object. */
6437 h->root.non_ir_ref = 1;
6438 }
6439
6440 /* Could be done earlier, if h were already available. */
6441 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6442
6443 if (h != NULL)
6444 {
6445 /* Create the ifunc sections for static executables. If we
6446 never see an indirect function symbol and are not building
6447 a static executable, those sections will be empty and
6448 won't appear in the output. */
6449 switch (bfd_r_type)
6450 {
6451 default:
6452 break;
6453
6454 case BFD_RELOC_AARCH64_ADD_LO12:
6455 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6456 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6457 case BFD_RELOC_AARCH64_CALL26:
6458 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6459 case BFD_RELOC_AARCH64_JUMP26:
6460 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6461 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6462 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6463 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6464 case BFD_RELOC_AARCH64_NN:
6465 if (htab->root.dynobj == NULL)
6466 htab->root.dynobj = abfd;
6467 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6468 return FALSE;
6469 break;
6470 }
6471
6472 /* It is referenced by a non-shared object. */
6473 h->ref_regular = 1;
6474 h->root.non_ir_ref = 1;
6475 }
6476
6477 switch (bfd_r_type)
6478 {
6479 case BFD_RELOC_AARCH64_NN:
6480
6481 /* We don't need to handle relocs into sections not going into
6482 the "real" output. */
6483 if ((sec->flags & SEC_ALLOC) == 0)
6484 break;
6485
6486 if (h != NULL)
6487 {
6488 if (!bfd_link_pic (info))
6489 h->non_got_ref = 1;
6490
6491 h->plt.refcount += 1;
6492 h->pointer_equality_needed = 1;
6493 }
6494
6495 /* No need to do anything if we're not creating a shared
6496 object. */
6497 if (! bfd_link_pic (info))
6498 break;
6499
6500 {
6501 struct elf_dyn_relocs *p;
6502 struct elf_dyn_relocs **head;
6503
6504 /* We must copy these reloc types into the output file.
6505 Create a reloc section in dynobj and make room for
6506 this reloc. */
6507 if (sreloc == NULL)
6508 {
6509 if (htab->root.dynobj == NULL)
6510 htab->root.dynobj = abfd;
6511
6512 sreloc = _bfd_elf_make_dynamic_reloc_section
6513 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6514
6515 if (sreloc == NULL)
6516 return FALSE;
6517 }
6518
6519 /* If this is a global symbol, we count the number of
6520 relocations we need for this symbol. */
6521 if (h != NULL)
6522 {
6523 struct elf_aarch64_link_hash_entry *eh;
6524 eh = (struct elf_aarch64_link_hash_entry *) h;
6525 head = &eh->dyn_relocs;
6526 }
6527 else
6528 {
6529 /* Track dynamic relocs needed for local syms too.
6530 We really need local syms available to do this
6531 easily. Oh well. */
6532
6533 asection *s;
6534 void **vpp;
6535
6536 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6537 abfd, r_symndx);
6538 if (isym == NULL)
6539 return FALSE;
6540
6541 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6542 if (s == NULL)
6543 s = sec;
6544
6545 /* Beware of type punned pointers vs strict aliasing
6546 rules. */
6547 vpp = &(elf_section_data (s)->local_dynrel);
6548 head = (struct elf_dyn_relocs **) vpp;
6549 }
6550
6551 p = *head;
6552 if (p == NULL || p->sec != sec)
6553 {
6554 bfd_size_type amt = sizeof *p;
6555 p = ((struct elf_dyn_relocs *)
6556 bfd_zalloc (htab->root.dynobj, amt));
6557 if (p == NULL)
6558 return FALSE;
6559 p->next = *head;
6560 *head = p;
6561 p->sec = sec;
6562 }
6563
6564 p->count += 1;
6565
6566 }
6567 break;
6568
6569 /* RR: We probably want to keep a consistency check that
6570 there are no dangling GOT_PAGE relocs. */
6571 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6572 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6573 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6574 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6575 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6576 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6577 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6578 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6579 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6580 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6581 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6582 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6583 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6584 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6585 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6586 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6587 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6588 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6589 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6590 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6591 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6592 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6593 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6594 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6595 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6596 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6597 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6598 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6599 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6600 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6601 {
6602 unsigned got_type;
6603 unsigned old_got_type;
6604
6605 got_type = aarch64_reloc_got_type (bfd_r_type);
6606
6607 if (h)
6608 {
6609 h->got.refcount += 1;
6610 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6611 }
6612 else
6613 {
6614 struct elf_aarch64_local_symbol *locals;
6615
6616 if (!elfNN_aarch64_allocate_local_symbols
6617 (abfd, symtab_hdr->sh_info))
6618 return FALSE;
6619
6620 locals = elf_aarch64_locals (abfd);
6621 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6622 locals[r_symndx].got_refcount += 1;
6623 old_got_type = locals[r_symndx].got_type;
6624 }
6625
6626 /* If a variable is accessed with both general dynamic TLS
6627 methods, two slots may be created. */
6628 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6629 got_type |= old_got_type;
6630
6631 /* We will already have issued an error message if there
6632 is a TLS/non-TLS mismatch, based on the symbol type.
6633 So just combine any TLS types needed. */
6634 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6635 && got_type != GOT_NORMAL)
6636 got_type |= old_got_type;
6637
6638 /* If the symbol is accessed by both IE and GD methods, we
6639 are able to relax. Turn off the GD flag, without
6640 disturbing any other TLS types that may be
6641 involved. */
6642 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6643 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6644
6645 if (old_got_type != got_type)
6646 {
6647 if (h != NULL)
6648 elf_aarch64_hash_entry (h)->got_type = got_type;
6649 else
6650 {
6651 struct elf_aarch64_local_symbol *locals;
6652 locals = elf_aarch64_locals (abfd);
6653 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6654 locals[r_symndx].got_type = got_type;
6655 }
6656 }
6657
6658 if (htab->root.dynobj == NULL)
6659 htab->root.dynobj = abfd;
6660 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6661 return FALSE;
6662 break;
6663 }
6664
6665 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6666 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6667 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6668 case BFD_RELOC_AARCH64_MOVW_G3:
6669 if (bfd_link_pic (info))
6670 {
6671 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6672 (*_bfd_error_handler)
6673 (_("%B: relocation %s against `%s' can not be used when making "
6674 "a shared object; recompile with -fPIC"),
6675 abfd, elfNN_aarch64_howto_table[howto_index].name,
6676 (h) ? h->root.root.string : "a local symbol");
6677 bfd_set_error (bfd_error_bad_value);
6678 return FALSE;
6679 }
6680
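/* Fall through. */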
6681 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6682 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6683 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6684 if (h != NULL && bfd_link_executable (info))
6685 {
6686 /* If this reloc is in a read-only section, we might
6687 need a copy reloc. We can't check reliably at this
6688 stage whether the section is read-only, as input
6689 sections have not yet been mapped to output sections.
6690 Tentatively set the flag for now, and correct in
6691 adjust_dynamic_symbol. */
6692 h->non_got_ref = 1;
6693 h->plt.refcount += 1;
6694 h->pointer_equality_needed = 1;
6695 }
6696 /* FIXME: RR we need to handle these in shared libraries
6697 and essentially bomb out, as these are non-PIC
6698 relocations in shared libraries. */
6699 break;
6700
6701 case BFD_RELOC_AARCH64_CALL26:
6702 case BFD_RELOC_AARCH64_JUMP26:
6703 /* If this is a local symbol then we resolve it
6704 directly without creating a PLT entry. */
6705 if (h == NULL)
6706 continue;
6707
6708 h->needs_plt = 1;
6709 if (h->plt.refcount <= 0)
6710 h->plt.refcount = 1;
6711 else
6712 h->plt.refcount += 1;
6713 break;
6714
6715 default:
6716 break;
6717 }
6718 }
6719
6720 return TRUE;
6721 }
6722
6723 /* Treat mapping symbols as special target symbols. */
6724
6725 static bfd_boolean
6726 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6727 asymbol *sym)
6728 {
6729 return bfd_is_aarch64_special_symbol_name (sym->name,
6730 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6731 }
6732
6733 /* This is a copy of elf_find_function () from elf.c except that
6734 AArch64 mapping symbols are ignored when looking for function names. */
6735
6736 static bfd_boolean
6737 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6738 asymbol **symbols,
6739 asection *section,
6740 bfd_vma offset,
6741 const char **filename_ptr,
6742 const char **functionname_ptr)
6743 {
6744 const char *filename = NULL;
6745 asymbol *func = NULL;
6746 bfd_vma low_func = 0;
6747 asymbol **p;
6748
6749 for (p = symbols; *p != NULL; p++)
6750 {
6751 elf_symbol_type *q;
6752
6753 q = (elf_symbol_type *) * p;
6754
6755 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6756 {
6757 default:
6758 break;
6759 case STT_FILE:
6760 filename = bfd_asymbol_name (&q->symbol);
6761 break;
6762 case STT_FUNC:
6763 case STT_NOTYPE:
6764 /* Skip mapping symbols. */
6765 if ((q->symbol.flags & BSF_LOCAL)
6766 && (bfd_is_aarch64_special_symbol_name
6767 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6768 continue;
6769 /* Fall through. */
6770 if (bfd_get_section (&q->symbol) == section
6771 && q->symbol.value >= low_func && q->symbol.value <= offset)
6772 {
6773 func = (asymbol *) q;
6774 low_func = q->symbol.value;
6775 }
6776 break;
6777 }
6778 }
6779
6780 if (func == NULL)
6781 return FALSE;
6782
6783 if (filename_ptr)
6784 *filename_ptr = filename;
6785 if (functionname_ptr)
6786 *functionname_ptr = bfd_asymbol_name (func);
6787
6788 return TRUE;
6789 }
6790
6791
6792 /* Find the nearest line to a particular section and offset, for error
6793 reporting. This code is a duplicate of the code in elf.c, except
6794 that it uses aarch64_elf_find_function. */
6795
6796 static bfd_boolean
6797 elfNN_aarch64_find_nearest_line (bfd *abfd,
6798 asymbol **symbols,
6799 asection *section,
6800 bfd_vma offset,
6801 const char **filename_ptr,
6802 const char **functionname_ptr,
6803 unsigned int *line_ptr,
6804 unsigned int *discriminator_ptr)
6805 {
6806 bfd_boolean found = FALSE;
6807
6808 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6809 filename_ptr, functionname_ptr,
6810 line_ptr, discriminator_ptr,
6811 dwarf_debug_sections, 0,
6812 &elf_tdata (abfd)->dwarf2_find_line_info))
6813 {
6814 if (!*functionname_ptr)
6815 aarch64_elf_find_function (abfd, symbols, section, offset,
6816 *filename_ptr ? NULL : filename_ptr,
6817 functionname_ptr);
6818
6819 return TRUE;
6820 }
6821
6822 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6823 toolchain uses DWARF1. */
6824
6825 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6826 &found, filename_ptr,
6827 functionname_ptr, line_ptr,
6828 &elf_tdata (abfd)->line_info))
6829 return FALSE;
6830
6831 if (found && (*functionname_ptr || *line_ptr))
6832 return TRUE;
6833
6834 if (symbols == NULL)
6835 return FALSE;
6836
6837 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6838 filename_ptr, functionname_ptr))
6839 return FALSE;
6840
6841 *line_ptr = 0;
6842 return TRUE;
6843 }
6844
6845 static bfd_boolean
6846 elfNN_aarch64_find_inliner_info (bfd *abfd,
6847 const char **filename_ptr,
6848 const char **functionname_ptr,
6849 unsigned int *line_ptr)
6850 {
6851 bfd_boolean found;
6852 found = _bfd_dwarf2_find_inliner_info
6853 (abfd, filename_ptr,
6854 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6855 return found;
6856 }
6857
6858
6859 static void
6860 elfNN_aarch64_post_process_headers (bfd *abfd,
6861 struct bfd_link_info *link_info)
6862 {
6863 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6864
6865 i_ehdrp = elf_elfheader (abfd);
6866 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6867
6868 _bfd_elf_post_process_headers (abfd, link_info);
6869 }
6870
6871 static enum elf_reloc_type_class
6872 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6873 const asection *rel_sec ATTRIBUTE_UNUSED,
6874 const Elf_Internal_Rela *rela)
6875 {
6876 switch ((int) ELFNN_R_TYPE (rela->r_info))
6877 {
6878 case AARCH64_R (RELATIVE):
6879 return reloc_class_relative;
6880 case AARCH64_R (JUMP_SLOT):
6881 return reloc_class_plt;
6882 case AARCH64_R (COPY):
6883 return reloc_class_copy;
6884 default:
6885 return reloc_class_normal;
6886 }
6887 }
6888
6889 /* Handle an AArch64 specific section when reading an object file. This is
6890 called when bfd_section_from_shdr finds a section with an unknown
6891 type. */
6892
6893 static bfd_boolean
6894 elfNN_aarch64_section_from_shdr (bfd *abfd,
6895 Elf_Internal_Shdr *hdr,
6896 const char *name, int shindex)
6897 {
6898 /* There ought to be a place to keep ELF backend specific flags, but
6899 at the moment there isn't one. We just keep track of the
6900 sections by their name, instead. Fortunately, the ABI gives
6901 names for all the AArch64 specific sections, so we will probably get
6902 away with this. */
6903 switch (hdr->sh_type)
6904 {
6905 case SHT_AARCH64_ATTRIBUTES:
6906 break;
6907
6908 default:
6909 return FALSE;
6910 }
6911
6912 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6913 return FALSE;
6914
6915 return TRUE;
6916 }
6917
6918 /* A structure used to record a list of sections, independently
6919 of the next and prev fields in the asection structure. */
6920 typedef struct section_list
6921 {
6922 asection *sec;
6923 struct section_list *next;
6924 struct section_list *prev;
6925 }
6926 section_list;
6927
6928 /* Unfortunately we need to keep a list of sections for which
6929 an _aarch64_elf_section_data structure has been allocated. This
6930 is because it is possible for functions like elfNN_aarch64_write_section
6931 to be called on a section which has had an elf_data_structure
6932 allocated for it (and so the used_by_bfd field is valid) but
6933 for which the AArch64 extended version of this structure - the
6934 _aarch64_elf_section_data structure - has not been allocated. */
6935 static section_list *sections_with_aarch64_elf_section_data = NULL;
6936
6937 static void
6938 record_section_with_aarch64_elf_section_data (asection *sec)
6939 {
6940 struct section_list *entry;
6941
6942 entry = bfd_malloc (sizeof (*entry));
6943 if (entry == NULL)
6944 return;
6945 entry->sec = sec;
6946 entry->next = sections_with_aarch64_elf_section_data;
6947 entry->prev = NULL;
6948 if (entry->next != NULL)
6949 entry->next->prev = entry;
6950 sections_with_aarch64_elf_section_data = entry;
6951 }
6952
6953 static struct section_list *
6954 find_aarch64_elf_section_entry (asection *sec)
6955 {
6956 struct section_list *entry;
6957 static struct section_list *last_entry = NULL;
6958
6959 /* This is a short cut for the typical case where the sections are added
6960 to the sections_with_aarch64_elf_section_data list in forward order and
6961 then looked up here in backwards order. This makes a real difference
6962 to the ld-srec/sec64k.exp linker test. */
6963 entry = sections_with_aarch64_elf_section_data;
6964 if (last_entry != NULL)
6965 {
6966 if (last_entry->sec == sec)
6967 entry = last_entry;
6968 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6969 entry = last_entry->next;
6970 }
6971
6972 for (; entry; entry = entry->next)
6973 if (entry->sec == sec)
6974 break;
6975
6976 if (entry)
6977 /* Record the entry prior to this one - it is the entry we are
6978 most likely to want to locate next time. Also this way if we
6979 have been called from
6980 unrecord_section_with_aarch64_elf_section_data () we will not
6981 be caching a pointer that is about to be freed. */
6982 last_entry = entry->prev;
6983
6984 return entry;
6985 }
6986
6987 static void
6988 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6989 {
6990 struct section_list *entry;
6991
6992 entry = find_aarch64_elf_section_entry (sec);
6993
6994 if (entry)
6995 {
6996 if (entry->prev != NULL)
6997 entry->prev->next = entry->next;
6998 if (entry->next != NULL)
6999 entry->next->prev = entry->prev;
7000 if (entry == sections_with_aarch64_elf_section_data)
7001 sections_with_aarch64_elf_section_data = entry->next;
7002 free (entry);
7003 }
7004 }
7005
7006
7007 typedef struct
7008 {
7009 void *finfo;
7010 struct bfd_link_info *info;
7011 asection *sec;
7012 int sec_shndx;
7013 int (*func) (void *, const char *, Elf_Internal_Sym *,
7014 asection *, struct elf_link_hash_entry *);
7015 } output_arch_syminfo;
7016
7017 enum map_symbol_type
7018 {
7019 AARCH64_MAP_INSN,
7020 AARCH64_MAP_DATA
7021 };
7022
7023
7024 /* Output a single mapping symbol. */
7025
7026 static bfd_boolean
7027 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
7028 enum map_symbol_type type, bfd_vma offset)
7029 {
7030 static const char *names[2] = { "$x", "$d" };
7031 Elf_Internal_Sym sym;
7032
7033 sym.st_value = (osi->sec->output_section->vma
7034 + osi->sec->output_offset + offset);
7035 sym.st_size = 0;
7036 sym.st_other = 0;
7037 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7038 sym.st_shndx = osi->sec_shndx;
7039 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7040 }
7041
7042
7043
7044 /* Output mapping symbols for PLT entries associated with H. */
7045
7046 static bfd_boolean
7047 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
7048 {
7049 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
7050 bfd_vma addr;
7051
7052 if (h->root.type == bfd_link_hash_indirect)
7053 return TRUE;
7054
7055 if (h->root.type == bfd_link_hash_warning)
7056 /* When warning symbols are created, they **replace** the "real"
7057 entry in the hash table, thus we never get to see the real
7058 symbol in a hash traversal. So look at it now. */
7059 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7060
7061 if (h->plt.offset == (bfd_vma) - 1)
7062 return TRUE;
7063
7064 addr = h->plt.offset;
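/* The PLT header occupies the first 32 bytes, so offset 32 is the first
   real PLT entry; emit a single $x mapping symbol there to mark the
   start of the PLT code. */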
7065 if (addr == 32)
7066 {
7067 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7068 return FALSE;
7069 }
7070 return TRUE;
7071 }
7072
7073
7074 /* Output a single local symbol for a generated stub. */
7075
7076 static bfd_boolean
7077 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7078 bfd_vma offset, bfd_vma size)
7079 {
7080 Elf_Internal_Sym sym;
7081
7082 sym.st_value = (osi->sec->output_section->vma
7083 + osi->sec->output_offset + offset);
7084 sym.st_size = size;
7085 sym.st_other = 0;
7086 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7087 sym.st_shndx = osi->sec_shndx;
7088 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7089 }
7090
7091 static bfd_boolean
7092 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7093 {
7094 struct elf_aarch64_stub_hash_entry *stub_entry;
7095 asection *stub_sec;
7096 bfd_vma addr;
7097 char *stub_name;
7098 output_arch_syminfo *osi;
7099
7100 /* Massage our args to the form they really have. */
7101 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7102 osi = (output_arch_syminfo *) in_arg;
7103
7104 stub_sec = stub_entry->stub_sec;
7105
7106 /* Ensure this stub is attached to the current section being
7107 processed. */
7108 if (stub_sec != osi->sec)
7109 return TRUE;
7110
7111 addr = (bfd_vma) stub_entry->stub_offset;
7112
7113 stub_name = stub_entry->output_name;
7114
7115 switch (stub_entry->stub_type)
7116 {
7117 case aarch64_stub_adrp_branch:
7118 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7119 sizeof (aarch64_adrp_branch_stub)))
7120 return FALSE;
7121 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7122 return FALSE;
7123 break;
7124 case aarch64_stub_long_branch:
7125 if (!elfNN_aarch64_output_stub_sym
7126 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7127 return FALSE;
7128 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7129 return FALSE;
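      /* The long branch stub keeps a literal (used to form the branch
	 target) after its instructions; assuming that stub layout, the
	 data starts at offset 16 and is marked with "$d" below.  */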
7130 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7131 return FALSE;
7132 break;
7133 case aarch64_stub_erratum_835769_veneer:
7134 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7135 sizeof (aarch64_erratum_835769_stub)))
7136 return FALSE;
7137 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7138 return FALSE;
7139 break;
7140 case aarch64_stub_erratum_843419_veneer:
7141 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7142 sizeof (aarch64_erratum_843419_stub)))
7143 return FALSE;
7144 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7145 return FALSE;
7146 break;
7147
7148 default:
7149 abort ();
7150 }
7151
7152 return TRUE;
7153 }
7154
7155 /* Output mapping symbols for linker generated sections. */
7156
7157 static bfd_boolean
7158 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7159 struct bfd_link_info *info,
7160 void *finfo,
7161 int (*func) (void *, const char *,
7162 Elf_Internal_Sym *,
7163 asection *,
7164 struct elf_link_hash_entry
7165 *))
7166 {
7167 output_arch_syminfo osi;
7168 struct elf_aarch64_link_hash_table *htab;
7169
7170 htab = elf_aarch64_hash_table (info);
7171
7172 osi.finfo = finfo;
7173 osi.info = info;
7174 osi.func = func;
7175
7176 /* Long call stubs.  */
7177 if (htab->stub_bfd && htab->stub_bfd->sections)
7178 {
7179 asection *stub_sec;
7180
7181 for (stub_sec = htab->stub_bfd->sections;
7182 stub_sec != NULL; stub_sec = stub_sec->next)
7183 {
7184 /* Ignore non-stub sections. */
7185 if (!strstr (stub_sec->name, STUB_SUFFIX))
7186 continue;
7187
7188 osi.sec = stub_sec;
7189
7190 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7191 (output_bfd, osi.sec->output_section);
7192
7193 /* The first instruction in a stub is always a branch. */
7194 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7195 return FALSE;
7196
7197 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7198 &osi);
7199 }
7200 }
7201
7202 /* Finally, output mapping symbols for the PLT. */
7203 if (!htab->root.splt || htab->root.splt->size == 0)
7204 return TRUE;
7205
7206 /* For now, live with only minimal mapping symbols for the PLT (a single $x at the first entry).  */
7207 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7208 (output_bfd, htab->root.splt->output_section);
7209 osi.sec = htab->root.splt;
7210
7211 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7212 (void *) &osi);
7213
7214 return TRUE;
7215
7216 }
7217
7218 /* Allocate target specific section data. */
7219
7220 static bfd_boolean
7221 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7222 {
7223 if (!sec->used_by_bfd)
7224 {
7225 _aarch64_elf_section_data *sdata;
7226 bfd_size_type amt = sizeof (*sdata);
7227
7228 sdata = bfd_zalloc (abfd, amt);
7229 if (sdata == NULL)
7230 return FALSE;
7231 sec->used_by_bfd = sdata;
7232 }
7233
7234 record_section_with_aarch64_elf_section_data (sec);
7235
7236 return _bfd_elf_new_section_hook (abfd, sec);
7237 }
7238
7239
7240 static void
7241 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7242 asection *sec,
7243 void *ignore ATTRIBUTE_UNUSED)
7244 {
7245 unrecord_section_with_aarch64_elf_section_data (sec);
7246 }
7247
7248 static bfd_boolean
7249 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7250 {
7251 if (abfd->sections)
7252 bfd_map_over_sections (abfd,
7253 unrecord_section_via_map_over_sections, NULL);
7254
7255 return _bfd_elf_close_and_cleanup (abfd);
7256 }
7257
7258 static bfd_boolean
7259 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7260 {
7261 if (abfd->sections)
7262 bfd_map_over_sections (abfd,
7263 unrecord_section_via_map_over_sections, NULL);
7264
7265 return _bfd_free_cached_info (abfd);
7266 }
7267
7268 /* Create dynamic sections. This is different from the ARM backend in that
7269 the got, plt, gotplt and their relocation sections are all created in the
7270 standard part of the bfd elf backend. */
7271
7272 static bfd_boolean
7273 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7274 struct bfd_link_info *info)
7275 {
7276 struct elf_aarch64_link_hash_table *htab;
7277
7278 /* We need to create .got section. */
7279 if (!aarch64_elf_create_got_section (dynobj, info))
7280 return FALSE;
7281
7282 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7283 return FALSE;
7284
7285 htab = elf_aarch64_hash_table (info);
7286 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7287 if (!bfd_link_pic (info))
7288 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7289
7290 if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
7291 abort ();
7292
7293 return TRUE;
7294 }
7295
7296
7297 /* Allocate space in .plt, .got and associated reloc sections for
7298 dynamic relocs. */
7299
7300 static bfd_boolean
7301 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7302 {
7303 struct bfd_link_info *info;
7304 struct elf_aarch64_link_hash_table *htab;
7305 struct elf_aarch64_link_hash_entry *eh;
7306 struct elf_dyn_relocs *p;
7307
7308 /* An example of a bfd_link_hash_indirect symbol is a versioned
7309 symbol.  For example: __gxx_personality_v0 (bfd_link_hash_indirect)
7310 -> __gxx_personality_v0(bfd_link_hash_defined)
7311
7312 There is no need to process bfd_link_hash_indirect symbols here
7313 because we will also be presented with the concrete instance of
7314 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7315 called to copy all relevant data from the generic to the concrete
7316 symbol instance.
7317 */
7318 if (h->root.type == bfd_link_hash_indirect)
7319 return TRUE;
7320
7321 if (h->root.type == bfd_link_hash_warning)
7322 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7323
7324 info = (struct bfd_link_info *) inf;
7325 htab = elf_aarch64_hash_table (info);
7326
7327 /* An STT_GNU_IFUNC symbol must go through the PLT; when it is defined
7328 and referenced in a regular object it is handled by elfNN_aarch64_allocate_ifunc_dynrelocs, so skip it here.  */
7329 if (h->type == STT_GNU_IFUNC
7330 && h->def_regular)
7331 return TRUE;
7332 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7333 {
7334 /* Make sure this symbol is output as a dynamic symbol.
7335 Undefined weak syms won't yet be marked as dynamic. */
7336 if (h->dynindx == -1 && !h->forced_local)
7337 {
7338 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7339 return FALSE;
7340 }
7341
7342 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7343 {
7344 asection *s = htab->root.splt;
7345
7346 /* If this is the first .plt entry, make room for the special
7347 first entry. */
7348 if (s->size == 0)
7349 s->size += htab->plt_header_size;
7350
7351 h->plt.offset = s->size;
7352
7353 /* If this symbol is not defined in a regular file, and we are
7354 not generating a shared library, then set the symbol to this
7355 location in the .plt. This is required to make function
7356 pointers compare as equal between the normal executable and
7357 the shared library. */
7358 if (!bfd_link_pic (info) && !h->def_regular)
7359 {
7360 h->root.u.def.section = s;
7361 h->root.u.def.value = h->plt.offset;
7362 }
7363
7364 /* Make room for this entry. For now we only create the
7365 small model PLT entries. We later need to find a way
7366 of relaxing into these from the large model PLT entries. */
7367 s->size += PLT_SMALL_ENTRY_SIZE;
7368
7369 /* We also need to make an entry in the .got.plt section, which
7370 will be placed in the .got section by the linker script. */
7371 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7372
7373 /* We also need to make an entry in the .rela.plt section. */
7374 htab->root.srelplt->size += RELOC_SIZE (htab);
7375
7376 /* We need to ensure that all GOT entries that serve the PLT
7377 are consecutive with the special GOT slots [0] [1] and
7378 [2].  Any additional relocations, such as
7379 R_AARCH64_TLSDESC, must be placed after the PLT related
7380 entries.  We abuse reloc_count so that during sizing it
7381 indicates the number of PLT related reserved entries.
7382 In subsequent phases, when filling in the contents of
7383 the reloc entries, PLT related entries are placed by
7384 computing their PLT index (0 .. reloc_count), while
7385 other, non-PLT relocs are placed at the slot indicated
7386 by reloc_count, and reloc_count is then
7387 updated.  */
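	  /* Roughly, for example: with two PLT-using symbols and one TLSDESC
	     symbol, .got.plt holds the three reserved slots, then the two
	     PLT jump slots, then the TLSDESC double slot; .rela.plt holds
	     the two JUMP_SLOT relocs (indices 0 .. reloc_count - 1)
	     followed by the TLSDESC reloc.  */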
7388
7389 htab->root.srelplt->reloc_count++;
7390 }
7391 else
7392 {
7393 h->plt.offset = (bfd_vma) - 1;
7394 h->needs_plt = 0;
7395 }
7396 }
7397 else
7398 {
7399 h->plt.offset = (bfd_vma) - 1;
7400 h->needs_plt = 0;
7401 }
7402
7403 eh = (struct elf_aarch64_link_hash_entry *) h;
7404 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7405
7406 if (h->got.refcount > 0)
7407 {
7408 bfd_boolean dyn;
7409 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7410
7411 h->got.offset = (bfd_vma) - 1;
7412
7413 dyn = htab->root.dynamic_sections_created;
7414
7415 /* Make sure this symbol is output as a dynamic symbol.
7416 Undefined weak syms won't yet be marked as dynamic. */
7417 if (dyn && h->dynindx == -1 && !h->forced_local)
7418 {
7419 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7420 return FALSE;
7421 }
7422
7423 if (got_type == GOT_UNKNOWN)
7424 {
7425 }
7426 else if (got_type == GOT_NORMAL)
7427 {
7428 h->got.offset = htab->root.sgot->size;
7429 htab->root.sgot->size += GOT_ENTRY_SIZE;
7430 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7431 || h->root.type != bfd_link_hash_undefweak)
7432 && (bfd_link_pic (info)
7433 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7434 {
7435 htab->root.srelgot->size += RELOC_SIZE (htab);
7436 }
7437 }
7438 else
7439 {
7440 int indx;
7441 if (got_type & GOT_TLSDESC_GD)
7442 {
7443 eh->tlsdesc_got_jump_table_offset =
7444 (htab->root.sgotplt->size
7445 - aarch64_compute_jump_table_size (htab));
7446 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7447 h->got.offset = (bfd_vma) - 2;
7448 }
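	  /* (bfd_vma) -2 is used here as a marker distinct from the
	     unallocated value of -1; the actual TLSDESC slot is tracked
	     via tlsdesc_got_jump_table_offset in .got.plt.  */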
7449
7450 if (got_type & GOT_TLS_GD)
7451 {
7452 h->got.offset = htab->root.sgot->size;
7453 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7454 }
7455
7456 if (got_type & GOT_TLS_IE)
7457 {
7458 h->got.offset = htab->root.sgot->size;
7459 htab->root.sgot->size += GOT_ENTRY_SIZE;
7460 }
7461
7462 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7463 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7464 || h->root.type != bfd_link_hash_undefweak)
7465 && (bfd_link_pic (info)
7466 || indx != 0
7467 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7468 {
7469 if (got_type & GOT_TLSDESC_GD)
7470 {
7471 htab->root.srelplt->size += RELOC_SIZE (htab);
7472 /* Note reloc_count not incremented here! We have
7473 already adjusted reloc_count for this relocation
7474 type. */
7475
7476 /* TLSDESC PLT is now needed, but not yet determined. */
7477 htab->tlsdesc_plt = (bfd_vma) - 1;
7478 }
7479
7480 if (got_type & GOT_TLS_GD)
7481 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7482
7483 if (got_type & GOT_TLS_IE)
7484 htab->root.srelgot->size += RELOC_SIZE (htab);
7485 }
7486 }
7487 }
7488 else
7489 {
7490 h->got.offset = (bfd_vma) - 1;
7491 }
7492
7493 if (eh->dyn_relocs == NULL)
7494 return TRUE;
7495
7496 /* In the shared -Bsymbolic case, discard space allocated for
7497 dynamic pc-relative relocs against symbols which turn out to be
7498 defined in regular objects. For the normal shared case, discard
7499 space for pc-relative relocs that have become local due to symbol
7500 visibility changes. */
7501
7502 if (bfd_link_pic (info))
7503 {
7504 /* Relocs that use pc_count are those that appear on a call
7505 insn, or certain REL relocs that can be generated via assembly.
7506 We want calls to protected symbols to resolve directly to the
7507 function rather than going via the plt. If people want
7508 function pointer comparisons to work as expected then they
7509 should avoid writing weird assembly. */
7510 if (SYMBOL_CALLS_LOCAL (info, h))
7511 {
7512 struct elf_dyn_relocs **pp;
7513
7514 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7515 {
7516 p->count -= p->pc_count;
7517 p->pc_count = 0;
7518 if (p->count == 0)
7519 *pp = p->next;
7520 else
7521 pp = &p->next;
7522 }
7523 }
7524
7525 /* Also discard relocs on undefined weak syms with non-default
7526 visibility. */
7527 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7528 {
7529 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7530 eh->dyn_relocs = NULL;
7531
7532 /* Make sure undefined weak symbols are output as a dynamic
7533 symbol in PIEs. */
7534 else if (h->dynindx == -1
7535 && !h->forced_local
7536 && !bfd_elf_link_record_dynamic_symbol (info, h))
7537 return FALSE;
7538 }
7539
7540 }
7541 else if (ELIMINATE_COPY_RELOCS)
7542 {
7543 /* For the non-shared case, discard space for relocs against
7544 symbols which turn out to need copy relocs or are not
7545 dynamic. */
7546
7547 if (!h->non_got_ref
7548 && ((h->def_dynamic
7549 && !h->def_regular)
7550 || (htab->root.dynamic_sections_created
7551 && (h->root.type == bfd_link_hash_undefweak
7552 || h->root.type == bfd_link_hash_undefined))))
7553 {
7554 /* Make sure this symbol is output as a dynamic symbol.
7555 Undefined weak syms won't yet be marked as dynamic. */
7556 if (h->dynindx == -1
7557 && !h->forced_local
7558 && !bfd_elf_link_record_dynamic_symbol (info, h))
7559 return FALSE;
7560
7561 /* If that succeeded, we know we'll be keeping all the
7562 relocs. */
7563 if (h->dynindx != -1)
7564 goto keep;
7565 }
7566
7567 eh->dyn_relocs = NULL;
7568
7569 keep:;
7570 }
7571
7572 /* Finally, allocate space. */
7573 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7574 {
7575 asection *sreloc;
7576
7577 sreloc = elf_section_data (p->sec)->sreloc;
7578
7579 BFD_ASSERT (sreloc != NULL);
7580
7581 sreloc->size += p->count * RELOC_SIZE (htab);
7582 }
7583
7584 return TRUE;
7585 }
7586
7587 /* Allocate space in .plt, .got and associated reloc sections for
7588 ifunc dynamic relocs. */
7589
7590 static bfd_boolean
7591 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7592 void *inf)
7593 {
7594 struct bfd_link_info *info;
7595 struct elf_aarch64_link_hash_table *htab;
7596 struct elf_aarch64_link_hash_entry *eh;
7597
7598 /* An example of a bfd_link_hash_indirect symbol is a versioned
7599 symbol.  For example: __gxx_personality_v0 (bfd_link_hash_indirect)
7600 -> __gxx_personality_v0(bfd_link_hash_defined)
7601
7602 There is no need to process bfd_link_hash_indirect symbols here
7603 because we will also be presented with the concrete instance of
7604 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7605 called to copy all relevant data from the generic to the concrete
7606 symbol instance.
7607 */
7608 if (h->root.type == bfd_link_hash_indirect)
7609 return TRUE;
7610
7611 if (h->root.type == bfd_link_hash_warning)
7612 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7613
7614 info = (struct bfd_link_info *) inf;
7615 htab = elf_aarch64_hash_table (info);
7616
7617 eh = (struct elf_aarch64_link_hash_entry *) h;
7618
7619 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7620 here if it is defined and referenced in a non-shared object.  */
7621 if (h->type == STT_GNU_IFUNC
7622 && h->def_regular)
7623 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7624 &eh->dyn_relocs,
7625 htab->plt_entry_size,
7626 htab->plt_header_size,
7627 GOT_ENTRY_SIZE);
7628 return TRUE;
7629 }
7630
7631 /* Allocate space in .plt, .got and associated reloc sections for
7632 local dynamic relocs. */
7633
7634 static bfd_boolean
7635 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7636 {
7637 struct elf_link_hash_entry *h
7638 = (struct elf_link_hash_entry *) *slot;
7639
7640 if (h->type != STT_GNU_IFUNC
7641 || !h->def_regular
7642 || !h->ref_regular
7643 || !h->forced_local
7644 || h->root.type != bfd_link_hash_defined)
7645 abort ();
7646
7647 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7648 }
7649
7650 /* Allocate space in .plt, .got and associated reloc sections for
7651 local ifunc dynamic relocs. */
7652
7653 static bfd_boolean
7654 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7655 {
7656 struct elf_link_hash_entry *h
7657 = (struct elf_link_hash_entry *) *slot;
7658
7659 if (h->type != STT_GNU_IFUNC
7660 || !h->def_regular
7661 || !h->ref_regular
7662 || !h->forced_local
7663 || h->root.type != bfd_link_hash_defined)
7664 abort ();
7665
7666 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7667 }
7668
7669 /* Find any dynamic relocs that apply to read-only sections. */
7670
7671 static bfd_boolean
7672 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7673 {
7674 struct elf_aarch64_link_hash_entry * eh;
7675 struct elf_dyn_relocs * p;
7676
7677 eh = (struct elf_aarch64_link_hash_entry *) h;
7678 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7679 {
7680 asection *s = p->sec;
7681
7682 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7683 {
7684 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7685
7686 info->flags |= DF_TEXTREL;
7687
7688 /* Not an error, just cut short the traversal. */
7689 return FALSE;
7690 }
7691 }
7692 return TRUE;
7693 }
7694
7695 /* This is the most important function of all, innocuously
7696 named though it is!  */
7697 static bfd_boolean
7698 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7699 struct bfd_link_info *info)
7700 {
7701 struct elf_aarch64_link_hash_table *htab;
7702 bfd *dynobj;
7703 asection *s;
7704 bfd_boolean relocs;
7705 bfd *ibfd;
7706
7707 htab = elf_aarch64_hash_table ((info));
7708 dynobj = htab->root.dynobj;
7709
7710 BFD_ASSERT (dynobj != NULL);
7711
7712 if (htab->root.dynamic_sections_created)
7713 {
7714 if (bfd_link_executable (info))
7715 {
7716 s = bfd_get_linker_section (dynobj, ".interp");
7717 if (s == NULL)
7718 abort ();
7719 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7720 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7721 }
7722 }
7723
7724 /* Set up .got offsets for local syms, and space for local dynamic
7725 relocs. */
7726 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7727 {
7728 struct elf_aarch64_local_symbol *locals = NULL;
7729 Elf_Internal_Shdr *symtab_hdr;
7730 asection *srel;
7731 unsigned int i;
7732
7733 if (!is_aarch64_elf (ibfd))
7734 continue;
7735
7736 for (s = ibfd->sections; s != NULL; s = s->next)
7737 {
7738 struct elf_dyn_relocs *p;
7739
7740 for (p = (struct elf_dyn_relocs *)
7741 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7742 {
7743 if (!bfd_is_abs_section (p->sec)
7744 && bfd_is_abs_section (p->sec->output_section))
7745 {
7746 /* Input section has been discarded, either because
7747 it is a copy of a linkonce section or due to
7748 linker script /DISCARD/, so we'll be discarding
7749 the relocs too. */
7750 }
7751 else if (p->count != 0)
7752 {
7753 srel = elf_section_data (p->sec)->sreloc;
7754 srel->size += p->count * RELOC_SIZE (htab);
7755 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7756 info->flags |= DF_TEXTREL;
7757 }
7758 }
7759 }
7760
7761 locals = elf_aarch64_locals (ibfd);
7762 if (!locals)
7763 continue;
7764
7765 symtab_hdr = &elf_symtab_hdr (ibfd);
7766 srel = htab->root.srelgot;
7767 for (i = 0; i < symtab_hdr->sh_info; i++)
7768 {
7769 locals[i].got_offset = (bfd_vma) - 1;
7770 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7771 if (locals[i].got_refcount > 0)
7772 {
7773 unsigned got_type = locals[i].got_type;
7774 if (got_type & GOT_TLSDESC_GD)
7775 {
7776 locals[i].tlsdesc_got_jump_table_offset =
7777 (htab->root.sgotplt->size
7778 - aarch64_compute_jump_table_size (htab));
7779 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7780 locals[i].got_offset = (bfd_vma) - 2;
7781 }
7782
7783 if (got_type & GOT_TLS_GD)
7784 {
7785 locals[i].got_offset = htab->root.sgot->size;
7786 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7787 }
7788
7789 if (got_type & GOT_TLS_IE
7790 || got_type & GOT_NORMAL)
7791 {
7792 locals[i].got_offset = htab->root.sgot->size;
7793 htab->root.sgot->size += GOT_ENTRY_SIZE;
7794 }
7795
7796 if (got_type == GOT_UNKNOWN)
7797 {
7798 }
7799
7800 if (bfd_link_pic (info))
7801 {
7802 if (got_type & GOT_TLSDESC_GD)
7803 {
7804 htab->root.srelplt->size += RELOC_SIZE (htab);
7805 /* Note RELOC_COUNT not incremented here! */
7806 htab->tlsdesc_plt = (bfd_vma) - 1;
7807 }
7808
7809 if (got_type & GOT_TLS_GD)
7810 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7811
7812 if (got_type & GOT_TLS_IE
7813 || got_type & GOT_NORMAL)
7814 htab->root.srelgot->size += RELOC_SIZE (htab);
7815 }
7816 }
7817 else
7818 {
7819 locals[i].got_refcount = (bfd_vma) - 1;
7820 }
7821 }
7822 }
7823
7824
7825 /* Allocate global sym .plt and .got entries, and space for global
7826 sym dynamic relocs. */
7827 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7828 info);
7829
7830 /* Allocate global ifunc sym .plt and .got entries, and space for global
7831 ifunc sym dynamic relocs. */
7832 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7833 info);
7834
7835 /* Allocate .plt and .got entries, and space for local symbols. */
7836 htab_traverse (htab->loc_hash_table,
7837 elfNN_aarch64_allocate_local_dynrelocs,
7838 info);
7839
7840 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7841 htab_traverse (htab->loc_hash_table,
7842 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7843 info);
7844
7845 /* For every jump slot reserved in the sgotplt, reloc_count is
7846 incremented. However, when we reserve space for TLS descriptors,
7847 it's not incremented, so in order to compute the space reserved
7848 for them, it suffices to multiply the reloc count by the jump
7849 slot size. */
7850
7851 if (htab->root.srelplt)
7852 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
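  /* In other words (assuming aarch64_compute_jump_table_size simply
     multiplies reloc_count by GOT_ENTRY_SIZE), sgotplt_jump_table_size
     covers only the PLT jump slots; the TLSDESC double slots allocated
     above sit beyond this boundary in .got.plt.  */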
7853
7854 if (htab->tlsdesc_plt)
7855 {
7856 if (htab->root.splt->size == 0)
7857 htab->root.splt->size += PLT_ENTRY_SIZE;
7858
7859 htab->tlsdesc_plt = htab->root.splt->size;
7860 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7861
7862 /* If we're not using lazy TLS relocations, don't generate the
7863 GOT entry required. */
7864 if (!(info->flags & DF_BIND_NOW))
7865 {
7866 htab->dt_tlsdesc_got = htab->root.sgot->size;
7867 htab->root.sgot->size += GOT_ENTRY_SIZE;
7868 }
7869 }
7870
7871 /* Initialize mapping symbol information, to be used later to distinguish
7872 between code and data while scanning for errata.  */
7873 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7874 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7875 {
7876 if (!is_aarch64_elf (ibfd))
7877 continue;
7878 bfd_elfNN_aarch64_init_maps (ibfd);
7879 }
7880
7881 /* We now have determined the sizes of the various dynamic sections.
7882 Allocate memory for them. */
7883 relocs = FALSE;
7884 for (s = dynobj->sections; s != NULL; s = s->next)
7885 {
7886 if ((s->flags & SEC_LINKER_CREATED) == 0)
7887 continue;
7888
7889 if (s == htab->root.splt
7890 || s == htab->root.sgot
7891 || s == htab->root.sgotplt
7892 || s == htab->root.iplt
7893 || s == htab->root.igotplt || s == htab->sdynbss)
7894 {
7895 /* Strip this section if we don't need it; see the
7896 comment below. */
7897 }
7898 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7899 {
7900 if (s->size != 0 && s != htab->root.srelplt)
7901 relocs = TRUE;
7902
7903 /* We use the reloc_count field as a counter if we need
7904 to copy relocs into the output file. */
7905 if (s != htab->root.srelplt)
7906 s->reloc_count = 0;
7907 }
7908 else
7909 {
7910 /* It's not one of our sections, so don't allocate space. */
7911 continue;
7912 }
7913
7914 if (s->size == 0)
7915 {
7916 /* If we don't need this section, strip it from the
7917 output file. This is mostly to handle .rela.bss and
7918 .rela.plt. We must create both sections in
7919 create_dynamic_sections, because they must be created
7920 before the linker maps input sections to output
7921 sections. The linker does that before
7922 adjust_dynamic_symbol is called, and it is that
7923 function which decides whether anything needs to go
7924 into these sections. */
7925
7926 s->flags |= SEC_EXCLUDE;
7927 continue;
7928 }
7929
7930 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7931 continue;
7932
7933 /* Allocate memory for the section contents. We use bfd_zalloc
7934 here in case unused entries are not reclaimed before the
7935 section's contents are written out. This should not happen,
7936 but this way if it does, we get a R_AARCH64_NONE reloc instead
7937 of garbage. */
7938 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7939 if (s->contents == NULL)
7940 return FALSE;
7941 }
7942
7943 if (htab->root.dynamic_sections_created)
7944 {
7945 /* Add some entries to the .dynamic section. We fill in the
7946 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7947 must add the entries now so that we get the correct size for
7948 the .dynamic section. The DT_DEBUG entry is filled in by the
7949 dynamic linker and used by the debugger. */
7950 #define add_dynamic_entry(TAG, VAL) \
7951 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7952
7953 if (bfd_link_executable (info))
7954 {
7955 if (!add_dynamic_entry (DT_DEBUG, 0))
7956 return FALSE;
7957 }
7958
7959 if (htab->root.splt->size != 0)
7960 {
7961 if (!add_dynamic_entry (DT_PLTGOT, 0)
7962 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7963 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7964 || !add_dynamic_entry (DT_JMPREL, 0))
7965 return FALSE;
7966
7967 if (htab->tlsdesc_plt
7968 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7969 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7970 return FALSE;
7971 }
7972
7973 if (relocs)
7974 {
7975 if (!add_dynamic_entry (DT_RELA, 0)
7976 || !add_dynamic_entry (DT_RELASZ, 0)
7977 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7978 return FALSE;
7979
7980 /* If any dynamic relocs apply to a read-only section,
7981 then we need a DT_TEXTREL entry. */
7982 if ((info->flags & DF_TEXTREL) == 0)
7983 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7984 info);
7985
7986 if ((info->flags & DF_TEXTREL) != 0)
7987 {
7988 if (!add_dynamic_entry (DT_TEXTREL, 0))
7989 return FALSE;
7990 }
7991 }
7992 }
7993 #undef add_dynamic_entry
7994
7995 return TRUE;
7996 }
7997
7998 static inline void
7999 elf_aarch64_update_plt_entry (bfd *output_bfd,
8000 bfd_reloc_code_real_type r_type,
8001 bfd_byte *plt_entry, bfd_vma value)
8002 {
8003 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
8004
8005 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
8006 }
8007
8008 static void
8009 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
8010 struct elf_aarch64_link_hash_table
8011 *htab, bfd *output_bfd,
8012 struct bfd_link_info *info)
8013 {
8014 bfd_byte *plt_entry;
8015 bfd_vma plt_index;
8016 bfd_vma got_offset;
8017 bfd_vma gotplt_entry_address;
8018 bfd_vma plt_entry_address;
8019 Elf_Internal_Rela rela;
8020 bfd_byte *loc;
8021 asection *plt, *gotplt, *relplt;
8022
8023 /* When building a static executable, use .iplt, .igot.plt and
8024 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8025 if (htab->root.splt != NULL)
8026 {
8027 plt = htab->root.splt;
8028 gotplt = htab->root.sgotplt;
8029 relplt = htab->root.srelplt;
8030 }
8031 else
8032 {
8033 plt = htab->root.iplt;
8034 gotplt = htab->root.igotplt;
8035 relplt = htab->root.irelplt;
8036 }
8037
8038 /* Get the index in the procedure linkage table which
8039 corresponds to this symbol. This is the index of this symbol
8040 in all the symbols for which we are making plt entries. The
8041 first entry in the procedure linkage table is reserved.
8042
8043 Get the offset into the .got table of the entry that
8044 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8045 bytes. The first three are reserved for the dynamic linker.
8046
8047 For static executables, we don't reserve anything. */
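  /* As a worked example for ELF64 (GOT_ENTRY_SIZE of 8 and, assuming the
     small PLT model, a 32-byte header with 16-byte entries): the first
     symbol's PLT entry is at offset 32, giving plt_index 0 and a .got.plt
     offset of (0 + 3) * 8 = 24, i.e. the slot right after the three
     reserved entries.  */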
8048
8049 if (plt == htab->root.splt)
8050 {
8051 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8052 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8053 }
8054 else
8055 {
8056 plt_index = h->plt.offset / htab->plt_entry_size;
8057 got_offset = plt_index * GOT_ENTRY_SIZE;
8058 }
8059
8060 plt_entry = plt->contents + h->plt.offset;
8061 plt_entry_address = plt->output_section->vma
8062 + plt->output_offset + h->plt.offset;
8063 gotplt_entry_address = gotplt->output_section->vma +
8064 gotplt->output_offset + got_offset;
8065
8066 /* Copy in the boiler-plate for the PLTn entry. */
8067 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8068
8069 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8070 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
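  /* For instance, if the .got.plt slot were at 0x411018 and this PLT
     entry at 0x400420 (example addresses only), PG () drops the low 12
     bits of each, and the ADRP immediate encodes
     (0x411000 - 0x400000) >> 12 = 0x11 pages.  */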
8071 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8072 plt_entry,
8073 PG (gotplt_entry_address) -
8074 PG (plt_entry_address));
8075
8076 /* Fill in the lo12 bits for the load from the pltgot. */
8077 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8078 plt_entry + 4,
8079 PG_OFFSET (gotplt_entry_address));
8080
8081 /* Fill in the lo12 bits for the add from the pltgot entry. */
8082 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8083 plt_entry + 8,
8084 PG_OFFSET (gotplt_entry_address));
8085
8086 /* All the GOTPLT entries are initially set to the address of PLT0.  */
8087 bfd_put_NN (output_bfd,
8088 plt->output_section->vma + plt->output_offset,
8089 gotplt->contents + got_offset);
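  /* This is the usual lazy-binding scheme: until the symbol is resolved,
     a call through this PLT entry loads the address of PLT0 from the
     .got.plt slot and branches there; PLT0 enters the dynamic linker's
     resolver, which then patches the slot with the real target.  */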
8090
8091 rela.r_offset = gotplt_entry_address;
8092
8093 if (h->dynindx == -1
8094 || ((bfd_link_executable (info)
8095 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8096 && h->def_regular
8097 && h->type == STT_GNU_IFUNC))
8098 {
8099 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8100 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8101 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8102 rela.r_addend = (h->root.u.def.value
8103 + h->root.u.def.section->output_section->vma
8104 + h->root.u.def.section->output_offset);
8105 }
8106 else
8107 {
8108 /* Fill in the entry in the .rela.plt section. */
8109 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8110 rela.r_addend = 0;
8111 }
8112
8113 /* Compute the relocation entry to use based on the PLT index and do
8114 not adjust reloc_count. The reloc_count has already been adjusted
8115 to account for this entry. */
8116 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8117 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8118 }
8119
8120 /* Size sections even though they're not dynamic.  We use this hook to
8121 set up _TLS_MODULE_BASE_, if needed.  */
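/* A brief note: _TLS_MODULE_BASE_ is conventionally a hidden, local
   STT_TLS symbol placed at the start of the module's TLS segment, so
   that TLS relocations can be expressed relative to it.  */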
8122
8123 static bfd_boolean
8124 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8125 struct bfd_link_info *info)
8126 {
8127 asection *tls_sec;
8128
8129 if (bfd_link_relocatable (info))
8130 return TRUE;
8131
8132 tls_sec = elf_hash_table (info)->tls_sec;
8133
8134 if (tls_sec)
8135 {
8136 struct elf_link_hash_entry *tlsbase;
8137
8138 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8139 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8140
8141 if (tlsbase)
8142 {
8143 struct bfd_link_hash_entry *h = NULL;
8144 const struct elf_backend_data *bed =
8145 get_elf_backend_data (output_bfd);
8146
8147 if (!(_bfd_generic_link_add_one_symbol
8148 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8149 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8150 return FALSE;
8151
8152 tlsbase->type = STT_TLS;
8153 tlsbase = (struct elf_link_hash_entry *) h;
8154 tlsbase->def_regular = 1;
8155 tlsbase->other = STV_HIDDEN;
8156 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8157 }
8158 }
8159
8160 return TRUE;
8161 }
8162
8163 /* Finish up dynamic symbol handling. We set the contents of various
8164 dynamic sections here. */
8165 static bfd_boolean
8166 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8167 struct bfd_link_info *info,
8168 struct elf_link_hash_entry *h,
8169 Elf_Internal_Sym *sym)
8170 {
8171 struct elf_aarch64_link_hash_table *htab;
8172 htab = elf_aarch64_hash_table (info);
8173
8174 if (h->plt.offset != (bfd_vma) - 1)
8175 {
8176 asection *plt, *gotplt, *relplt;
8177
8178 /* This symbol has an entry in the procedure linkage table. Set
8179 it up. */
8180
8181 /* When building a static executable, use .iplt, .igot.plt and
8182 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8183 if (htab->root.splt != NULL)
8184 {
8185 plt = htab->root.splt;
8186 gotplt = htab->root.sgotplt;
8187 relplt = htab->root.srelplt;
8188 }
8189 else
8190 {
8191 plt = htab->root.iplt;
8192 gotplt = htab->root.igotplt;
8193 relplt = htab->root.irelplt;
8194 }
8195
8196 /* This symbol has an entry in the procedure linkage table. Set
8197 it up. */
8198 if ((h->dynindx == -1
8199 && !((h->forced_local || bfd_link_executable (info))
8200 && h->def_regular
8201 && h->type == STT_GNU_IFUNC))
8202 || plt == NULL
8203 || gotplt == NULL
8204 || relplt == NULL)
8205 abort ();
8206
8207 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8208 if (!h->def_regular)
8209 {
8210 /* Mark the symbol as undefined, rather than as defined in
8211 the .plt section. */
8212 sym->st_shndx = SHN_UNDEF;
8213 /* If the symbol is weak we need to clear the value.
8214 Otherwise, the PLT entry would provide a definition for
8215 the symbol even if the symbol wasn't defined anywhere,
8216 and so the symbol would never be NULL. Leave the value if
8217 there were any relocations where pointer equality matters
8218 (this is a clue for the dynamic linker, to make function
8219 pointer comparisons work between an application and shared
8220 library). */
8221 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8222 sym->st_value = 0;
8223 }
8224 }
8225
8226 if (h->got.offset != (bfd_vma) - 1
8227 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8228 {
8229 Elf_Internal_Rela rela;
8230 bfd_byte *loc;
8231
8232 /* This symbol has an entry in the global offset table. Set it
8233 up. */
8234 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8235 abort ();
8236
8237 rela.r_offset = (htab->root.sgot->output_section->vma
8238 + htab->root.sgot->output_offset
8239 + (h->got.offset & ~(bfd_vma) 1));
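      /* The low bit of got.offset is used elsewhere in this backend as a
	 "GOT entry already initialized" flag (see the BFD_ASSERTs below),
	 so it is masked off here to recover the real offset.  */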
8240
8241 if (h->def_regular
8242 && h->type == STT_GNU_IFUNC)
8243 {
8244 if (bfd_link_pic (info))
8245 {
8246 /* Generate R_AARCH64_GLOB_DAT. */
8247 goto do_glob_dat;
8248 }
8249 else
8250 {
8251 asection *plt;
8252
8253 if (!h->pointer_equality_needed)
8254 abort ();
8255
8256 /* For non-shared object, we can't use .got.plt, which
8257 contains the real function address if we need pointer
8258 equality. We load the GOT entry with the PLT entry. */
8259 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8260 bfd_put_NN (output_bfd, (plt->output_section->vma
8261 + plt->output_offset
8262 + h->plt.offset),
8263 htab->root.sgot->contents
8264 + (h->got.offset & ~(bfd_vma) 1));
8265 return TRUE;
8266 }
8267 }
8268 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
8269 {
8270 if (!h->def_regular)
8271 return FALSE;
8272
8273 BFD_ASSERT ((h->got.offset & 1) != 0);
8274 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8275 rela.r_addend = (h->root.u.def.value
8276 + h->root.u.def.section->output_section->vma
8277 + h->root.u.def.section->output_offset);
8278 }
8279 else
8280 {
8281 do_glob_dat:
8282 BFD_ASSERT ((h->got.offset & 1) == 0);
8283 bfd_put_NN (output_bfd, (bfd_vma) 0,
8284 htab->root.sgot->contents + h->got.offset);
8285 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8286 rela.r_addend = 0;
8287 }
8288
8289 loc = htab->root.srelgot->contents;
8290 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8291 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8292 }
8293
8294 if (h->needs_copy)
8295 {
8296 Elf_Internal_Rela rela;
8297 bfd_byte *loc;
8298
8299 /* This symbol needs a copy reloc. Set it up. */
8300
8301 if (h->dynindx == -1
8302 || (h->root.type != bfd_link_hash_defined
8303 && h->root.type != bfd_link_hash_defweak)
8304 || htab->srelbss == NULL)
8305 abort ();
8306
8307 rela.r_offset = (h->root.u.def.value
8308 + h->root.u.def.section->output_section->vma
8309 + h->root.u.def.section->output_offset);
8310 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8311 rela.r_addend = 0;
8312 loc = htab->srelbss->contents;
8313 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8314 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8315 }
8316
8317 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8318 be NULL for local symbols. */
8319 if (sym != NULL
8320 && (h == elf_hash_table (info)->hdynamic
8321 || h == elf_hash_table (info)->hgot))
8322 sym->st_shndx = SHN_ABS;
8323
8324 return TRUE;
8325 }
8326
8327 /* Finish up local dynamic symbol handling. We set the contents of
8328 various dynamic sections here. */
8329
8330 static bfd_boolean
8331 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8332 {
8333 struct elf_link_hash_entry *h
8334 = (struct elf_link_hash_entry *) *slot;
8335 struct bfd_link_info *info
8336 = (struct bfd_link_info *) inf;
8337
8338 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8339 info, h, NULL);
8340 }
8341
8342 static void
8343 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8344 struct elf_aarch64_link_hash_table
8345 *htab)
8346 {
8347 /* Fill in PLT0.  FIXME:RR Note this doesn't distinguish between
8348 small and large PLTs and at the moment just generates
8349 the small PLT.  */
8350
8351 /* PLT0 of the small PLT looks like this in ELF64 -
8352 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8353 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8354 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8355 // symbol resolver
8356 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8357 // GOTPLT entry for this.
8358 br x17
8359 PLT0 will be slightly different in ELF32 due to different got entry
8360 size.
8361 */
8362 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8363 bfd_vma plt_base;
8364
8365
8366 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8367 PLT_ENTRY_SIZE);
8368 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8369 PLT_ENTRY_SIZE;
8370
8371 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8372 + htab->root.sgotplt->output_offset
8373 + GOT_ENTRY_SIZE * 2);
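  /* GOT[1] and GOT[2] are left as zero here and filled in at run time by
     the dynamic linker with its own data (conventionally the link map and
     the resolver entry point); PLT0 loads GOT[2] to reach the resolver.  */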
8374
8375 plt_base = htab->root.splt->output_section->vma +
8376 htab->root.splt->output_offset;
8377
8378 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8379 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8380 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8381 htab->root.splt->contents + 4,
8382 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8383
8384 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8385 htab->root.splt->contents + 8,
8386 PG_OFFSET (plt_got_2nd_ent));
8387
8388 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8389 htab->root.splt->contents + 12,
8390 PG_OFFSET (plt_got_2nd_ent));
8391 }
8392
8393 static bfd_boolean
8394 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8395 struct bfd_link_info *info)
8396 {
8397 struct elf_aarch64_link_hash_table *htab;
8398 bfd *dynobj;
8399 asection *sdyn;
8400
8401 htab = elf_aarch64_hash_table (info);
8402 dynobj = htab->root.dynobj;
8403 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8404
8405 if (htab->root.dynamic_sections_created)
8406 {
8407 ElfNN_External_Dyn *dyncon, *dynconend;
8408
8409 if (sdyn == NULL || htab->root.sgot == NULL)
8410 abort ();
8411
8412 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8413 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8414 for (; dyncon < dynconend; dyncon++)
8415 {
8416 Elf_Internal_Dyn dyn;
8417 asection *s;
8418
8419 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8420
8421 switch (dyn.d_tag)
8422 {
8423 default:
8424 continue;
8425
8426 case DT_PLTGOT:
8427 s = htab->root.sgotplt;
8428 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8429 break;
8430
8431 case DT_JMPREL:
8432 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8433 break;
8434
8435 case DT_PLTRELSZ:
8436 s = htab->root.srelplt;
8437 dyn.d_un.d_val = s->size;
8438 break;
8439
8440 case DT_RELASZ:
8441 /* The procedure linkage table relocs (DT_JMPREL) should
8442 not be included in the overall relocs (DT_RELA).
8443 Therefore, we override the DT_RELASZ entry here to
8444 make it not include the JMPREL relocs. Since the
8445 linker script arranges for .rela.plt to follow all
8446 other relocation sections, we don't have to worry
8447 about changing the DT_RELA entry. */
8448 if (htab->root.srelplt != NULL)
8449 {
8450 s = htab->root.srelplt;
8451 dyn.d_un.d_val -= s->size;
8452 }
8453 break;
8454
8455 case DT_TLSDESC_PLT:
8456 s = htab->root.splt;
8457 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8458 + htab->tlsdesc_plt;
8459 break;
8460
8461 case DT_TLSDESC_GOT:
8462 s = htab->root.sgot;
8463 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8464 + htab->dt_tlsdesc_got;
8465 break;
8466 }
8467
8468 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8469 }
8470
8471 }
8472
8473 /* Fill in the special first entry in the procedure linkage table. */
8474 if (htab->root.splt && htab->root.splt->size > 0)
8475 {
8476 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8477
8478 elf_section_data (htab->root.splt->output_section)->
8479 this_hdr.sh_entsize = htab->plt_entry_size;
8480
8481
8482 if (htab->tlsdesc_plt)
8483 {
8484 bfd_put_NN (output_bfd, (bfd_vma) 0,
8485 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8486
8487 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8488 elfNN_aarch64_tlsdesc_small_plt_entry,
8489 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8490
8491 {
8492 bfd_vma adrp1_addr =
8493 htab->root.splt->output_section->vma
8494 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8495
8496 bfd_vma adrp2_addr = adrp1_addr + 4;
8497
8498 bfd_vma got_addr =
8499 htab->root.sgot->output_section->vma
8500 + htab->root.sgot->output_offset;
8501
8502 bfd_vma pltgot_addr =
8503 htab->root.sgotplt->output_section->vma
8504 + htab->root.sgotplt->output_offset;
8505
8506 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8507
8508 bfd_byte *plt_entry =
8509 htab->root.splt->contents + htab->tlsdesc_plt;
8510
8511 /* adrp x2, DT_TLSDESC_GOT */
8512 elf_aarch64_update_plt_entry (output_bfd,
8513 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8514 plt_entry + 4,
8515 (PG (dt_tlsdesc_got)
8516 - PG (adrp1_addr)));
8517
8518 /* adrp x3, 0 */
8519 elf_aarch64_update_plt_entry (output_bfd,
8520 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8521 plt_entry + 8,
8522 (PG (pltgot_addr)
8523 - PG (adrp2_addr)));
8524
8525 /* ldr x2, [x2, #0] */
8526 elf_aarch64_update_plt_entry (output_bfd,
8527 BFD_RELOC_AARCH64_LDSTNN_LO12,
8528 plt_entry + 12,
8529 PG_OFFSET (dt_tlsdesc_got));
8530
8531 /* add x3, x3, 0 */
8532 elf_aarch64_update_plt_entry (output_bfd,
8533 BFD_RELOC_AARCH64_ADD_LO12,
8534 plt_entry + 16,
8535 PG_OFFSET (pltgot_addr));
8536 }
8537 }
8538 }
8539
8540 if (htab->root.sgotplt)
8541 {
8542 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8543 {
8544 (*_bfd_error_handler)
8545 (_("discarded output section: `%A'"), htab->root.sgotplt);
8546 return FALSE;
8547 }
8548
8549 /* Fill in the first three entries in the global offset table. */
8550 if (htab->root.sgotplt->size > 0)
8551 {
8552 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8553
8554 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8555 bfd_put_NN (output_bfd,
8556 (bfd_vma) 0,
8557 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8558 bfd_put_NN (output_bfd,
8559 (bfd_vma) 0,
8560 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8561 }
8562
8563 if (htab->root.sgot)
8564 {
8565 if (htab->root.sgot->size > 0)
8566 {
8567 bfd_vma addr =
8568 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8569 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8570 }
8571 }
8572
8573 elf_section_data (htab->root.sgotplt->output_section)->
8574 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8575 }
8576
8577 if (htab->root.sgot && htab->root.sgot->size > 0)
8578 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8579 = GOT_ENTRY_SIZE;
8580
8581 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8582 htab_traverse (htab->loc_hash_table,
8583 elfNN_aarch64_finish_local_dynamic_symbol,
8584 info);
8585
8586 return TRUE;
8587 }
8588
8589 /* Return address for Ith PLT stub in section PLT, for relocation REL
8590 or (bfd_vma) -1 if it should not be included. */
8591
8592 static bfd_vma
8593 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8594 const arelent *rel ATTRIBUTE_UNUSED)
8595 {
8596 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8597 }
8598
8599
8600 /* We use this so we can override certain functions
8601 (though currently we don't). */
8602
8603 const struct elf_size_info elfNN_aarch64_size_info =
8604 {
8605 sizeof (ElfNN_External_Ehdr),
8606 sizeof (ElfNN_External_Phdr),
8607 sizeof (ElfNN_External_Shdr),
8608 sizeof (ElfNN_External_Rel),
8609 sizeof (ElfNN_External_Rela),
8610 sizeof (ElfNN_External_Sym),
8611 sizeof (ElfNN_External_Dyn),
8612 sizeof (Elf_External_Note),
8613 4, /* Hash table entry size. */
8614 1, /* Internal relocs per external relocs. */
8615 ARCH_SIZE, /* Arch size. */
8616 LOG_FILE_ALIGN, /* Log_file_align. */
8617 ELFCLASSNN, EV_CURRENT,
8618 bfd_elfNN_write_out_phdrs,
8619 bfd_elfNN_write_shdrs_and_ehdr,
8620 bfd_elfNN_checksum_contents,
8621 bfd_elfNN_write_relocs,
8622 bfd_elfNN_swap_symbol_in,
8623 bfd_elfNN_swap_symbol_out,
8624 bfd_elfNN_slurp_reloc_table,
8625 bfd_elfNN_slurp_symbol_table,
8626 bfd_elfNN_swap_dyn_in,
8627 bfd_elfNN_swap_dyn_out,
8628 bfd_elfNN_swap_reloc_in,
8629 bfd_elfNN_swap_reloc_out,
8630 bfd_elfNN_swap_reloca_in,
8631 bfd_elfNN_swap_reloca_out
8632 };
8633
8634 #define ELF_ARCH bfd_arch_aarch64
8635 #define ELF_MACHINE_CODE EM_AARCH64
8636 #define ELF_MAXPAGESIZE 0x10000
8637 #define ELF_MINPAGESIZE 0x1000
8638 #define ELF_COMMONPAGESIZE 0x1000
8639
8640 #define bfd_elfNN_close_and_cleanup \
8641 elfNN_aarch64_close_and_cleanup
8642
8643 #define bfd_elfNN_bfd_free_cached_info \
8644 elfNN_aarch64_bfd_free_cached_info
8645
8646 #define bfd_elfNN_bfd_is_target_special_symbol \
8647 elfNN_aarch64_is_target_special_symbol
8648
8649 #define bfd_elfNN_bfd_link_hash_table_create \
8650 elfNN_aarch64_link_hash_table_create
8651
8652 #define bfd_elfNN_bfd_merge_private_bfd_data \
8653 elfNN_aarch64_merge_private_bfd_data
8654
8655 #define bfd_elfNN_bfd_print_private_bfd_data \
8656 elfNN_aarch64_print_private_bfd_data
8657
8658 #define bfd_elfNN_bfd_reloc_type_lookup \
8659 elfNN_aarch64_reloc_type_lookup
8660
8661 #define bfd_elfNN_bfd_reloc_name_lookup \
8662 elfNN_aarch64_reloc_name_lookup
8663
8664 #define bfd_elfNN_bfd_set_private_flags \
8665 elfNN_aarch64_set_private_flags
8666
8667 #define bfd_elfNN_find_inliner_info \
8668 elfNN_aarch64_find_inliner_info
8669
8670 #define bfd_elfNN_find_nearest_line \
8671 elfNN_aarch64_find_nearest_line
8672
8673 #define bfd_elfNN_mkobject \
8674 elfNN_aarch64_mkobject
8675
8676 #define bfd_elfNN_new_section_hook \
8677 elfNN_aarch64_new_section_hook
8678
8679 #define elf_backend_adjust_dynamic_symbol \
8680 elfNN_aarch64_adjust_dynamic_symbol
8681
8682 #define elf_backend_always_size_sections \
8683 elfNN_aarch64_always_size_sections
8684
8685 #define elf_backend_check_relocs \
8686 elfNN_aarch64_check_relocs
8687
8688 #define elf_backend_copy_indirect_symbol \
8689 elfNN_aarch64_copy_indirect_symbol
8690
8691 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8692 to them in our hash. */
8693 #define elf_backend_create_dynamic_sections \
8694 elfNN_aarch64_create_dynamic_sections
8695
8696 #define elf_backend_init_index_section \
8697 _bfd_elf_init_2_index_sections
8698
8699 #define elf_backend_finish_dynamic_sections \
8700 elfNN_aarch64_finish_dynamic_sections
8701
8702 #define elf_backend_finish_dynamic_symbol \
8703 elfNN_aarch64_finish_dynamic_symbol
8704
8705 #define elf_backend_gc_sweep_hook \
8706 elfNN_aarch64_gc_sweep_hook
8707
8708 #define elf_backend_object_p \
8709 elfNN_aarch64_object_p
8710
8711 #define elf_backend_output_arch_local_syms \
8712 elfNN_aarch64_output_arch_local_syms
8713
8714 #define elf_backend_plt_sym_val \
8715 elfNN_aarch64_plt_sym_val
8716
8717 #define elf_backend_post_process_headers \
8718 elfNN_aarch64_post_process_headers
8719
8720 #define elf_backend_relocate_section \
8721 elfNN_aarch64_relocate_section
8722
8723 #define elf_backend_reloc_type_class \
8724 elfNN_aarch64_reloc_type_class
8725
8726 #define elf_backend_section_from_shdr \
8727 elfNN_aarch64_section_from_shdr
8728
8729 #define elf_backend_size_dynamic_sections \
8730 elfNN_aarch64_size_dynamic_sections
8731
8732 #define elf_backend_size_info \
8733 elfNN_aarch64_size_info
8734
8735 #define elf_backend_write_section \
8736 elfNN_aarch64_write_section
8737
8738 #define elf_backend_can_refcount 1
8739 #define elf_backend_can_gc_sections 1
8740 #define elf_backend_plt_readonly 1
8741 #define elf_backend_want_got_plt 1
8742 #define elf_backend_want_plt_sym 0
8743 #define elf_backend_may_use_rel_p 0
8744 #define elf_backend_may_use_rela_p 1
8745 #define elf_backend_default_use_rela_p 1
8746 #define elf_backend_rela_normal 1
8747 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8748 #define elf_backend_default_execstack 0
8749 #define elf_backend_extern_protected_data 1
8750
8751 #undef elf_backend_obj_attrs_section
8752 #define elf_backend_obj_attrs_section ".ARM.attributes"
8753
8754 #include "elfNN-target.h"