1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
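   As an illustrative sketch only (the layout is described by the text
   above, not by a data structure in this file), the two kinds of
   double GOT entry can be pictured as:

     traditional TLS (GD):                TLS descriptor:
       GOT[n]:   module id                  GOT[n]:   resolver function
                 (R_AARCH64_TLS_DTPMOD)               (R_AARCH64_TLSDESC)
       GOT[n+1]: offset within module       GOT[n+1]: resolver argument
                 (R_AARCH64_TLS_DTPREL for
                  globals; for locals the
                  offset is written directly)
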
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
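   A minimal sketch of that convention, using illustrative names that
   are not taken from this file:

     bfd_vma off = symbol_got_offset;
     if ((off & 1) == 0)
       {
         emit_got_relocations_for (symbol);
         symbol_got_offset = off | 1;
       }
     off &= ~(bfd_vma) 1;

   i.e. bit 0 records that the GOT relocations have already been
   emitted, and it is masked out whenever OFF is used as a real GOT
   offset.
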
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
189
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
203
204 #define ELIMINATE_COPY_RELOCS 0
205
206 /* Return size of a relocation entry. HTAB is the bfd's
207 elf_aarch64_link_hash_table. */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
209
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
215
216 /* Encoding of the nop instruction. */
217 #define INSN_NOP 0xd503201f
218
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
223 /* The first entry in a procedure linkage table looks like this if
224 the distance between the PLTGOT and the PLT is < 4GB; in that case
225 use these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 {
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 #if ARCH_SIZE == 64
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
235 #else
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
238 #endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 };
244
245 /* A per-function entry in a procedure linkage table looks like this
246 if the distance between the PLTGOT and the PLT is < 4GB; in that
247 case use these PLT entries. */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
249 {
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
251 #if ARCH_SIZE == 64
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 #else
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
257 #endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
259 };
260
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
263 {
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
267 #if ARCH_SIZE == 64
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
270 #else
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
273 #endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 };
278
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
281
282 #define AARCH64_ELF_ABI_VERSION 0
283
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
286
287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
289 in reloc.c. */
290
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
292 {
293 EMPTY_HOWTO (0),
294
295 /* Basic data relocations. */
296
297 #if ARCH_SIZE == 64
298 HOWTO (R_AARCH64_NULL, /* type */
299 0, /* rightshift */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
301 0, /* bitsize */
302 FALSE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
308 0, /* src_mask */
309 0, /* dst_mask */
310 FALSE), /* pcrel_offset */
311 #else
312 HOWTO (R_AARCH64_NONE, /* type */
313 0, /* rightshift */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
315 0, /* bitsize */
316 FALSE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
322 0, /* src_mask */
323 0, /* dst_mask */
324 FALSE), /* pcrel_offset */
325 #endif
326
327 /* .xword: (S+A) */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
329 0, /* rightshift */
330 4, /* size (4 = long long) */
331 64, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 /* .word: (S+A) */
343 HOWTO (AARCH64_R (ABS32), /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 /* .half: (S+A) */
358 HOWTO (AARCH64_R (ABS16), /* type */
359 0, /* rightshift */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
361 16, /* bitsize */
362 FALSE, /* pc_relative */
363 0, /* bitpos */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
371
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 TRUE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
386
387 /* .word: (S+A-P) */
388 HOWTO (AARCH64_R (PREL32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 TRUE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
401
402 /* .half: (S+A-P) */
403 HOWTO (AARCH64_R (PREL16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 TRUE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
416
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
419
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
422 0, /* rightshift */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 FALSE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
434
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
437 0, /* rightshift */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
439 16, /* bitsize */
440 FALSE, /* pc_relative */
441 0, /* bitpos */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
449
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
452 16, /* rightshift */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
454 16, /* bitsize */
455 FALSE, /* pc_relative */
456 0, /* bitpos */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
464
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
467 16, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
482 32, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
497 32, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
512 48, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
528
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
531 0, /* rightshift */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
533 16, /* bitsize */
534 FALSE, /* pc_relative */
535 0, /* bitpos */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
543
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
546 16, /* rightshift */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
548 16, /* bitsize */
549 FALSE, /* pc_relative */
550 0, /* bitpos */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
558
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
561 32, /* rightshift */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
563 16, /* bitsize */
564 FALSE, /* pc_relative */
565 0, /* bitpos */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
573
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
576
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
579 2, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 19, /* bitsize */
582 TRUE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
591
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
594 0, /* rightshift */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
596 21, /* bitsize */
597 TRUE, /* pc_relative */
598 0, /* bitpos */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
606
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
609 12, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 21, /* bitsize */
612 TRUE, /* pc_relative */
613 0, /* bitpos */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
621
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
624 12, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 21, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 12, /* bitsize */
642 FALSE, /* pc_relative */
643 10, /* bitpos */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 12, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 /* Relocations for control-flow instructions. */
668
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
671 2, /* rightshift */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
673 14, /* bitsize */
674 TRUE, /* pc_relative */
675 0, /* bitpos */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
683
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
686 2, /* rightshift */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
688 19, /* bitsize */
689 TRUE, /* pc_relative */
690 0, /* bitpos */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
698
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
701 2, /* rightshift */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
703 26, /* bitsize */
704 TRUE, /* pc_relative */
705 0, /* bitpos */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
713
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 26, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
731 1, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 12, /* bitsize */
734 FALSE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
743
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 12, /* bitsize */
749 FALSE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
758
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
761 3, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 12, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
776 4, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
792 2, /* rightshift */
793 2, /* size (0 = byte,1 = short,2 = long) */
794 19, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
808 12, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 21, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
823 3, /* rightshift */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
825 12, /* bitsize */
826 FALSE, /* pc_relative */
827 0, /* bitpos */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
835
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
838 2, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 12, /* bitsize */
841 FALSE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
850
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
854 12, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 21, /* bitsize */
857 TRUE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
866
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
868 0, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 21, /* bitsize */
871 TRUE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
880
881 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
883 0, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
897 16, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 16, /* bitsize */
900 FALSE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
909
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
925 12, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 21, /* bitsize */
928 FALSE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
937
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
939 3, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 12, /* bitsize */
942 FALSE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
951
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
953 2, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 12, /* bitsize */
956 FALSE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
965
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 19, /* bitsize */
970 FALSE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
979
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
981 32, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 16, /* bitsize */
984 FALSE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
993
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
995 16, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 16, /* bitsize */
998 FALSE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1007
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 16, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1093 2, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 19, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 21, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1125 21, /* bitsize */
1126 TRUE, /* pc_relative */
1127 0, /* bitpos */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1135
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1138 3, /* rightshift */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1140 12, /* bitsize */
1141 FALSE, /* pc_relative */
1142 0, /* bitpos */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1150
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1153 2, /* rightshift */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1155 12, /* bitsize */
1156 FALSE, /* pc_relative */
1157 0, /* bitpos */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1165
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1168 0, /* rightshift */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1170 12, /* bitsize */
1171 FALSE, /* pc_relative */
1172 0, /* bitpos */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1180
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 12, /* bitsize */
1185 FALSE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1194
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 12, /* bitsize */
1199 FALSE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1208
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1210 0, /* rightshift */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1212 12, /* bitsize */
1213 FALSE, /* pc_relative */
1214 0, /* bitpos */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1219 0x0, /* src_mask */
1220 0x0, /* dst_mask */
1221 FALSE), /* pcrel_offset */
1222
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1224 0, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 12, /* bitsize */
1227 FALSE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1233 0x0, /* src_mask */
1234 0x0, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1238 0, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 0, /* bitsize */
1241 FALSE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1247 0x0, /* src_mask */
1248 0x0, /* dst_mask */
1249 FALSE), /* pcrel_offset */
1250
1251 HOWTO (AARCH64_R (COPY), /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 64, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1266 0, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 64, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 64, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 64, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 64, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 #if ARCH_SIZE == 64
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1317 #else
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1319 #endif
1320 FALSE, /* partial_inplace */
1321 0, /* src_mask */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1324
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1326 0, /* rightshift */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1328 64, /* bitsize */
1329 FALSE, /* pc_relative */
1330 0, /* bitpos */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1333 #if ARCH_SIZE == 64
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1335 #else
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1337 #endif
1338 FALSE, /* partial_inplace */
1339 0, /* src_mask */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 64, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 #if ARCH_SIZE == 64
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1353 #else
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1355 #endif
1356 FALSE, /* partial_inplace */
1357 0, /* src_mask */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1360
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1362 0, /* rightshift */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 64, /* bitsize */
1365 FALSE, /* pc_relative */
1366 0, /* bitpos */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1371 0, /* src_mask */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1374
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1376 0, /* rightshift */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 64, /* bitsize */
1379 FALSE, /* pc_relative */
1380 0, /* bitpos */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1385 0, /* src_mask */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1388
1389 EMPTY_HOWTO (0),
1390 };
1391
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1394 0, /* rightshift */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1396 0, /* bitsize */
1397 FALSE, /* pc_relative */
1398 0, /* bitpos */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1403 0, /* src_mask */
1404 0, /* dst_mask */
1405 FALSE); /* pcrel_offset */
1406
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1408
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1411 {
1412 const int size
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1416
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1419
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1422
1423 return BFD_RELOC_AARCH64_RELOC_START;
1424 }
1425
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1427
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1430 {
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1434
1435 if (initialized_p == FALSE)
1436 {
1437 unsigned int i;
1438
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1442
1443 initialized_p = TRUE;
1444 }
1445
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1448
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1451 {
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1455 }
1456
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1458 }
1459
1460 struct elf_aarch64_reloc_map
1461 {
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1464 };
1465
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1468 {
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1470
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1479 };
1480
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1483
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1486 {
1487 unsigned int i;
1488
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1494 {
1495 code = elf_aarch64_reloc_map[i].to;
1496 break;
1497 }
1498
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1503
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1506
1507 return NULL;
1508 }
1509
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1512 {
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1515
1516 #if ARCH_SIZE == 32
1517 if (r_type > 256)
1518 {
1519 bfd_set_error (bfd_error_bad_value);
1520 return NULL;
1521 }
1522 #endif
1523
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1526
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1529
1530 if (howto != NULL)
1531 return howto;
1532
1533 bfd_set_error (bfd_error_bad_value);
1534 return NULL;
1535 }
1536
1537 static void
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1540 {
1541 unsigned int r_type;
1542
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1545 }
1546
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1550 {
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1552
1553 if (howto != NULL)
1554 return howto;
1555
1556 bfd_set_error (bfd_error_bad_value);
1557 return NULL;
1558 }
1559
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1562 const char *r_name)
1563 {
1564 unsigned int i;
1565
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1570
1571 return NULL;
1572 }
1573
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1578
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name and its type, the stub can be found. The
1582 name can be changed. The only requirement is that the %s be present. */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
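
/* For illustration only: with the format above, a stub for a symbol
   named "foo" is given the local name "__foo_veneer".  A sketch of the
   mangling (the buffer handling here is not taken from this file):

     char *stub_name = bfd_malloc (strlen (sym_name)
                                   + sizeof (STUB_ENTRY_NAME));
     if (stub_name != NULL)
       sprintf (stub_name, STUB_ENTRY_NAME, sym_name);  */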
1584
1585 /* The name of the dynamic interpreter. This is put in the .interp
1586 section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1588
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1592 (-((1 << 25) << 2))
1593
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1596
1597 static int
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1599 {
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1602 }
1603
1604 static int
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1606 {
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1610 }
1611
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1613 {
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1619 };
1620
1621 static const uint32_t aarch64_long_branch_stub[] =
1622 {
1623 #if ARCH_SIZE == 64
1624 0x58000090, /* ldr ip0, 1f */
1625 #else
1626 0x18000090, /* ldr wip0, 1f */
1627 #endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1633 */
1634 0x00000000,
1635 };
1636
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1638 {
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1641 };
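/* When this stub is built, the placeholder word receives the original
   multiply-accumulate instruction and the following B is patched to
   branch back to the instruction after the original site (see
   aarch64_build_one_stub below). */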
1642
1643 /* Section name for stubs is the associated section name plus this
1644 string. */
1645 #define STUB_SUFFIX ".stub"
1646
1647 enum elf_aarch64_stub_type
1648 {
1649 aarch64_stub_none,
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1653 };
1654
1655 struct elf_aarch64_stub_hash_entry
1656 {
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1659
1660 /* The stub section. */
1661 asection *stub_sec;
1662
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1665
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1670
1671 enum elf_aarch64_stub_type stub_type;
1672
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1675
1676 /* Destination symbol type. */
1677 unsigned char st_type;
1678
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1681 asection *id_sec;
1682
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1686 char *output_name;
1687
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1691 };
1692
1693 /* Used to build a map of a section. This is required for mixed-endian
1694 code/data. */
1695
1696 typedef struct elf_elf_section_map
1697 {
1698 bfd_vma vma;
1699 char type;
1700 }
1701 elf_aarch64_section_map;
1702
1703
1704 typedef struct _aarch64_elf_section_data
1705 {
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1710 }
1711 _aarch64_elf_section_data;
1712
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1715
1716 /* A fix-descriptor for erratum 835769. */
1717 struct aarch64_erratum_835769_fix
1718 {
1719 bfd *input_bfd;
1720 asection *section;
1721 bfd_vma offset;
1722 uint32_t veneered_insn;
1723 char *stub_name;
1724 enum elf_aarch64_stub_type stub_type;
1725 };
1726
1727 /* The size of the thread control block, which is defined to be two pointers. */
1728 #define TCB_SIZE (ARCH_SIZE/8)*2
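/* E.g. with ARCH_SIZE == 64 this evaluates to 16 bytes, and with
   ARCH_SIZE == 32 to 8 bytes. */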
1729
1730 struct elf_aarch64_local_symbol
1731 {
1732 unsigned int got_type;
1733 bfd_signed_vma got_refcount;
1734 bfd_vma got_offset;
1735
1736 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1737 offset is from the end of the jump table and reserved entries
1738 within the PLTGOT.
1739
1740 The magic value (bfd_vma) -1 indicates that an offset has not been
1741 allocated. */
1742 bfd_vma tlsdesc_got_jump_table_offset;
1743 };
1744
1745 struct elf_aarch64_obj_tdata
1746 {
1747 struct elf_obj_tdata root;
1748
1749 /* Local symbol descriptors. */
1750 struct elf_aarch64_local_symbol *locals;
1751
1752 /* Zero to warn when linking objects with incompatible enum sizes. */
1753 int no_enum_size_warning;
1754
1755 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1756 int no_wchar_size_warning;
1757 };
1758
1759 #define elf_aarch64_tdata(bfd) \
1760 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1761
1762 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1763
1764 #define is_aarch64_elf(bfd) \
1765 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1766 && elf_tdata (bfd) != NULL \
1767 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1768
1769 static bfd_boolean
1770 elfNN_aarch64_mkobject (bfd *abfd)
1771 {
1772 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1773 AARCH64_ELF_DATA);
1774 }
1775
1776 #define elf_aarch64_hash_entry(ent) \
1777 ((struct elf_aarch64_link_hash_entry *)(ent))
1778
1779 #define GOT_UNKNOWN 0
1780 #define GOT_NORMAL 1
1781 #define GOT_TLS_GD 2
1782 #define GOT_TLS_IE 4
1783 #define GOT_TLSDESC_GD 8
1784
1785 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
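/* got_type is a bit mask, so a symbol referenced through both a
   traditional GD sequence and a TLS descriptor sequence ends up with
   got_type == (GOT_TLS_GD | GOT_TLSDESC_GD); GOT_TLS_GD_ANY_P is true
   in either case. */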
1786
1787 /* AArch64 ELF linker hash entry. */
1788 struct elf_aarch64_link_hash_entry
1789 {
1790 struct elf_link_hash_entry root;
1791
1792 /* Track dynamic relocs copied for this symbol. */
1793 struct elf_dyn_relocs *dyn_relocs;
1794
1795 /* Since PLT entries have variable size, we need to record the
1796 index into .got.plt instead of recomputing it from the PLT
1797 offset. */
1798 bfd_signed_vma plt_got_offset;
1799
1800 /* Bit mask representing the type of GOT entry(s), if any, required by
1801 this symbol. */
1802 unsigned int got_type;
1803
1804 /* A pointer to the most recently used stub hash entry against this
1805 symbol. */
1806 struct elf_aarch64_stub_hash_entry *stub_cache;
1807
1808 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1809 is from the end of the jump table and reserved entries within the PLTGOT.
1810
1811 The magic value (bfd_vma) -1 indicates that an offset has not
1812 been allocated. */
1813 bfd_vma tlsdesc_got_jump_table_offset;
1814 };
1815
1816 static unsigned int
1817 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1818 bfd *abfd,
1819 unsigned long r_symndx)
1820 {
1821 if (h)
1822 return elf_aarch64_hash_entry (h)->got_type;
1823
1824 if (! elf_aarch64_locals (abfd))
1825 return GOT_UNKNOWN;
1826
1827 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1828 }
1829
1830 /* Get the AArch64 elf linker hash table from a link_info structure. */
1831 #define elf_aarch64_hash_table(info) \
1832 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1833
1834 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1835 ((struct elf_aarch64_stub_hash_entry *) \
1836 bfd_hash_lookup ((table), (string), (create), (copy)))
1837
1838 /* AArch64 ELF linker hash table. */
1839 struct elf_aarch64_link_hash_table
1840 {
1841 /* The main hash table. */
1842 struct elf_link_hash_table root;
1843
1844 /* Nonzero to force PIC branch veneers. */
1845 int pic_veneer;
1846
1847 /* Fix erratum 835769. */
1848 int fix_erratum_835769;
1849
1850 /* A table of fix locations for erratum 835769. This holds erratum
1851 fix locations between elfNN_aarch64_size_stubs() and
1852 elfNN_aarch64_write_section(). */
1853 struct aarch64_erratum_835769_fix *aarch64_erratum_835769_fixes;
1854 unsigned int num_aarch64_erratum_835769_fixes;
1855
1856 /* The number of bytes in the initial entry in the PLT. */
1857 bfd_size_type plt_header_size;
1858
1859 /* The number of bytes in the subsequent PLT entries. */
1860 bfd_size_type plt_entry_size;
1861
1862 /* Short-cuts to get to dynamic linker sections. */
1863 asection *sdynbss;
1864 asection *srelbss;
1865
1866 /* Small local sym cache. */
1867 struct sym_cache sym_cache;
1868
1869 /* For convenience in allocate_dynrelocs. */
1870 bfd *obfd;
1871
1872 /* The amount of space used by the reserved portion of the sgotplt
1873 section, plus whatever space is used by the jump slots. */
1874 bfd_vma sgotplt_jump_table_size;
1875
1876 /* The stub hash table. */
1877 struct bfd_hash_table stub_hash_table;
1878
1879 /* Linker stub bfd. */
1880 bfd *stub_bfd;
1881
1882 /* Linker call-backs. */
1883 asection *(*add_stub_section) (const char *, asection *);
1884 void (*layout_sections_again) (void);
1885
1886 /* Array to keep track of which stub sections have been created, and
1887 information on stub grouping. */
1888 struct map_stub
1889 {
1890 /* This is the section to which stubs in the group will be
1891 attached. */
1892 asection *link_sec;
1893 /* The stub section. */
1894 asection *stub_sec;
1895 } *stub_group;
1896
1897 /* Assorted information used by elfNN_aarch64_size_stubs. */
1898 unsigned int bfd_count;
1899 int top_index;
1900 asection **input_list;
1901
1902 /* The offset into splt of the PLT entry for the TLS descriptor
1903 resolver. Special values are 0, if not necessary (or not found
1904 to be necessary yet), and -1 if needed but not determined
1905 yet. */
1906 bfd_vma tlsdesc_plt;
1907
1908 /* The GOT offset for the lazy trampoline. Communicated to the
1909 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1910 indicates an offset is not allocated. */
1911 bfd_vma dt_tlsdesc_got;
1912
1913 /* Used by local STT_GNU_IFUNC symbols. */
1914 htab_t loc_hash_table;
1915 void * loc_hash_memory;
1916 };
1917
1918 /* Create an entry in an AArch64 ELF linker hash table. */
1919
1920 static struct bfd_hash_entry *
1921 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1922 struct bfd_hash_table *table,
1923 const char *string)
1924 {
1925 struct elf_aarch64_link_hash_entry *ret =
1926 (struct elf_aarch64_link_hash_entry *) entry;
1927
1928 /* Allocate the structure if it has not already been allocated by a
1929 subclass. */
1930 if (ret == NULL)
1931 ret = bfd_hash_allocate (table,
1932 sizeof (struct elf_aarch64_link_hash_entry));
1933 if (ret == NULL)
1934 return (struct bfd_hash_entry *) ret;
1935
1936 /* Call the allocation method of the superclass. */
1937 ret = ((struct elf_aarch64_link_hash_entry *)
1938 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1939 table, string));
1940 if (ret != NULL)
1941 {
1942 ret->dyn_relocs = NULL;
1943 ret->got_type = GOT_UNKNOWN;
1944 ret->plt_got_offset = (bfd_vma) - 1;
1945 ret->stub_cache = NULL;
1946 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1947 }
1948
1949 return (struct bfd_hash_entry *) ret;
1950 }
1951
1952 /* Initialize an entry in the stub hash table. */
1953
1954 static struct bfd_hash_entry *
1955 stub_hash_newfunc (struct bfd_hash_entry *entry,
1956 struct bfd_hash_table *table, const char *string)
1957 {
1958 /* Allocate the structure if it has not already been allocated by a
1959 subclass. */
1960 if (entry == NULL)
1961 {
1962 entry = bfd_hash_allocate (table,
1963 sizeof (struct
1964 elf_aarch64_stub_hash_entry));
1965 if (entry == NULL)
1966 return entry;
1967 }
1968
1969 /* Call the allocation method of the superclass. */
1970 entry = bfd_hash_newfunc (entry, table, string);
1971 if (entry != NULL)
1972 {
1973 struct elf_aarch64_stub_hash_entry *eh;
1974
1975 /* Initialize the local fields. */
1976 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1977 eh->stub_sec = NULL;
1978 eh->stub_offset = 0;
1979 eh->target_value = 0;
1980 eh->target_section = NULL;
1981 eh->stub_type = aarch64_stub_none;
1982 eh->h = NULL;
1983 eh->id_sec = NULL;
1984 }
1985
1986 return entry;
1987 }
1988
1989 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1990 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1991 as global symbols. We reuse indx and dynstr_index for the local symbol
1992 hash since they aren't used by global symbols in this backend. */
1993
1994 static hashval_t
1995 elfNN_aarch64_local_htab_hash (const void *ptr)
1996 {
1997 struct elf_link_hash_entry *h
1998 = (struct elf_link_hash_entry *) ptr;
1999 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2000 }
2001
2002 /* Compare local hash entries. */
2003
2004 static int
2005 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2006 {
2007 struct elf_link_hash_entry *h1
2008 = (struct elf_link_hash_entry *) ptr1;
2009 struct elf_link_hash_entry *h2
2010 = (struct elf_link_hash_entry *) ptr2;
2011
2012 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2013 }
2014
2015 /* Find and/or create a hash entry for a local symbol. */
2016
2017 static struct elf_link_hash_entry *
2018 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2019 bfd *abfd, const Elf_Internal_Rela *rel,
2020 bfd_boolean create)
2021 {
2022 struct elf_aarch64_link_hash_entry e, *ret;
2023 asection *sec = abfd->sections;
2024 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2025 ELFNN_R_SYM (rel->r_info));
2026 void **slot;
2027
2028 e.root.indx = sec->id;
2029 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2030 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2031 create ? INSERT : NO_INSERT);
2032
2033 if (!slot)
2034 return NULL;
2035
2036 if (*slot)
2037 {
2038 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2039 return &ret->root;
2040 }
2041
2042 ret = (struct elf_aarch64_link_hash_entry *)
2043 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2044 sizeof (struct elf_aarch64_link_hash_entry));
2045 if (ret)
2046 {
2047 memset (ret, 0, sizeof (*ret));
2048 ret->root.indx = sec->id;
2049 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2050 ret->root.dynindx = -1;
2051 *slot = ret;
2052 }
2053 return &ret->root;
2054 }
2055
2056 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2057
2058 static void
2059 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2060 struct elf_link_hash_entry *dir,
2061 struct elf_link_hash_entry *ind)
2062 {
2063 struct elf_aarch64_link_hash_entry *edir, *eind;
2064
2065 edir = (struct elf_aarch64_link_hash_entry *) dir;
2066 eind = (struct elf_aarch64_link_hash_entry *) ind;
2067
2068 if (eind->dyn_relocs != NULL)
2069 {
2070 if (edir->dyn_relocs != NULL)
2071 {
2072 struct elf_dyn_relocs **pp;
2073 struct elf_dyn_relocs *p;
2074
2075 /* Add reloc counts against the indirect sym to the direct sym
2076 list. Merge any entries against the same section. */
2077 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2078 {
2079 struct elf_dyn_relocs *q;
2080
2081 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2082 if (q->sec == p->sec)
2083 {
2084 q->pc_count += p->pc_count;
2085 q->count += p->count;
2086 *pp = p->next;
2087 break;
2088 }
2089 if (q == NULL)
2090 pp = &p->next;
2091 }
2092 *pp = edir->dyn_relocs;
2093 }
2094
2095 edir->dyn_relocs = eind->dyn_relocs;
2096 eind->dyn_relocs = NULL;
2097 }
2098
2099 if (ind->root.type == bfd_link_hash_indirect)
2100 {
2101 /* Copy over PLT info. */
2102 if (dir->got.refcount <= 0)
2103 {
2104 edir->got_type = eind->got_type;
2105 eind->got_type = GOT_UNKNOWN;
2106 }
2107 }
2108
2109 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2110 }
2111
2112 /* Destroy an AArch64 elf linker hash table. */
2113
2114 static void
2115 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2116 {
2117 struct elf_aarch64_link_hash_table *ret
2118 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2119
2120 if (ret->loc_hash_table)
2121 htab_delete (ret->loc_hash_table);
2122 if (ret->loc_hash_memory)
2123 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2124
2125 bfd_hash_table_free (&ret->stub_hash_table);
2126 _bfd_elf_link_hash_table_free (obfd);
2127 }
2128
2129 /* Create an AArch64 elf linker hash table. */
2130
2131 static struct bfd_link_hash_table *
2132 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2133 {
2134 struct elf_aarch64_link_hash_table *ret;
2135 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2136
2137 ret = bfd_zmalloc (amt);
2138 if (ret == NULL)
2139 return NULL;
2140
2141 if (!_bfd_elf_link_hash_table_init
2142 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2143 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2144 {
2145 free (ret);
2146 return NULL;
2147 }
2148
2149 ret->plt_header_size = PLT_ENTRY_SIZE;
2150 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2151 ret->obfd = abfd;
2152 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2153
2154 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2155 sizeof (struct elf_aarch64_stub_hash_entry)))
2156 {
2157 _bfd_elf_link_hash_table_free (abfd);
2158 return NULL;
2159 }
2160
2161 ret->loc_hash_table = htab_try_create (1024,
2162 elfNN_aarch64_local_htab_hash,
2163 elfNN_aarch64_local_htab_eq,
2164 NULL);
2165 ret->loc_hash_memory = objalloc_create ();
2166 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2167 {
2168 elfNN_aarch64_link_hash_table_free (abfd);
2169 return NULL;
2170 }
2171 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2172
2173 return &ret->root.root;
2174 }
2175
2176 static bfd_boolean
2177 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2178 bfd_vma offset, bfd_vma value)
2179 {
2180 reloc_howto_type *howto;
2181 bfd_vma place;
2182
2183 howto = elfNN_aarch64_howto_from_type (r_type);
2184 place = (input_section->output_section->vma + input_section->output_offset
2185 + offset);
2186
2187 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2188 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2189 return _bfd_aarch64_elf_put_addend (input_bfd,
2190 input_section->contents + offset, r_type,
2191 howto, value);
2192 }
2193
2194 static enum elf_aarch64_stub_type
2195 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2196 {
2197 if (aarch64_valid_for_adrp_p (value, place))
2198 return aarch64_stub_adrp_branch;
2199 return aarch64_stub_long_branch;
2200 }
2201
2202 /* Determine the type of stub needed, if any, for a call. */
2203
2204 static enum elf_aarch64_stub_type
2205 aarch64_type_of_stub (struct bfd_link_info *info,
2206 asection *input_sec,
2207 const Elf_Internal_Rela *rel,
2208 unsigned char st_type,
2209 struct elf_aarch64_link_hash_entry *hash,
2210 bfd_vma destination)
2211 {
2212 bfd_vma location;
2213 bfd_signed_vma branch_offset;
2214 unsigned int r_type;
2215 struct elf_aarch64_link_hash_table *globals;
2216 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2217 bfd_boolean via_plt_p;
2218
2219 if (st_type != STT_FUNC)
2220 return stub_type;
2221
2222 globals = elf_aarch64_hash_table (info);
2223 via_plt_p = (globals->root.splt != NULL && hash != NULL
2224 && hash->root.plt.offset != (bfd_vma) - 1);
2225
2226 if (via_plt_p)
2227 return stub_type;
2228
2229 /* Determine where the call point is. */
2230 location = (input_sec->output_offset
2231 + input_sec->output_section->vma + rel->r_offset);
2232
2233 branch_offset = (bfd_signed_vma) (destination - location);
2234
2235 r_type = ELFNN_R_TYPE (rel->r_info);
2236
2237 /* We don't want to redirect any old unconditional jump in this way,
2238 only one which is being used for a sibcall, where it is
2239 acceptable for the IP0 and IP1 registers to be clobbered. */
2240 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2241 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2242 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2243 {
2244 stub_type = aarch64_stub_long_branch;
2245 }
2246
2247 return stub_type;
2248 }
2249
2250 /* Build a name for an entry in the stub hash table. */
2251
2252 static char *
2253 elfNN_aarch64_stub_name (const asection *input_section,
2254 const asection *sym_sec,
2255 const struct elf_aarch64_link_hash_entry *hash,
2256 const Elf_Internal_Rela *rel)
2257 {
2258 char *stub_name;
2259 bfd_size_type len;
2260
2261 if (hash)
2262 {
2263 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2264 stub_name = bfd_malloc (len);
2265 if (stub_name != NULL)
2266 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2267 (unsigned int) input_section->id,
2268 hash->root.root.root.string,
2269 rel->r_addend);
2270 }
2271 else
2272 {
2273 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2274 stub_name = bfd_malloc (len);
2275 if (stub_name != NULL)
2276 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2277 (unsigned int) input_section->id,
2278 (unsigned int) sym_sec->id,
2279 (unsigned int) ELFNN_R_SYM (rel->r_info),
2280 rel->r_addend);
2281 }
2282
2283 return stub_name;
2284 }
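/* For illustration, the names produced above look like
   "0000012a_printf+0" for a global symbol and "0000012a_4c:17+0" for a
   local one (input section id, then symbol name or section id / symbol
   index, then the addend, all in hex). */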
2285
2286 /* Look up an entry in the stub hash. Stub entries are cached because
2287 creating the stub name takes a bit of time. */
2288
2289 static struct elf_aarch64_stub_hash_entry *
2290 elfNN_aarch64_get_stub_entry (const asection *input_section,
2291 const asection *sym_sec,
2292 struct elf_link_hash_entry *hash,
2293 const Elf_Internal_Rela *rel,
2294 struct elf_aarch64_link_hash_table *htab)
2295 {
2296 struct elf_aarch64_stub_hash_entry *stub_entry;
2297 struct elf_aarch64_link_hash_entry *h =
2298 (struct elf_aarch64_link_hash_entry *) hash;
2299 const asection *id_sec;
2300
2301 if ((input_section->flags & SEC_CODE) == 0)
2302 return NULL;
2303
2304 /* If this input section is part of a group of sections sharing one
2305 stub section, then use the id of the first section in the group.
2306 Stub names need to include a section id, as there may well be
2307 more than one stub used to reach say, printf, and we need to
2308 distinguish between them. */
2309 id_sec = htab->stub_group[input_section->id].link_sec;
2310
2311 if (h != NULL && h->stub_cache != NULL
2312 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2313 {
2314 stub_entry = h->stub_cache;
2315 }
2316 else
2317 {
2318 char *stub_name;
2319
2320 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2321 if (stub_name == NULL)
2322 return NULL;
2323
2324 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2325 stub_name, FALSE, FALSE);
2326 if (h != NULL)
2327 h->stub_cache = stub_entry;
2328
2329 free (stub_name);
2330 }
2331
2332 return stub_entry;
2333 }
2334
2335 /* Add a new stub entry to the stub hash. Not all fields of the new
2336 stub entry are initialised. */
2337
2338 static struct elf_aarch64_stub_hash_entry *
2339 elfNN_aarch64_add_stub (const char *stub_name,
2340 asection *section,
2341 struct elf_aarch64_link_hash_table *htab)
2342 {
2343 asection *link_sec;
2344 asection *stub_sec;
2345 struct elf_aarch64_stub_hash_entry *stub_entry;
2346
2347 link_sec = htab->stub_group[section->id].link_sec;
2348 stub_sec = htab->stub_group[section->id].stub_sec;
2349 if (stub_sec == NULL)
2350 {
2351 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2352 if (stub_sec == NULL)
2353 {
2354 size_t namelen;
2355 bfd_size_type len;
2356 char *s_name;
2357
2358 namelen = strlen (link_sec->name);
2359 len = namelen + sizeof (STUB_SUFFIX);
2360 s_name = bfd_alloc (htab->stub_bfd, len);
2361 if (s_name == NULL)
2362 return NULL;
2363
2364 memcpy (s_name, link_sec->name, namelen);
2365 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2366 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2367 if (stub_sec == NULL)
2368 return NULL;
2369 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2370 }
2371 htab->stub_group[section->id].stub_sec = stub_sec;
2372 }
2373
2374 /* Enter this entry into the linker stub hash table. */
2375 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2376 TRUE, FALSE);
2377 if (stub_entry == NULL)
2378 {
2379 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2380 section->owner, stub_name);
2381 return NULL;
2382 }
2383
2384 stub_entry->stub_sec = stub_sec;
2385 stub_entry->stub_offset = 0;
2386 stub_entry->id_sec = link_sec;
2387
2388 return stub_entry;
2389 }
2390
2391 static bfd_boolean
2392 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2393 void *in_arg ATTRIBUTE_UNUSED)
2394 {
2395 struct elf_aarch64_stub_hash_entry *stub_entry;
2396 asection *stub_sec;
2397 bfd *stub_bfd;
2398 bfd_byte *loc;
2399 bfd_vma sym_value;
2400 bfd_vma veneered_insn_loc;
2401 bfd_vma veneer_entry_loc;
2402 bfd_signed_vma branch_offset = 0;
2403 unsigned int template_size;
2404 const uint32_t *template;
2405 unsigned int i;
2406
2407 /* Massage our args to the form they really have. */
2408 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2409
2410 stub_sec = stub_entry->stub_sec;
2411
2412 /* Make a note of the offset within the stubs for this entry. */
2413 stub_entry->stub_offset = stub_sec->size;
2414 loc = stub_sec->contents + stub_entry->stub_offset;
2415
2416 stub_bfd = stub_sec->owner;
2417
2418 /* This is the address of the stub destination. */
2419 sym_value = (stub_entry->target_value
2420 + stub_entry->target_section->output_offset
2421 + stub_entry->target_section->output_section->vma);
2422
2423 if (stub_entry->stub_type == aarch64_stub_long_branch)
2424 {
2425 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2426 + stub_sec->output_offset);
2427
2428 /* See if we can relax the stub. */
2429 if (aarch64_valid_for_adrp_p (sym_value, place))
2430 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2431 }
2432
2433 switch (stub_entry->stub_type)
2434 {
2435 case aarch64_stub_adrp_branch:
2436 template = aarch64_adrp_branch_stub;
2437 template_size = sizeof (aarch64_adrp_branch_stub);
2438 break;
2439 case aarch64_stub_long_branch:
2440 template = aarch64_long_branch_stub;
2441 template_size = sizeof (aarch64_long_branch_stub);
2442 break;
2443 case aarch64_stub_erratum_835769_veneer:
2444 template = aarch64_erratum_835769_stub;
2445 template_size = sizeof (aarch64_erratum_835769_stub);
2446 break;
2447 default:
2448 abort ();
2449 }
2450
2451 for (i = 0; i < (template_size / sizeof template[0]); i++)
2452 {
2453 bfd_putl32 (template[i], loc);
2454 loc += 4;
2455 }
2456
2457 template_size = (template_size + 7) & ~7;
2458 stub_sec->size += template_size;
2459
2460 switch (stub_entry->stub_type)
2461 {
2462 case aarch64_stub_adrp_branch:
2463 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2464 stub_entry->stub_offset, sym_value))
2465 /* The stub would not have been relaxed if the offset was out
2466 of range. */
2467 BFD_FAIL ();
2468
2469 _bfd_final_link_relocate
2470 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2471 stub_bfd,
2472 stub_sec,
2473 stub_sec->contents,
2474 stub_entry->stub_offset + 4,
2475 sym_value,
2476 0);
2477 break;
2478
2479 case aarch64_stub_long_branch:
2480 /* We want the value relative to the address 12 bytes back from the
2481 value itself. */
2482 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2483 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2484 stub_sec->contents,
2485 stub_entry->stub_offset + 16,
2486 sym_value + 12, 0);
2487 break;
2488
2489 case aarch64_stub_erratum_835769_veneer:
2490 veneered_insn_loc = stub_entry->target_section->output_section->vma
2491 + stub_entry->target_section->output_offset
2492 + stub_entry->target_value;
2493 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2494 + stub_entry->stub_sec->output_offset
2495 + stub_entry->stub_offset;
2496 branch_offset = veneered_insn_loc - veneer_entry_loc;
2497 branch_offset >>= 2;
2498 branch_offset &= 0x3ffffff;
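/* branch_offset now forms the imm26 field of the B (template[1] ==
   0x14000000). Since the B sits 4 bytes into the veneer, the encoded
   branch lands on the instruction following the original
   multiply-accumulate site. */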
2499 bfd_putl32 (stub_entry->veneered_insn,
2500 stub_sec->contents + stub_entry->stub_offset);
2501 bfd_putl32 (template[1] | branch_offset,
2502 stub_sec->contents + stub_entry->stub_offset + 4);
2503 break;
2504
2505 default:
2506 abort ();
2507 }
2508
2509 return TRUE;
2510 }
2511
2512 /* As above, but don't actually build the stub. Just bump offset so
2513 we know stub section sizes. */
2514
2515 static bfd_boolean
2516 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2517 void *in_arg ATTRIBUTE_UNUSED)
2518 {
2519 struct elf_aarch64_stub_hash_entry *stub_entry;
2520 int size;
2521
2522 /* Massage our args to the form they really have. */
2523 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2524
2525 switch (stub_entry->stub_type)
2526 {
2527 case aarch64_stub_adrp_branch:
2528 size = sizeof (aarch64_adrp_branch_stub);
2529 break;
2530 case aarch64_stub_long_branch:
2531 size = sizeof (aarch64_long_branch_stub);
2532 break;
2533 case aarch64_stub_erratum_835769_veneer:
2534 size = sizeof (aarch64_erratum_835769_stub);
2535 break;
2536 default:
2537 abort ();
2538 }
2539
2540 size = (size + 7) & ~7;
2541 stub_entry->stub_sec->size += size;
2542 return TRUE;
2543 }
2544
2545 /* External entry points for sizing and building linker stubs. */
2546
2547 /* Set up various things so that we can make a list of input sections
2548 for each output section included in the link. Returns -1 on error,
2549 0 when no stubs will be needed, and 1 on success. */
2550
2551 int
2552 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2553 struct bfd_link_info *info)
2554 {
2555 bfd *input_bfd;
2556 unsigned int bfd_count;
2557 int top_id, top_index;
2558 asection *section;
2559 asection **input_list, **list;
2560 bfd_size_type amt;
2561 struct elf_aarch64_link_hash_table *htab =
2562 elf_aarch64_hash_table (info);
2563
2564 if (!is_elf_hash_table (htab))
2565 return 0;
2566
2567 /* Count the number of input BFDs and find the top input section id. */
2568 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2569 input_bfd != NULL; input_bfd = input_bfd->link.next)
2570 {
2571 bfd_count += 1;
2572 for (section = input_bfd->sections;
2573 section != NULL; section = section->next)
2574 {
2575 if (top_id < section->id)
2576 top_id = section->id;
2577 }
2578 }
2579 htab->bfd_count = bfd_count;
2580
2581 amt = sizeof (struct map_stub) * (top_id + 1);
2582 htab->stub_group = bfd_zmalloc (amt);
2583 if (htab->stub_group == NULL)
2584 return -1;
2585
2586 /* We can't use output_bfd->section_count here to find the top output
2587 section index as some sections may have been removed, and
2588 _bfd_strip_section_from_output doesn't renumber the indices. */
2589 for (section = output_bfd->sections, top_index = 0;
2590 section != NULL; section = section->next)
2591 {
2592 if (top_index < section->index)
2593 top_index = section->index;
2594 }
2595
2596 htab->top_index = top_index;
2597 amt = sizeof (asection *) * (top_index + 1);
2598 input_list = bfd_malloc (amt);
2599 htab->input_list = input_list;
2600 if (input_list == NULL)
2601 return -1;
2602
2603 /* For sections we aren't interested in, mark their entries with a
2604 value we can check later. */
2605 list = input_list + top_index;
2606 do
2607 *list = bfd_abs_section_ptr;
2608 while (list-- != input_list);
2609
2610 for (section = output_bfd->sections;
2611 section != NULL; section = section->next)
2612 {
2613 if ((section->flags & SEC_CODE) != 0)
2614 input_list[section->index] = NULL;
2615 }
2616
2617 return 1;
2618 }
2619
2620 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2621 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2622
2623 /* The linker repeatedly calls this function for each input section,
2624 in the order that input sections are linked into output sections.
2625 Build lists of input sections to determine groupings between which
2626 we may insert linker stubs. */
2627
2628 void
2629 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2630 {
2631 struct elf_aarch64_link_hash_table *htab =
2632 elf_aarch64_hash_table (info);
2633
2634 if (isec->output_section->index <= htab->top_index)
2635 {
2636 asection **list = htab->input_list + isec->output_section->index;
2637
2638 if (*list != bfd_abs_section_ptr)
2639 {
2640 /* Steal the link_sec pointer for our list. */
2641 /* This happens to make the list in reverse order,
2642 which is what we want. */
2643 PREV_SEC (isec) = *list;
2644 *list = isec;
2645 }
2646 }
2647 }
2648
2649 /* See whether we can group stub sections together. Grouping stub
2650 sections may result in fewer stubs. More importantly, we need to
2651 put all .init* and .fini* stubs at the beginning of the .init or
2652 .fini output sections respectively, because glibc splits the
2653 _init and _fini functions into multiple parts. Putting a stub in
2654 the middle of a function is not a good idea. */
2655
2656 static void
2657 group_sections (struct elf_aarch64_link_hash_table *htab,
2658 bfd_size_type stub_group_size,
2659 bfd_boolean stubs_always_before_branch)
2660 {
2661 asection **list = htab->input_list + htab->top_index;
2662
2663 do
2664 {
2665 asection *tail = *list;
2666
2667 if (tail == bfd_abs_section_ptr)
2668 continue;
2669
2670 while (tail != NULL)
2671 {
2672 asection *curr;
2673 asection *prev;
2674 bfd_size_type total;
2675
2676 curr = tail;
2677 total = tail->size;
2678 while ((prev = PREV_SEC (curr)) != NULL
2679 && ((total += curr->output_offset - prev->output_offset)
2680 < stub_group_size))
2681 curr = prev;
2682
2683 /* OK, the size from the start of CURR to the end is less
2684 than stub_group_size and thus can be handled by one stub
2685 section. (Or the tail section is itself larger than
2686 stub_group_size, in which case we may be toast.)
2687 We should really be keeping track of the total size of
2688 stubs added here, as stubs contribute to the final output
2689 section size. */
2690 do
2691 {
2692 prev = PREV_SEC (tail);
2693 /* Set up this stub group. */
2694 htab->stub_group[tail->id].link_sec = curr;
2695 }
2696 while (tail != curr && (tail = prev) != NULL);
2697
2698 /* But wait, there's more! Input sections up to stub_group_size
2699 bytes before the stub section can be handled by it too. */
2700 if (!stubs_always_before_branch)
2701 {
2702 total = 0;
2703 while (prev != NULL
2704 && ((total += tail->output_offset - prev->output_offset)
2705 < stub_group_size))
2706 {
2707 tail = prev;
2708 prev = PREV_SEC (tail);
2709 htab->stub_group[tail->id].link_sec = curr;
2710 }
2711 }
2712 tail = prev;
2713 }
2714 }
2715 while (list-- != htab->input_list);
2716
2717 free (htab->input_list);
2718 }
2719
2720 #undef PREV_SEC
2721
2722 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2723
2724 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2725 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2726 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2727 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2728 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2729 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2730
2731 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2732 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2733 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2734 #define AARCH64_ZR 0x1f
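/* For instance, for the A64 encoding 0x9b020c20 (madd x0, x1, x2, x3)
   these extractors give AARCH64_RD == 0, AARCH64_RN == 1,
   AARCH64_RM == 2 and AARCH64_RA == 3, while AARCH64_MAC and
   AARCH64_OP31 identify it as a 64-bit multiply-accumulate. */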
2735
2736 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2737 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2738
2739 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2740 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2741 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2742 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2743 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2744 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2745 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2746 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2747 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2748 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2749 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2750 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2751 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2752 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2753 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2754 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2755 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2756 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
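/* As a sanity check on the masks above: 0xf9400260 (ldr x0, [x19])
   satisfies both AARCH64_LDST and AARCH64_LDST_UIMM, while falling
   outside the pair and SIMD classes. */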
2757
2758 /* Classify an INSN if it is indeed a load/store.
2759
2760 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2761
2762 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2763 is set equal to RT.
2764
2765 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
2768
2769 static bfd_boolean
2770 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2771 bfd_boolean *pair, bfd_boolean *load)
2772 {
2773 uint32_t opcode;
2774 unsigned int r;
2775 uint32_t opc = 0;
2776 uint32_t v = 0;
2777 uint32_t opc_v = 0;
2778
2779 /* Bail out quickly if INSN doesn't fall into the load-store
2780 encoding space. */
2781 if (!AARCH64_LDST (insn))
2782 return FALSE;
2783
2784 *pair = FALSE;
2785 *load = FALSE;
2786 if (AARCH64_LDST_EX (insn))
2787 {
2788 *rt = AARCH64_RT (insn);
2789 *rt2 = *rt;
2790 if (AARCH64_BIT (insn, 21) == 1)
2791 {
2792 *pair = TRUE;
2793 *rt2 = AARCH64_RT2 (insn);
2794 }
2795 *load = AARCH64_LD (insn);
2796 return TRUE;
2797 }
2798 else if (AARCH64_LDST_NAP (insn)
2799 || AARCH64_LDSTP_PI (insn)
2800 || AARCH64_LDSTP_O (insn)
2801 || AARCH64_LDSTP_PRE (insn))
2802 {
2803 *pair = TRUE;
2804 *rt = AARCH64_RT (insn);
2805 *rt2 = AARCH64_RT2 (insn);
2806 *load = AARCH64_LD (insn);
2807 return TRUE;
2808 }
2809 else if (AARCH64_LDST_PCREL (insn)
2810 || AARCH64_LDST_UI (insn)
2811 || AARCH64_LDST_PIIMM (insn)
2812 || AARCH64_LDST_U (insn)
2813 || AARCH64_LDST_PREIMM (insn)
2814 || AARCH64_LDST_RO (insn)
2815 || AARCH64_LDST_UIMM (insn))
2816 {
2817 *rt = AARCH64_RT (insn);
2818 *rt2 = *rt;
2819 if (AARCH64_LDST_PCREL (insn))
2820 *load = TRUE;
2821 opc = AARCH64_BITS (insn, 22, 2);
2822 v = AARCH64_BIT (insn, 26);
2823 opc_v = opc | (v << 2);
2824 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2825 || opc_v == 5 || opc_v == 7);
2826 return TRUE;
2827 }
2828 else if (AARCH64_LDST_SIMD_M (insn)
2829 || AARCH64_LDST_SIMD_M_PI (insn))
2830 {
2831 *rt = AARCH64_RT (insn);
2832 *load = AARCH64_BIT (insn, 22);
2833 opcode = (insn >> 12) & 0xf;
2834 switch (opcode)
2835 {
2836 case 0:
2837 case 2:
2838 *rt2 = *rt + 3;
2839 break;
2840
2841 case 4:
2842 case 6:
2843 *rt2 = *rt + 2;
2844 break;
2845
2846 case 7:
2847 *rt2 = *rt;
2848 break;
2849
2850 case 8:
2851 case 10:
2852 *rt2 = *rt + 1;
2853 break;
2854
2855 default:
2856 return FALSE;
2857 }
2858 return TRUE;
2859 }
2860 else if (AARCH64_LDST_SIMD_S (insn)
2861 || AARCH64_LDST_SIMD_S_PI (insn))
2862 {
2863 *rt = AARCH64_RT (insn);
2864 r = (insn >> 21) & 1;
2865 *load = AARCH64_BIT (insn, 22);
2866 opcode = (insn >> 13) & 0x7;
2867 switch (opcode)
2868 {
2869 case 0:
2870 case 2:
2871 case 4:
2872 *rt2 = *rt + r;
2873 break;
2874
2875 case 1:
2876 case 3:
2877 case 5:
2878 *rt2 = *rt + (r == 0 ? 2 : 3);
2879 break;
2880
2881 case 6:
2882 *rt2 = *rt + r;
2883 break;
2884
2885 case 7:
2886 *rt2 = *rt + (r == 0 ? 2 : 3);
2887 break;
2888
2889 default:
2890 return FALSE;
2891 }
2892 return TRUE;
2893 }
2894
2895 return FALSE;
2896 }
2897
2898 /* Return TRUE if INSN is multiply-accumulate. */
2899
2900 static bfd_boolean
2901 aarch64_mlxl_p (uint32_t insn)
2902 {
2903 uint32_t op31 = AARCH64_OP31 (insn);
2904
2905 if (AARCH64_MAC (insn)
2906 && (op31 == 0 || op31 == 1 || op31 == 5)
2907 /* Exclude MUL instructions which are encoded as a multiple accumulate
2908 with RA = XZR. */
2909 && AARCH64_RA (insn) != AARCH64_ZR)
2910 return TRUE;
2911
2912 return FALSE;
2913 }
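/* So, for example, madd x0, x1, x2, x3 is accepted here, while
   mul x0, x1, x2 (an alias of madd with Ra == XZR) is rejected by the
   AARCH64_RA check above. */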
2914
2915 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2916 it is possible for a 64-bit multiply-accumulate instruction to generate an
2917 incorrect result. The details are quite complex and hard to
2918 determine statically, since branches in the code may exist in some
2919 circumstances, but all cases end with a memory (load, store, or
2920 prefetch) instruction followed immediately by the multiply-accumulate
2921 operation. We employ a linker patching technique, by moving the potentially
2922 affected multiply-accumulate instruction into a patch region and replacing
2923 the original instruction with a branch to the patch. This function checks
2924 if INSN_1 is the memory operation followed by a multiply-accumulate
2925 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2926 if INSN_1 and INSN_2 are safe. */
2927
2928 static bfd_boolean
2929 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2930 {
2931 uint32_t rt;
2932 uint32_t rt2;
2933 uint32_t rn;
2934 uint32_t rm;
2935 uint32_t ra;
2936 bfd_boolean pair;
2937 bfd_boolean load;
2938
2939 if (aarch64_mlxl_p (insn_2)
2940 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2941 {
2942 /* Any SIMD memory op is independent of the subsequent MLA
2943 by definition of the erratum. */
2944 if (AARCH64_BIT (insn_1, 26))
2945 return TRUE;
2946
2947 /* If not SIMD, check for integer memory ops and MLA relationship. */
2948 rn = AARCH64_RN (insn_2);
2949 ra = AARCH64_RA (insn_2);
2950 rm = AARCH64_RM (insn_2);
2951
2952 /* If this is a load and there's a true (RAW) dependency, we are safe
2953 and this is not an erratum sequence. */
2954 if (load &&
2955 (rt == rn || rt == rm || rt == ra
2956 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2957 return FALSE;
2958
2959 /* We conservatively put out stubs for all other cases (including
2960 writebacks). */
2961 return TRUE;
2962 }
2963
2964 return FALSE;
2965 }
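/* For example, "ldr x0, [x1]" immediately followed by
   "madd x2, x3, x4, x5" is treated as an erratum sequence, whereas
   "ldr x0, [x1]" followed by "madd x2, x0, x4, x5" is not, because the
   multiply-accumulate consumes the loaded value (a RAW dependency). */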
2966
2967 /* Used to order a list of mapping symbols by address. */
2968
2969 static int
2970 elf_aarch64_compare_mapping (const void *a, const void *b)
2971 {
2972 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2973 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2974
2975 if (amap->vma > bmap->vma)
2976 return 1;
2977 else if (amap->vma < bmap->vma)
2978 return -1;
2979 else if (amap->type > bmap->type)
2980 /* Ensure results do not depend on the host qsort for objects with
2981 multiple mapping symbols at the same address by sorting on type
2982 after vma. */
2983 return 1;
2984 else if (amap->type < bmap->type)
2985 return -1;
2986 else
2987 return 0;
2988 }
2989
2990 static bfd_boolean
2991 erratum_835769_scan (bfd *input_bfd,
2992 struct bfd_link_info *info,
2993 struct aarch64_erratum_835769_fix **fixes_p,
2994 unsigned int *num_fixes_p,
2995 unsigned int *fix_table_size_p)
2996 {
2997 asection *section;
2998 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
2999 struct aarch64_erratum_835769_fix *fixes = *fixes_p;
3000 unsigned int num_fixes = *num_fixes_p;
3001 unsigned int fix_table_size = *fix_table_size_p;
3002
3003 if (htab == NULL)
3004 return FALSE;
3005
3006 for (section = input_bfd->sections;
3007 section != NULL;
3008 section = section->next)
3009 {
3010 bfd_byte *contents = NULL;
3011 struct _aarch64_elf_section_data *sec_data;
3012 unsigned int span;
3013
3014 if (elf_section_type (section) != SHT_PROGBITS
3015 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3016 || (section->flags & SEC_EXCLUDE) != 0
3017 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3018 || (section->output_section == bfd_abs_section_ptr))
3019 continue;
3020
3021 if (elf_section_data (section)->this_hdr.contents != NULL)
3022 contents = elf_section_data (section)->this_hdr.contents;
3023 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3024 return TRUE;
3025
3026 sec_data = elf_aarch64_section_data (section);
3027
3028 qsort (sec_data->map, sec_data->mapcount,
3029 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3030
3031 for (span = 0; span < sec_data->mapcount; span++)
3032 {
3033 unsigned int span_start = sec_data->map[span].vma;
3034 unsigned int span_end = ((span == sec_data->mapcount - 1)
3035 ? sec_data->map[0].vma + section->size
3036 : sec_data->map[span + 1].vma);
3037 unsigned int i;
3038 char span_type = sec_data->map[span].type;
3039
3040 if (span_type == 'd')
3041 continue;
3042
3043 for (i = span_start; i + 4 < span_end; i += 4)
3044 {
3045 uint32_t insn_1 = bfd_getl32 (contents + i);
3046 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3047
3048 if (aarch64_erratum_sequence (insn_1, insn_2))
3049 {
3050 char *stub_name = NULL;
3051 stub_name = (char *) bfd_malloc
3052 (strlen ("__erratum_835769_veneer_") + 16);
3053 if (stub_name != NULL)
3054 sprintf
3055 (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3056 else
3057 return TRUE;
3058
3059 if (num_fixes == fix_table_size)
3060 {
3061 fix_table_size *= 2;
3062 fixes =
3063 (struct aarch64_erratum_835769_fix *)
3064 bfd_realloc (fixes,
3065 sizeof (struct aarch64_erratum_835769_fix)
3066 * fix_table_size);
3067 if (fixes == NULL)
3068 return TRUE;
3069 }
3070
3071 fixes[num_fixes].input_bfd = input_bfd;
3072 fixes[num_fixes].section = section;
3073 fixes[num_fixes].offset = i + 4;
3074 fixes[num_fixes].veneered_insn = insn_2;
3075 fixes[num_fixes].stub_name = stub_name;
3076 fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
3077 num_fixes++;
3078 }
3079 }
3080 }
3081 if (elf_section_data (section)->this_hdr.contents == NULL)
3082 free (contents);
3083 }
3084
3085 *fixes_p = fixes;
3086 *num_fixes_p = num_fixes;
3087 *fix_table_size_p = fix_table_size;
3088 return FALSE;
3089 }
3090
3091 /* Find or create a stub section. Returns a pointer to the stub section, and
3092 the section to which the stub section will be attached (in *LINK_SEC_P).
3093 LINK_SEC_P may be NULL. */
3094
3095 static asection *
3096 elf_aarch64_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3097 struct elf_aarch64_link_hash_table *htab)
3098 {
3099 asection *link_sec;
3100 asection *stub_sec;
3101
3102 link_sec = htab->stub_group[section->id].link_sec;
3103 BFD_ASSERT (link_sec != NULL);
3104 stub_sec = htab->stub_group[section->id].stub_sec;
3105
3106 if (stub_sec == NULL)
3107 {
3108 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3109 if (stub_sec == NULL)
3110 {
3111 size_t namelen;
3112 bfd_size_type len;
3113 char *s_name;
3114
3115 namelen = strlen (link_sec->name);
3116 len = namelen + sizeof (STUB_SUFFIX);
3117 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3118 if (s_name == NULL)
3119 return NULL;
3120
3121 memcpy (s_name, link_sec->name, namelen);
3122 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3123 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3124
3125 if (stub_sec == NULL)
3126 return NULL;
3127 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3128 }
3129 htab->stub_group[section->id].stub_sec = stub_sec;
3130 }
3131
3132 if (link_sec_p)
3133 *link_sec_p = link_sec;
3134
3135 return stub_sec;
3136 }
3137
3138 /* Determine and set the size of the stub section for a final link.
3139
3140 The basic idea here is to examine all the relocations looking for
3141 PC-relative calls to a target that is unreachable with a "bl"
3142 instruction. */
3143
3144 bfd_boolean
3145 elfNN_aarch64_size_stubs (bfd *output_bfd,
3146 bfd *stub_bfd,
3147 struct bfd_link_info *info,
3148 bfd_signed_vma group_size,
3149 asection * (*add_stub_section) (const char *,
3150 asection *),
3151 void (*layout_sections_again) (void))
3152 {
3153 bfd_size_type stub_group_size;
3154 bfd_boolean stubs_always_before_branch;
3155 bfd_boolean stub_changed = 0;
3156 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3157 struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL;
3158 unsigned int num_erratum_835769_fixes = 0;
3159 unsigned int erratum_835769_fix_table_size = 10;
3160 unsigned int i;
3161
3162 if (htab->fix_erratum_835769)
3163 {
3164 erratum_835769_fixes
3165 = (struct aarch64_erratum_835769_fix *)
3166 bfd_zmalloc
3167 (sizeof (struct aarch64_erratum_835769_fix) *
3168 erratum_835769_fix_table_size);
3169 if (erratum_835769_fixes == NULL)
3170 goto error_ret_free_local;
3171 }
3172
3173 /* Propagate mach to stub bfd, because it may not have been
3174 finalized when we created stub_bfd. */
3175 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3176 bfd_get_mach (output_bfd));
3177
3178 /* Stash our params away. */
3179 htab->stub_bfd = stub_bfd;
3180 htab->add_stub_section = add_stub_section;
3181 htab->layout_sections_again = layout_sections_again;
3182 stubs_always_before_branch = group_size < 0;
3183 if (group_size < 0)
3184 stub_group_size = -group_size;
3185 else
3186 stub_group_size = group_size;
3187
3188 if (stub_group_size == 1)
3189 {
3190 /* Default values. */
3191 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3192 stub_group_size = 127 * 1024 * 1024;
3193 }
3194
3195 group_sections (htab, stub_group_size, stubs_always_before_branch);
3196
3197 while (1)
3198 {
3199 bfd *input_bfd;
3200 unsigned int bfd_indx;
3201 asection *stub_sec;
3202 unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes;
3203
3204 num_erratum_835769_fixes = 0;
3205 for (input_bfd = info->input_bfds, bfd_indx = 0;
3206 input_bfd != NULL; input_bfd = input_bfd->link.next, bfd_indx++)
3207 {
3208 Elf_Internal_Shdr *symtab_hdr;
3209 asection *section;
3210 Elf_Internal_Sym *local_syms = NULL;
3211
3212 /* We'll need the symbol table in a second. */
3213 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3214 if (symtab_hdr->sh_info == 0)
3215 continue;
3216
3217 /* Walk over each section attached to the input bfd. */
3218 for (section = input_bfd->sections;
3219 section != NULL; section = section->next)
3220 {
3221 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3222
3223 /* If there aren't any relocs, then there's nothing more
3224 to do. */
3225 if ((section->flags & SEC_RELOC) == 0
3226 || section->reloc_count == 0
3227 || (section->flags & SEC_CODE) == 0)
3228 continue;
3229
3230 /* If this section is a link-once section that will be
3231 discarded, then don't create any stubs. */
3232 if (section->output_section == NULL
3233 || section->output_section->owner != output_bfd)
3234 continue;
3235
3236 /* Get the relocs. */
3237 internal_relocs
3238 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3239 NULL, info->keep_memory);
3240 if (internal_relocs == NULL)
3241 goto error_ret_free_local;
3242
3243 /* Now examine each relocation. */
3244 irela = internal_relocs;
3245 irelaend = irela + section->reloc_count;
3246 for (; irela < irelaend; irela++)
3247 {
3248 unsigned int r_type, r_indx;
3249 enum elf_aarch64_stub_type stub_type;
3250 struct elf_aarch64_stub_hash_entry *stub_entry;
3251 asection *sym_sec;
3252 bfd_vma sym_value;
3253 bfd_vma destination;
3254 struct elf_aarch64_link_hash_entry *hash;
3255 const char *sym_name;
3256 char *stub_name;
3257 const asection *id_sec;
3258 unsigned char st_type;
3259 bfd_size_type len;
3260
3261 r_type = ELFNN_R_TYPE (irela->r_info);
3262 r_indx = ELFNN_R_SYM (irela->r_info);
3263
3264 if (r_type >= (unsigned int) R_AARCH64_end)
3265 {
3266 bfd_set_error (bfd_error_bad_value);
3267 error_ret_free_internal:
3268 if (elf_section_data (section)->relocs == NULL)
3269 free (internal_relocs);
3270 goto error_ret_free_local;
3271 }
3272
3273 /* Only look for stubs on unconditional branch and
3274 branch and link instructions. */
3275 if (r_type != (unsigned int) AARCH64_R (CALL26)
3276 && r_type != (unsigned int) AARCH64_R (JUMP26))
3277 continue;
3278
3279 /* Now determine the call target, its name, value,
3280 section. */
3281 sym_sec = NULL;
3282 sym_value = 0;
3283 destination = 0;
3284 hash = NULL;
3285 sym_name = NULL;
3286 if (r_indx < symtab_hdr->sh_info)
3287 {
3288 /* It's a local symbol. */
3289 Elf_Internal_Sym *sym;
3290 Elf_Internal_Shdr *hdr;
3291
3292 if (local_syms == NULL)
3293 {
3294 local_syms
3295 = (Elf_Internal_Sym *) symtab_hdr->contents;
3296 if (local_syms == NULL)
3297 local_syms
3298 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3299 symtab_hdr->sh_info, 0,
3300 NULL, NULL, NULL);
3301 if (local_syms == NULL)
3302 goto error_ret_free_internal;
3303 }
3304
3305 sym = local_syms + r_indx;
3306 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3307 sym_sec = hdr->bfd_section;
3308 if (!sym_sec)
3309 /* This is an undefined symbol. It can never
3310 be resolved. */
3311 continue;
3312
3313 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3314 sym_value = sym->st_value;
3315 destination = (sym_value + irela->r_addend
3316 + sym_sec->output_offset
3317 + sym_sec->output_section->vma);
3318 st_type = ELF_ST_TYPE (sym->st_info);
3319 sym_name
3320 = bfd_elf_string_from_elf_section (input_bfd,
3321 symtab_hdr->sh_link,
3322 sym->st_name);
3323 }
3324 else
3325 {
3326 int e_indx;
3327
3328 e_indx = r_indx - symtab_hdr->sh_info;
3329 hash = ((struct elf_aarch64_link_hash_entry *)
3330 elf_sym_hashes (input_bfd)[e_indx]);
3331
3332 while (hash->root.root.type == bfd_link_hash_indirect
3333 || hash->root.root.type == bfd_link_hash_warning)
3334 hash = ((struct elf_aarch64_link_hash_entry *)
3335 hash->root.root.u.i.link);
3336
3337 if (hash->root.root.type == bfd_link_hash_defined
3338 || hash->root.root.type == bfd_link_hash_defweak)
3339 {
3340 struct elf_aarch64_link_hash_table *globals =
3341 elf_aarch64_hash_table (info);
3342 sym_sec = hash->root.root.u.def.section;
3343 sym_value = hash->root.root.u.def.value;
3344 /* For a destination in a shared library,
3345 use the PLT stub as target address to
3346 decide whether a branch stub is
3347 needed. */
3348 if (globals->root.splt != NULL && hash != NULL
3349 && hash->root.plt.offset != (bfd_vma) - 1)
3350 {
3351 sym_sec = globals->root.splt;
3352 sym_value = hash->root.plt.offset;
3353 if (sym_sec->output_section != NULL)
3354 destination = (sym_value
3355 + sym_sec->output_offset
3356 +
3357 sym_sec->output_section->vma);
3358 }
3359 else if (sym_sec->output_section != NULL)
3360 destination = (sym_value + irela->r_addend
3361 + sym_sec->output_offset
3362 + sym_sec->output_section->vma);
3363 }
3364 else if (hash->root.root.type == bfd_link_hash_undefined
3365 || (hash->root.root.type
3366 == bfd_link_hash_undefweak))
3367 {
3368 /* For a shared library, use the PLT stub as
3369 target address to decide whether a long
3370 branch stub is needed.
3371 For absolute code, they cannot be handled. */
3372 struct elf_aarch64_link_hash_table *globals =
3373 elf_aarch64_hash_table (info);
3374
3375 if (globals->root.splt != NULL && hash != NULL
3376 && hash->root.plt.offset != (bfd_vma) - 1)
3377 {
3378 sym_sec = globals->root.splt;
3379 sym_value = hash->root.plt.offset;
3380 if (sym_sec->output_section != NULL)
3381 destination = (sym_value
3382 + sym_sec->output_offset
3383 +
3384 sym_sec->output_section->vma);
3385 }
3386 else
3387 continue;
3388 }
3389 else
3390 {
3391 bfd_set_error (bfd_error_bad_value);
3392 goto error_ret_free_internal;
3393 }
3394 st_type = ELF_ST_TYPE (hash->root.type);
3395 sym_name = hash->root.root.root.string;
3396 }
3397
3398 /* Determine what (if any) linker stub is needed. */
3399 stub_type = aarch64_type_of_stub
3400 (info, section, irela, st_type, hash, destination);
3401 if (stub_type == aarch64_stub_none)
3402 continue;
3403
3404 /* Support for grouping stub sections. */
3405 id_sec = htab->stub_group[section->id].link_sec;
3406
3407 /* Get the name of this stub. */
3408 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3409 irela);
3410 if (!stub_name)
3411 goto error_ret_free_internal;
3412
3413 stub_entry =
3414 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3415 stub_name, FALSE, FALSE);
3416 if (stub_entry != NULL)
3417 {
3418 /* The proper stub has already been created. */
3419 free (stub_name);
3420 continue;
3421 }
3422
3423 stub_entry = elfNN_aarch64_add_stub (stub_name, section,
3424 htab);
3425 if (stub_entry == NULL)
3426 {
3427 free (stub_name);
3428 goto error_ret_free_internal;
3429 }
3430
3431 stub_entry->target_value = sym_value;
3432 stub_entry->target_section = sym_sec;
3433 stub_entry->stub_type = stub_type;
3434 stub_entry->h = hash;
3435 stub_entry->st_type = st_type;
3436
3437 if (sym_name == NULL)
3438 sym_name = "unnamed";
3439 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3440 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3441 if (stub_entry->output_name == NULL)
3442 {
3443 free (stub_name);
3444 goto error_ret_free_internal;
3445 }
3446
3447 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3448 sym_name);
3449
3450 stub_changed = TRUE;
3451 }
3452
3453 /* We're done with the internal relocs, free them. */
3454 if (elf_section_data (section)->relocs == NULL)
3455 free (internal_relocs);
3456 }
3457
3458 if (htab->fix_erratum_835769)
3459 {
3460 /* Scan for sequences which might trigger erratum 835769. */
3461 if (erratum_835769_scan (input_bfd, info, &erratum_835769_fixes,
3462 &num_erratum_835769_fixes,
3463 &erratum_835769_fix_table_size) != 0)
3464 goto error_ret_free_local;
3465 }
3466 }
3467
3468 if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes)
3469 stub_changed = TRUE;
3470
3471 if (!stub_changed)
3472 break;
3473
3474 /* OK, we've added some stubs. Find out the new size of the
3475 stub sections. */
3476 for (stub_sec = htab->stub_bfd->sections;
3477 stub_sec != NULL; stub_sec = stub_sec->next)
3478 {
3479 /* Ignore non-stub sections. */
3480 if (!strstr (stub_sec->name, STUB_SUFFIX))
3481 continue;
3482 stub_sec->size = 0;
3483 }
3484
3485 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3486
3487 /* Add erratum 835769 veneers to stub section sizes too. */
3488 if (htab->fix_erratum_835769)
3489 for (i = 0; i < num_erratum_835769_fixes; i++)
3490 {
3491 stub_sec = elf_aarch64_create_or_find_stub_sec (NULL,
3492 erratum_835769_fixes[i].section, htab);
3493
3494 if (stub_sec == NULL)
3495 goto error_ret_free_local;
3496
3497 stub_sec->size += 8;
3498 }
3499
3500 /* Ask the linker to do its stuff. */
3501 (*htab->layout_sections_again) ();
3502 stub_changed = FALSE;
3503 }
3504
3505 /* Add stubs for erratum 835769 fixes now. */
3506 if (htab->fix_erratum_835769)
3507 {
3508 for (i = 0; i < num_erratum_835769_fixes; i++)
3509 {
3510 struct elf_aarch64_stub_hash_entry *stub_entry;
3511 char *stub_name = erratum_835769_fixes[i].stub_name;
3512 asection *section = erratum_835769_fixes[i].section;
3513 unsigned int section_id = erratum_835769_fixes[i].section->id;
3514 asection *link_sec = htab->stub_group[section_id].link_sec;
3515 asection *stub_sec = htab->stub_group[section_id].stub_sec;
3516
3517 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3518 stub_name, TRUE, FALSE);
3519 if (stub_entry == NULL)
3520 {
3521 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3522 section->owner,
3523 stub_name);
3524 return FALSE;
3525 }
3526
3527 stub_entry->stub_sec = stub_sec;
3528 stub_entry->stub_offset = 0;
3529 stub_entry->id_sec = link_sec;
3530 stub_entry->stub_type = erratum_835769_fixes[i].stub_type;
3531 stub_entry->target_section = section;
3532 stub_entry->target_value = erratum_835769_fixes[i].offset;
3533 stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn;
3534 stub_entry->output_name = erratum_835769_fixes[i].stub_name;
3535 }
3536
3537 /* Stash the erratum 835769 fix array for use later in
3538 elfNN_aarch64_write_section(). */
3539 htab->aarch64_erratum_835769_fixes = erratum_835769_fixes;
3540 htab->num_aarch64_erratum_835769_fixes = num_erratum_835769_fixes;
3541 }
3542 else
3543 {
3544 htab->aarch64_erratum_835769_fixes = NULL;
3545 htab->num_aarch64_erratum_835769_fixes = 0;
3546 }
3547
3548 return TRUE;
3549
3550 error_ret_free_local:
3551 return FALSE;
3552 }
3553
3554 /* Build all the stubs associated with the current output file. The
3555 stubs are kept in a hash table attached to the main linker hash
3556 table. We also set up the .plt entries for statically linked PIC
3557 functions here. This function is called via aarch64_elf_finish in the
3558 linker. */
3559
3560 bfd_boolean
3561 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3562 {
3563 asection *stub_sec;
3564 struct bfd_hash_table *table;
3565 struct elf_aarch64_link_hash_table *htab;
3566
3567 htab = elf_aarch64_hash_table (info);
3568
3569 for (stub_sec = htab->stub_bfd->sections;
3570 stub_sec != NULL; stub_sec = stub_sec->next)
3571 {
3572 bfd_size_type size;
3573
3574 /* Ignore non-stub sections. */
3575 if (!strstr (stub_sec->name, STUB_SUFFIX))
3576 continue;
3577
3578 /* Allocate memory to hold the linker stubs. */
3579 size = stub_sec->size;
3580 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3581 if (stub_sec->contents == NULL && size != 0)
3582 return FALSE;
3583 stub_sec->size = 0;
3584 }
3585
3586 /* Build the stubs as directed by the stub hash table. */
3587 table = &htab->stub_hash_table;
3588 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3589
3590 return TRUE;
3591 }
3592
3593
3594 /* Add an entry to the code/data map for section SEC. */
3595
3596 static void
3597 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3598 {
3599 struct _aarch64_elf_section_data *sec_data =
3600 elf_aarch64_section_data (sec);
3601 unsigned int newidx;
3602
3603 if (sec_data->map == NULL)
3604 {
3605 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3606 sec_data->mapcount = 0;
3607 sec_data->mapsize = 1;
3608 }
3609
3610 newidx = sec_data->mapcount++;
3611
3612 if (sec_data->mapcount > sec_data->mapsize)
3613 {
3614 sec_data->mapsize *= 2;
3615 sec_data->map = bfd_realloc_or_free
3616 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3617 }
3618
3619 if (sec_data->map)
3620 {
3621 sec_data->map[newidx].vma = vma;
3622 sec_data->map[newidx].type = type;
3623 }
3624 }
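
/* Illustrative note on the map growth above: mapsize doubles whenever
   mapcount would exceed it (1 -> 2 -> 4 -> 8 ...), so recording N
   mapping symbols performs only O(log N) calls to bfd_realloc_or_free
   rather than one reallocation per entry.  */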
3625
3626
3627 /* Initialise maps of insn/data for input BFDs. */
3628 void
3629 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3630 {
3631 Elf_Internal_Sym *isymbuf;
3632 Elf_Internal_Shdr *hdr;
3633 unsigned int i, localsyms;
3634
3635 /* Make sure that we are dealing with an AArch64 elf binary. */
3636 if (!is_aarch64_elf (abfd))
3637 return;
3638
3639 if ((abfd->flags & DYNAMIC) != 0)
3640 return;
3641
3642 hdr = &elf_symtab_hdr (abfd);
3643 localsyms = hdr->sh_info;
3644
3645 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3646 should contain the number of local symbols, which should come before any
3647 global symbols. Mapping symbols are always local. */
3648 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3649
3650 /* No internal symbols read? Skip this BFD. */
3651 if (isymbuf == NULL)
3652 return;
3653
3654 for (i = 0; i < localsyms; i++)
3655 {
3656 Elf_Internal_Sym *isym = &isymbuf[i];
3657 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3658 const char *name;
3659
3660 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3661 {
3662 name = bfd_elf_string_from_elf_section (abfd,
3663 hdr->sh_link,
3664 isym->st_name);
3665
3666 if (bfd_is_aarch64_special_symbol_name
3667 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3668 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3669 }
3670 }
3671 }
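
/* For illustration: the mapping symbols recognised above are the local
   symbols "$x" (start of A64 code) and "$d" (start of data), possibly
   followed by a ".<suffix>".  Only name[1] matters, so a "$d" at offset
   0x40 of .text is recorded as the map entry { vma = 0x40, type = 'd' }.  */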
3672
3673 /* Set option values needed during linking. */
3674 void
3675 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3676 struct bfd_link_info *link_info,
3677 int no_enum_warn,
3678 int no_wchar_warn, int pic_veneer,
3679 int fix_erratum_835769)
3680 {
3681 struct elf_aarch64_link_hash_table *globals;
3682
3683 globals = elf_aarch64_hash_table (link_info);
3684 globals->pic_veneer = pic_veneer;
3685 globals->fix_erratum_835769 = fix_erratum_835769;
3686
3687 BFD_ASSERT (is_aarch64_elf (output_bfd));
3688 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3689 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3690 }
3691
3692 static bfd_vma
3693 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3694 struct elf_aarch64_link_hash_table
3695 *globals, struct bfd_link_info *info,
3696 bfd_vma value, bfd *output_bfd,
3697 bfd_boolean *unresolved_reloc_p)
3698 {
3699 bfd_vma off = (bfd_vma) - 1;
3700 asection *basegot = globals->root.sgot;
3701 bfd_boolean dyn = globals->root.dynamic_sections_created;
3702
3703 if (h != NULL)
3704 {
3705 BFD_ASSERT (basegot != NULL);
3706 off = h->got.offset;
3707 BFD_ASSERT (off != (bfd_vma) - 1);
3708 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3709 || (info->shared
3710 && SYMBOL_REFERENCES_LOCAL (info, h))
3711 || (ELF_ST_VISIBILITY (h->other)
3712 && h->root.type == bfd_link_hash_undefweak))
3713 {
3714 /* This is actually a static link, or it is a -Bsymbolic link
3715 and the symbol is defined locally. We must initialize this
3716 entry in the global offset table. Since the offset must
3717 always be a multiple of 8 (4 in the case of ILP32), we use
3718 the least significant bit to record whether we have
3719 initialized it already.
3720 When doing a dynamic link, we create a .rel(a).got relocation
3721 entry to initialize the value. This is done in the
3722 finish_dynamic_symbol routine. */
3723 if ((off & 1) != 0)
3724 off &= ~1;
3725 else
3726 {
3727 bfd_put_NN (output_bfd, value, basegot->contents + off);
3728 h->got.offset |= 1;
3729 }
3730 }
3731 else
3732 *unresolved_reloc_p = FALSE;
3733
3734 off = off + basegot->output_section->vma + basegot->output_offset;
3735 }
3736
3737 return off;
3738 }
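
/* Worked example of the low-bit marking used above, for illustration:
   GOT offsets are multiples of GOT_ENTRY_SIZE (8, or 4 for ILP32), so
   bit 0 is free to mean "slot already written".  An entry at offset
   0x18 is recorded as 0x19 once initialised; "off &= ~1" recovers 0x18
   on later visits, and the store is skipped the second time round.  */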
3739
3740 /* Change R_TYPE to a more efficient access model where possible,
3741 and return the new reloc type. */
3742
3743 static bfd_reloc_code_real_type
3744 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3745 struct elf_link_hash_entry *h)
3746 {
3747 bfd_boolean is_local = h == NULL;
3748
3749 switch (r_type)
3750 {
3751 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3752 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3753 return (is_local
3754 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3755 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3756
3757 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3758 return (is_local
3759 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3760 : r_type);
3761
3762 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3763 return (is_local
3764 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3765 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3766
3767 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3768 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3769 return (is_local
3770 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3771 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3772
3773 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3774 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3775
3776 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3777 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3778
3779 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3780 return r_type;
3781
3782 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3783 return (is_local
3784 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3785 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3786
3787 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3788 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3789 /* Instructions with these relocations will become NOPs. */
3790 return BFD_RELOC_AARCH64_NONE;
3791
3792 default:
3793 break;
3794 }
3795
3796 return r_type;
3797 }
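
/* For example, a general-dynamic pair against a local symbol (H == NULL)

     BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21  -> BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
     BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC -> BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC

   is relaxed GD->LE, while the same pair against a global symbol
   (H != NULL) becomes the GD->IE forms
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 and
   BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC.  The instructions
   themselves are rewritten later, in elfNN_aarch64_tls_relax.  */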
3798
3799 static unsigned int
3800 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3801 {
3802 switch (r_type)
3803 {
3804 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3805 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3806 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3807 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3808 return GOT_NORMAL;
3809
3810 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3811 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3812 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3813 return GOT_TLS_GD;
3814
3815 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3816 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3817 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3818 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3819 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3820 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3821 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3822 return GOT_TLSDESC_GD;
3823
3824 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3825 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3826 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3827 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3828 return GOT_TLS_IE;
3829
3830 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3831 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3832 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3833 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3834 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3835 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3836 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3837 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3838 return GOT_UNKNOWN;
3839
3840 default:
3841 break;
3842 }
3843 return GOT_UNKNOWN;
3844 }
3845
3846 static bfd_boolean
3847 aarch64_can_relax_tls (bfd *input_bfd,
3848 struct bfd_link_info *info,
3849 bfd_reloc_code_real_type r_type,
3850 struct elf_link_hash_entry *h,
3851 unsigned long r_symndx)
3852 {
3853 unsigned int symbol_got_type;
3854 unsigned int reloc_got_type;
3855
3856 if (! IS_AARCH64_TLS_RELOC (r_type))
3857 return FALSE;
3858
3859 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3860 reloc_got_type = aarch64_reloc_got_type (r_type);
3861
3862 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3863 return TRUE;
3864
3865 if (info->shared)
3866 return FALSE;
3867
3868 if (h && h->root.type == bfd_link_hash_undefweak)
3869 return FALSE;
3870
3871 return TRUE;
3872 }
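
/* In other words: a GD-style access to a symbol whose GOT type has
   already been fixed as IE may always be relaxed (GD->IE); beyond
   that, relaxation is only attempted when not building a shared
   library, and never for undefined weak symbols.  */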
3873
3874 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3875 enumerator. */
3876
3877 static bfd_reloc_code_real_type
3878 aarch64_tls_transition (bfd *input_bfd,
3879 struct bfd_link_info *info,
3880 unsigned int r_type,
3881 struct elf_link_hash_entry *h,
3882 unsigned long r_symndx)
3883 {
3884 bfd_reloc_code_real_type bfd_r_type
3885 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3886
3887 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3888 return bfd_r_type;
3889
3890 return aarch64_tls_transition_without_check (bfd_r_type, h);
3891 }
3892
3893 /* Return the base VMA address which should be subtracted from real addresses
3894 when resolving R_AARCH64_TLS_DTPREL relocations. */
3895
3896 static bfd_vma
3897 dtpoff_base (struct bfd_link_info *info)
3898 {
3899 /* If tls_sec is NULL, we should have signalled an error already. */
3900 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3901 return elf_hash_table (info)->tls_sec->vma;
3902 }
3903
3904 /* Return the base VMA address which should be subtracted from real addresses
3905 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3906
3907 static bfd_vma
3908 tpoff_base (struct bfd_link_info *info)
3909 {
3910 struct elf_link_hash_table *htab = elf_hash_table (info);
3911
3912 /* If tls_sec is NULL, we should have signalled an error already. */
3913 BFD_ASSERT (htab->tls_sec != NULL);
3914
3915 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3916 htab->tls_sec->alignment_power);
3917 return htab->tls_sec->vma - base;
3918 }
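
/* Worked example, for illustration, taking TCB_SIZE to be 16: if the
   TLS segment is 8-byte aligned then align_power (16, 3) == 16 and
   tpoff_base () returns tls_sec->vma - 16.  A variable 4 bytes into
   the segment therefore resolves to (tls_sec->vma + 4) - tpoff_base ()
   == 20, i.e. it lives 20 bytes above the thread pointer, leaving the
   first 16 bytes after TP for the TCB.  */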
3919
3920 static bfd_vma *
3921 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3922 unsigned long r_symndx)
3923 {
3924 /* Return a pointer to the recorded GOT offset for the symbol
3925 referred to by H, or for the local symbol with index R_SYMNDX. */
3926 if (h != NULL)
3927 return &h->got.offset;
3928 else
3929 {
3930 /* local symbol */
3931 struct elf_aarch64_local_symbol *l;
3932
3933 l = elf_aarch64_locals (input_bfd);
3934 return &l[r_symndx].got_offset;
3935 }
3936 }
3937
3938 static void
3939 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3940 unsigned long r_symndx)
3941 {
3942 bfd_vma *p;
3943 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3944 *p |= 1;
3945 }
3946
3947 static int
3948 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3949 unsigned long r_symndx)
3950 {
3951 bfd_vma value;
3952 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3953 return value & 1;
3954 }
3955
3956 static bfd_vma
3957 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3958 unsigned long r_symndx)
3959 {
3960 bfd_vma value;
3961 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3962 value &= ~1;
3963 return value;
3964 }
3965
3966 static bfd_vma *
3967 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3968 unsigned long r_symndx)
3969 {
3970 /* Return a pointer to the recorded TLSDESC GOT jump-table offset for
3971 the symbol referred to by H, or for the local symbol R_SYMNDX. */
3972 if (h != NULL)
3973 {
3974 struct elf_aarch64_link_hash_entry *eh;
3975 eh = (struct elf_aarch64_link_hash_entry *) h;
3976 return &eh->tlsdesc_got_jump_table_offset;
3977 }
3978 else
3979 {
3980 /* local symbol */
3981 struct elf_aarch64_local_symbol *l;
3982
3983 l = elf_aarch64_locals (input_bfd);
3984 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3985 }
3986 }
3987
3988 static void
3989 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3990 unsigned long r_symndx)
3991 {
3992 bfd_vma *p;
3993 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3994 *p |= 1;
3995 }
3996
3997 static int
3998 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3999 struct elf_link_hash_entry *h,
4000 unsigned long r_symndx)
4001 {
4002 bfd_vma value;
4003 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4004 return value & 1;
4005 }
4006
4007 static bfd_vma
4008 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4009 unsigned long r_symndx)
4010 {
4011 bfd_vma value;
4012 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4013 value &= ~1;
4014 return value;
4015 }
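
/* Usage sketch for the helpers above (illustrative): the relocation
   code below initialises each GOT/TLSDESC entry at most once by doing

     if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
       {
         ... write the entry and any dynamic relocs ...
         symbol_got_offset_mark (input_bfd, h, r_symndx);
       }

   and likewise with the symbol_tlsdesc_got_offset_* variants; the mark
   is again just bit 0 of the stored offset.  */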
4016
4017 /* Data for make_branch_to_erratum_835769_stub(). */
4018
4019 struct erratum_835769_branch_to_stub_data
4020 {
4021 asection *output_section;
4022 bfd_byte *contents;
4023 };
4024
4025 /* Helper to insert branches to erratum 835769 stubs in the right
4026 places for a particular section. */
4027
4028 static bfd_boolean
4029 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4030 void *in_arg)
4031 {
4032 struct elf_aarch64_stub_hash_entry *stub_entry;
4033 struct erratum_835769_branch_to_stub_data *data;
4034 bfd_byte *contents;
4035 unsigned long branch_insn = 0;
4036 bfd_vma veneered_insn_loc, veneer_entry_loc;
4037 bfd_signed_vma branch_offset;
4038 unsigned int target;
4039 bfd *abfd;
4040
4041 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4042 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4043
4044 if (stub_entry->target_section != data->output_section
4045 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4046 return TRUE;
4047
4048 contents = data->contents;
4049 veneered_insn_loc = stub_entry->target_section->output_section->vma
4050 + stub_entry->target_section->output_offset
4051 + stub_entry->target_value;
4052 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4053 + stub_entry->stub_sec->output_offset
4054 + stub_entry->stub_offset;
4055 branch_offset = veneer_entry_loc - veneered_insn_loc;
4056
4057 abfd = stub_entry->target_section->owner;
4058 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4059 (*_bfd_error_handler)
4060 (_("%B: error: Erratum 835769 stub out "
4061 "of range (input file too large)"), abfd);
4062
4063 target = stub_entry->target_value;
4064 branch_insn = 0x14000000;
4065 branch_offset >>= 2;
4066 branch_offset &= 0x3ffffff;
4067 branch_insn |= branch_offset;
4068 bfd_putl32 (branch_insn, &contents[target]);
4069
4070 return TRUE;
4071 }
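
/* Worked example of the branch encoding above, for illustration: the
   veneer branch is an unconditional B, 0x14000000 | imm26, where imm26
   is the signed byte offset divided by 4.  For a veneer 0x1000 bytes
   after the veneered instruction, branch_offset >> 2 == 0x400 and the
   emitted word is 0x14000400, i.e. "b .+0x1000".  The 26-bit immediate
   gives a reach of +/-128MiB, which is the range aarch64_valid_branch_p
   enforces before the encoding is attempted.  */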
4072
4073 static bfd_boolean
4074 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4075 struct bfd_link_info *link_info,
4076 asection *sec,
4077 bfd_byte *contents)
4078
4079 {
4080 struct elf_aarch64_link_hash_table *globals =
4081 elf_aarch64_hash_table (link_info);
4082
4083 if (globals == NULL)
4084 return FALSE;
4085
4086 /* Fix code to point to erratum 835769 stubs. */
4087 if (globals->fix_erratum_835769)
4088 {
4089 struct erratum_835769_branch_to_stub_data data;
4090
4091 data.output_section = sec;
4092 data.contents = contents;
4093 bfd_hash_traverse (&globals->stub_hash_table,
4094 make_branch_to_erratum_835769_stub, &data);
4095 }
4096
4097 return FALSE;
4098 }
4099
4100 /* Perform a relocation as part of a final link. */
4101 static bfd_reloc_status_type
4102 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4103 bfd *input_bfd,
4104 bfd *output_bfd,
4105 asection *input_section,
4106 bfd_byte *contents,
4107 Elf_Internal_Rela *rel,
4108 bfd_vma value,
4109 struct bfd_link_info *info,
4110 asection *sym_sec,
4111 struct elf_link_hash_entry *h,
4112 bfd_boolean *unresolved_reloc_p,
4113 bfd_boolean save_addend,
4114 bfd_vma *saved_addend,
4115 Elf_Internal_Sym *sym)
4116 {
4117 Elf_Internal_Shdr *symtab_hdr;
4118 unsigned int r_type = howto->type;
4119 bfd_reloc_code_real_type bfd_r_type
4120 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4121 bfd_reloc_code_real_type new_bfd_r_type;
4122 unsigned long r_symndx;
4123 bfd_byte *hit_data = contents + rel->r_offset;
4124 bfd_vma place;
4125 bfd_signed_vma signed_addend;
4126 struct elf_aarch64_link_hash_table *globals;
4127 bfd_boolean weak_undef_p;
4128
4129 globals = elf_aarch64_hash_table (info);
4130
4131 symtab_hdr = &elf_symtab_hdr (input_bfd);
4132
4133 BFD_ASSERT (is_aarch64_elf (input_bfd));
4134
4135 r_symndx = ELFNN_R_SYM (rel->r_info);
4136
4137 /* It is possible to have linker relaxations on some TLS access
4138 models. Update our information here. */
4139 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4140 if (new_bfd_r_type != bfd_r_type)
4141 {
4142 bfd_r_type = new_bfd_r_type;
4143 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4144 BFD_ASSERT (howto != NULL);
4145 r_type = howto->type;
4146 }
4147
4148 place = input_section->output_section->vma
4149 + input_section->output_offset + rel->r_offset;
4150
4151 /* Get addend, accumulating the addend for consecutive relocs
4152 which refer to the same offset. */
4153 signed_addend = saved_addend ? *saved_addend : 0;
4154 signed_addend += rel->r_addend;
4155
4156 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4157 : bfd_is_und_section (sym_sec));
4158
4159 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4160 it here if it is defined in a non-shared object. */
4161 if (h != NULL
4162 && h->type == STT_GNU_IFUNC
4163 && h->def_regular)
4164 {
4165 asection *plt;
4166 const char *name;
4167 asection *base_got;
4168 bfd_vma off;
4169
4170 if ((input_section->flags & SEC_ALLOC) == 0
4171 || h->plt.offset == (bfd_vma) -1)
4172 abort ();
4173
4174 /* STT_GNU_IFUNC symbol must go through PLT. */
4175 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4176 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4177
4178 switch (bfd_r_type)
4179 {
4180 default:
4181 if (h->root.root.string)
4182 name = h->root.root.string;
4183 else
4184 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4185 NULL);
4186 (*_bfd_error_handler)
4187 (_("%B: relocation %s against STT_GNU_IFUNC "
4188 "symbol `%s' isn't handled by %s"), input_bfd,
4189 howto->name, name, __FUNCTION__);
4190 bfd_set_error (bfd_error_bad_value);
4191 return FALSE;
4192
4193 case BFD_RELOC_AARCH64_NN:
4194 if (rel->r_addend != 0)
4195 {
4196 if (h->root.root.string)
4197 name = h->root.root.string;
4198 else
4199 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4200 sym, NULL);
4201 (*_bfd_error_handler)
4202 (_("%B: relocation %s against STT_GNU_IFUNC "
4203 "symbol `%s' has non-zero addend: %d"),
4204 input_bfd, howto->name, name, rel->r_addend);
4205 bfd_set_error (bfd_error_bad_value);
4206 return FALSE;
4207 }
4208
4209 /* Generate a dynamic relocation only when there is a
4210 non-GOT reference in a shared object. */
4211 if (info->shared && h->non_got_ref)
4212 {
4213 Elf_Internal_Rela outrel;
4214 asection *sreloc;
4215
4216 /* Need a dynamic relocation to get the real function
4217 address. */
4218 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4219 info,
4220 input_section,
4221 rel->r_offset);
4222 if (outrel.r_offset == (bfd_vma) -1
4223 || outrel.r_offset == (bfd_vma) -2)
4224 abort ();
4225
4226 outrel.r_offset += (input_section->output_section->vma
4227 + input_section->output_offset);
4228
4229 if (h->dynindx == -1
4230 || h->forced_local
4231 || info->executable)
4232 {
4233 /* This symbol is resolved locally. */
4234 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4235 outrel.r_addend = (h->root.u.def.value
4236 + h->root.u.def.section->output_section->vma
4237 + h->root.u.def.section->output_offset);
4238 }
4239 else
4240 {
4241 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4242 outrel.r_addend = 0;
4243 }
4244
4245 sreloc = globals->root.irelifunc;
4246 elf_append_rela (output_bfd, sreloc, &outrel);
4247
4248 /* If this reloc is against an external symbol, we
4249 do not want to fiddle with the addend. Otherwise,
4250 we need to include the symbol value so that it
4251 becomes an addend for the dynamic reloc. For an
4252 internal symbol, we have already updated the addend. */
4253 return bfd_reloc_ok;
4254 }
4255 /* FALLTHROUGH */
4256 case BFD_RELOC_AARCH64_JUMP26:
4257 case BFD_RELOC_AARCH64_CALL26:
4258 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4259 signed_addend,
4260 weak_undef_p);
4261 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4262 howto, value);
4263 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4264 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4265 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4266 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4267 base_got = globals->root.sgot;
4268 off = h->got.offset;
4269
4270 if (base_got == NULL)
4271 abort ();
4272
4273 if (off == (bfd_vma) -1)
4274 {
4275 bfd_vma plt_index;
4276
4277 /* We can't use h->got.offset here to save state, or
4278 even just remember the offset, as finish_dynamic_symbol
4279 would use that as offset into .got. */
4280
4281 if (globals->root.splt != NULL)
4282 {
4283 plt_index = ((h->plt.offset - globals->plt_header_size)
4284 / globals->plt_entry_size);
4285 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4286 base_got = globals->root.sgotplt;
4287 }
4288 else
4289 {
4290 plt_index = h->plt.offset / globals->plt_entry_size;
4291 off = plt_index * GOT_ENTRY_SIZE;
4292 base_got = globals->root.igotplt;
4293 }
4294
4295 if (h->dynindx == -1
4296 || h->forced_local
4297 || info->symbolic)
4298 {
4299 /* This references the local definition. We must
4300 initialize this entry in the global offset table.
4301 Since the offset must always be a multiple of 8,
4302 we use the least significant bit to record
4303 whether we have initialized it already.
4304
4305 When doing a dynamic link, we create a .rela.got
4306 relocation entry to initialize the value. This
4307 is done in the finish_dynamic_symbol routine. */
4308 if ((off & 1) != 0)
4309 off &= ~1;
4310 else
4311 {
4312 bfd_put_NN (output_bfd, value,
4313 base_got->contents + off);
4314 /* Note that this is harmless as -1 | 1 still is -1. */
4315 h->got.offset |= 1;
4316 }
4317 }
4318 value = (base_got->output_section->vma
4319 + base_got->output_offset + off);
4320 }
4321 else
4322 value = aarch64_calculate_got_entry_vma (h, globals, info,
4323 value, output_bfd,
4324 unresolved_reloc_p);
4325 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4326 0, weak_undef_p);
4327 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4328 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4329 case BFD_RELOC_AARCH64_ADD_LO12:
4330 break;
4331 }
4332 }
4333
4334 switch (bfd_r_type)
4335 {
4336 case BFD_RELOC_AARCH64_NONE:
4337 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4338 *unresolved_reloc_p = FALSE;
4339 return bfd_reloc_ok;
4340
4341 case BFD_RELOC_AARCH64_NN:
4342
4343 /* When generating a shared object or relocatable executable, these
4344 relocations are copied into the output file to be resolved at
4345 run time. */
4346 if ((info->shared || globals->root.is_relocatable_executable)
4347 && (input_section->flags & SEC_ALLOC)
4348 && (h == NULL
4349 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4350 || h->root.type != bfd_link_hash_undefweak))
4351 {
4352 Elf_Internal_Rela outrel;
4353 bfd_byte *loc;
4354 bfd_boolean skip, relocate;
4355 asection *sreloc;
4356
4357 *unresolved_reloc_p = FALSE;
4358
4359 skip = FALSE;
4360 relocate = FALSE;
4361
4362 outrel.r_addend = signed_addend;
4363 outrel.r_offset =
4364 _bfd_elf_section_offset (output_bfd, info, input_section,
4365 rel->r_offset);
4366 if (outrel.r_offset == (bfd_vma) - 1)
4367 skip = TRUE;
4368 else if (outrel.r_offset == (bfd_vma) - 2)
4369 {
4370 skip = TRUE;
4371 relocate = TRUE;
4372 }
4373
4374 outrel.r_offset += (input_section->output_section->vma
4375 + input_section->output_offset);
4376
4377 if (skip)
4378 memset (&outrel, 0, sizeof outrel);
4379 else if (h != NULL
4380 && h->dynindx != -1
4381 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4382 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4383 else
4384 {
4385 int symbol;
4386
4387 /* On SVR4-ish systems, the dynamic loader cannot
4388 relocate the text and data segments independently,
4389 so the symbol does not matter. */
4390 symbol = 0;
4391 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4392 outrel.r_addend += value;
4393 }
4394
4395 sreloc = elf_section_data (input_section)->sreloc;
4396 if (sreloc == NULL || sreloc->contents == NULL)
4397 return bfd_reloc_notsupported;
4398
4399 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4400 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4401
4402 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4403 {
4404 /* Sanity check that we have previously allocated
4405 sufficient space in the relocation section for the
4406 number of relocations we actually want to emit. */
4407 abort ();
4408 }
4409
4410 /* If this reloc is against an external symbol, we do not want to
4411 fiddle with the addend. Otherwise, we need to include the symbol
4412 value so that it becomes an addend for the dynamic reloc. */
4413 if (!relocate)
4414 return bfd_reloc_ok;
4415
4416 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4417 contents, rel->r_offset, value,
4418 signed_addend);
4419 }
4420 else
4421 value += signed_addend;
4422 break;
4423
4424 case BFD_RELOC_AARCH64_JUMP26:
4425 case BFD_RELOC_AARCH64_CALL26:
4426 {
4427 asection *splt = globals->root.splt;
4428 bfd_boolean via_plt_p =
4429 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4430
4431 /* A call to an undefined weak symbol is converted to a jump to
4432 the next instruction unless a PLT entry will be created.
4433 The jump to the next instruction is optimized as a NOP.
4434 Do the same for local undefined symbols. */
4435 if (weak_undef_p && ! via_plt_p)
4436 {
4437 bfd_putl32 (INSN_NOP, hit_data);
4438 return bfd_reloc_ok;
4439 }
4440
4441 /* If the call goes through a PLT entry, make sure to
4442 check distance to the right destination address. */
4443 if (via_plt_p)
4444 {
4445 value = (splt->output_section->vma
4446 + splt->output_offset + h->plt.offset);
4447 *unresolved_reloc_p = FALSE;
4448 }
4449
4450 /* If the target symbol is global and marked as a function, the
4451 relocation applies to a function call or a tail call. In this
4452 situation we can veneer out-of-range branches. The veneers
4453 use IP0 and IP1, hence they cannot be used for arbitrary
4454 out-of-range branches that occur within the body of a function. */
4455 if (h && h->type == STT_FUNC)
4456 {
4457 /* Check if a stub has to be inserted because the destination
4458 is too far away. */
4459 if (! aarch64_valid_branch_p (value, place))
4460 {
4461 /* The target is out of reach, so redirect the branch to
4462 the local stub for this function. */
4463 struct elf_aarch64_stub_hash_entry *stub_entry;
4464 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4465 sym_sec, h,
4466 rel, globals);
4467 if (stub_entry != NULL)
4468 value = (stub_entry->stub_offset
4469 + stub_entry->stub_sec->output_offset
4470 + stub_entry->stub_sec->output_section->vma);
4471 }
4472 }
4473 }
4474 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4475 signed_addend, weak_undef_p);
4476 break;
4477
4478 case BFD_RELOC_AARCH64_16:
4479 #if ARCH_SIZE == 64
4480 case BFD_RELOC_AARCH64_32:
4481 #endif
4482 case BFD_RELOC_AARCH64_ADD_LO12:
4483 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4484 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4485 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4486 case BFD_RELOC_AARCH64_BRANCH19:
4487 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4488 case BFD_RELOC_AARCH64_LDST8_LO12:
4489 case BFD_RELOC_AARCH64_LDST16_LO12:
4490 case BFD_RELOC_AARCH64_LDST32_LO12:
4491 case BFD_RELOC_AARCH64_LDST64_LO12:
4492 case BFD_RELOC_AARCH64_LDST128_LO12:
4493 case BFD_RELOC_AARCH64_MOVW_G0_S:
4494 case BFD_RELOC_AARCH64_MOVW_G1_S:
4495 case BFD_RELOC_AARCH64_MOVW_G2_S:
4496 case BFD_RELOC_AARCH64_MOVW_G0:
4497 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4498 case BFD_RELOC_AARCH64_MOVW_G1:
4499 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4500 case BFD_RELOC_AARCH64_MOVW_G2:
4501 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4502 case BFD_RELOC_AARCH64_MOVW_G3:
4503 case BFD_RELOC_AARCH64_16_PCREL:
4504 case BFD_RELOC_AARCH64_32_PCREL:
4505 case BFD_RELOC_AARCH64_64_PCREL:
4506 case BFD_RELOC_AARCH64_TSTBR14:
4507 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4508 signed_addend, weak_undef_p);
4509 break;
4510
4511 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4512 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4513 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4514 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4515 if (globals->root.sgot == NULL)
4516 BFD_ASSERT (h != NULL);
4517
4518 if (h != NULL)
4519 {
4520 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4521 output_bfd,
4522 unresolved_reloc_p);
4523 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4524 0, weak_undef_p);
4525 }
4526 break;
4527
4528 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4529 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4530 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4531 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4532 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4533 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4534 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4535 if (globals->root.sgot == NULL)
4536 return bfd_reloc_notsupported;
4537
4538 value = (symbol_got_offset (input_bfd, h, r_symndx)
4539 + globals->root.sgot->output_section->vma
4540 + globals->root.sgot->output_offset);
4541
4542 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4543 0, weak_undef_p);
4544 *unresolved_reloc_p = FALSE;
4545 break;
4546
4547 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4548 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4549 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4550 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4551 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4552 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4554 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4555 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4556 signed_addend - tpoff_base (info),
4557 weak_undef_p);
4558 *unresolved_reloc_p = FALSE;
4559 break;
4560
4561 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4562 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4563 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4564 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4565 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4566 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4567 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4568 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4569 if (globals->root.sgot == NULL)
4570 return bfd_reloc_notsupported;
4571 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4572 + globals->root.sgotplt->output_section->vma
4573 + globals->root.sgotplt->output_offset
4574 + globals->sgotplt_jump_table_size);
4575
4576 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4577 0, weak_undef_p);
4578 *unresolved_reloc_p = FALSE;
4579 break;
4580
4581 default:
4582 return bfd_reloc_notsupported;
4583 }
4584
4585 if (saved_addend)
4586 *saved_addend = value;
4587
4588 /* Only apply the final relocation in a sequence. */
4589 if (save_addend)
4590 return bfd_reloc_continue;
4591
4592 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4593 howto, value);
4594 }
4595
4596 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4597 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4598 link.
4599
4600 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4601 is to then call final_link_relocate. Return other values in the
4602 case of error. */
4603
4604 static bfd_reloc_status_type
4605 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4606 bfd *input_bfd, bfd_byte *contents,
4607 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4608 {
4609 bfd_boolean is_local = h == NULL;
4610 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4611 unsigned long insn;
4612
4613 BFD_ASSERT (globals && input_bfd && contents && rel);
4614
4615 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4616 {
4617 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4618 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4619 if (is_local)
4620 {
4621 /* GD->LE relaxation:
4622 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4623 or
4624 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4625 */
4626 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4627 return bfd_reloc_continue;
4628 }
4629 else
4630 {
4631 /* GD->IE relaxation:
4632 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4633 or
4634 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4635 */
4636 return bfd_reloc_continue;
4637 }
4638
4639 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4640 BFD_ASSERT (0);
4641 break;
4642
4643 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4644 if (is_local)
4645 {
4646 /* Tiny TLSDESC->LE relaxation:
4647 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4648 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4649 .tlsdesccall var
4650 blr x1 => nop
4651 */
4652 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4653 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4654
4655 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4656 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4657 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4658
4659 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4660 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4661 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4662 return bfd_reloc_continue;
4663 }
4664 else
4665 {
4666 /* Tiny TLSDESC->IE relaxation:
4667 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4668 adr x0, :tlsdesc:var => nop
4669 .tlsdesccall var
4670 blr x1 => nop
4671 */
4672 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4673 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4674
4675 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4676 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4677
4678 bfd_putl32 (0x58000000, contents + rel->r_offset);
4679 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4680 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4681 return bfd_reloc_continue;
4682 }
4683
4684 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4685 if (is_local)
4686 {
4687 /* Tiny GD->LE relaxation:
4688 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4689 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4690 nop => add x0, x0, #:tprel_lo12_nc:x
4691 */
4692
4693 /* First kill the tls_get_addr reloc on the bl instruction. */
4694 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4695
4696 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4697 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4698 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4699
4700 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4701 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4702 rel[1].r_offset = rel->r_offset + 8;
4703
4704 /* Move the current relocation to the second instruction in
4705 the sequence. */
4706 rel->r_offset += 4;
4707 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4708 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4709 return bfd_reloc_continue;
4710 }
4711 else
4712 {
4713 /* Tiny GD->IE relaxation:
4714 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4715 bl __tls_get_addr => mrs x1, tpidr_el0
4716 nop => add x0, x0, x1
4717 */
4718
4719 /* First kill the tls_get_addr reloc on the bl instruction. */
4720 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4721 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4722
4723 bfd_putl32 (0x58000000, contents + rel->r_offset);
4724 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4725 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4726 return bfd_reloc_continue;
4727 }
4728
4729 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4730 return bfd_reloc_continue;
4731
4732 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4733 if (is_local)
4734 {
4735 /* GD->LE relaxation:
4736 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4737 */
4738 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4739 return bfd_reloc_continue;
4740 }
4741 else
4742 {
4743 /* GD->IE relaxation:
4744 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4745 */
4746 insn = bfd_getl32 (contents + rel->r_offset);
4747 insn &= 0xffffffe0;
4748 bfd_putl32 (insn, contents + rel->r_offset);
4749 return bfd_reloc_continue;
4750 }
4751
4752 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4753 if (is_local)
4754 {
4755 /* GD->LE relaxation
4756 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4757 bl __tls_get_addr => mrs x1, tpidr_el0
4758 nop => add x0, x1, x0
4759 */
4760
4761 /* First kill the tls_get_addr reloc on the bl instruction. */
4762 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4763 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4764
4765 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4766 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4767 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4768 return bfd_reloc_continue;
4769 }
4770 else
4771 {
4772 /* GD->IE relaxation
4773 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4774 BL __tls_get_addr => mrs x1, tpidr_el0
4775 R_AARCH64_CALL26
4776 NOP => add x0, x1, x0
4777 */
4778
4779 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4780
4781 /* Remove the relocation on the BL instruction. */
4782 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4783
4784 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4785
4786 /* We choose to fixup the BL and NOP instructions using the
4787 offset from the second relocation to allow flexibility in
4788 scheduling instructions between the ADD and BL. */
4789 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4790 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4791 return bfd_reloc_continue;
4792 }
4793
4794 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4795 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4796 /* GD->IE/LE relaxation:
4797 add x0, x0, #:tlsdesc_lo12:var => nop
4798 blr xd => nop
4799 */
4800 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4801 return bfd_reloc_ok;
4802
4803 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4804 /* IE->LE relaxation:
4805 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4806 */
4807 if (is_local)
4808 {
4809 insn = bfd_getl32 (contents + rel->r_offset);
4810 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4811 }
4812 return bfd_reloc_continue;
4813
4814 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4815 /* IE->LE relaxation:
4816 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4817 */
4818 if (is_local)
4819 {
4820 insn = bfd_getl32 (contents + rel->r_offset);
4821 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4822 }
4823 return bfd_reloc_continue;
4824
4825 default:
4826 return bfd_reloc_continue;
4827 }
4828
4829 return bfd_reloc_ok;
4830 }
4831
4832 /* Relocate an AArch64 ELF section. */
4833
4834 static bfd_boolean
4835 elfNN_aarch64_relocate_section (bfd *output_bfd,
4836 struct bfd_link_info *info,
4837 bfd *input_bfd,
4838 asection *input_section,
4839 bfd_byte *contents,
4840 Elf_Internal_Rela *relocs,
4841 Elf_Internal_Sym *local_syms,
4842 asection **local_sections)
4843 {
4844 Elf_Internal_Shdr *symtab_hdr;
4845 struct elf_link_hash_entry **sym_hashes;
4846 Elf_Internal_Rela *rel;
4847 Elf_Internal_Rela *relend;
4848 const char *name;
4849 struct elf_aarch64_link_hash_table *globals;
4850 bfd_boolean save_addend = FALSE;
4851 bfd_vma addend = 0;
4852
4853 globals = elf_aarch64_hash_table (info);
4854
4855 symtab_hdr = &elf_symtab_hdr (input_bfd);
4856 sym_hashes = elf_sym_hashes (input_bfd);
4857
4858 rel = relocs;
4859 relend = relocs + input_section->reloc_count;
4860 for (; rel < relend; rel++)
4861 {
4862 unsigned int r_type;
4863 bfd_reloc_code_real_type bfd_r_type;
4864 bfd_reloc_code_real_type relaxed_bfd_r_type;
4865 reloc_howto_type *howto;
4866 unsigned long r_symndx;
4867 Elf_Internal_Sym *sym;
4868 asection *sec;
4869 struct elf_link_hash_entry *h;
4870 bfd_vma relocation;
4871 bfd_reloc_status_type r;
4872 arelent bfd_reloc;
4873 char sym_type;
4874 bfd_boolean unresolved_reloc = FALSE;
4875 char *error_message = NULL;
4876
4877 r_symndx = ELFNN_R_SYM (rel->r_info);
4878 r_type = ELFNN_R_TYPE (rel->r_info);
4879
4880 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4881 howto = bfd_reloc.howto;
4882
4883 if (howto == NULL)
4884 {
4885 (*_bfd_error_handler)
4886 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4887 input_bfd, input_section, r_type);
4888 return FALSE;
4889 }
4890 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4891
4892 h = NULL;
4893 sym = NULL;
4894 sec = NULL;
4895
4896 if (r_symndx < symtab_hdr->sh_info)
4897 {
4898 sym = local_syms + r_symndx;
4899 sym_type = ELFNN_ST_TYPE (sym->st_info);
4900 sec = local_sections[r_symndx];
4901
4902 /* An object file might have a reference to a local
4903 undefined symbol. This is a daft object file, but we
4904 should at least do something about it. */
4905 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4906 && bfd_is_und_section (sec)
4907 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4908 {
4909 if (!info->callbacks->undefined_symbol
4910 (info, bfd_elf_string_from_elf_section
4911 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4912 input_bfd, input_section, rel->r_offset, TRUE))
4913 return FALSE;
4914 }
4915
4916 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4917
4918 /* Relocate against local STT_GNU_IFUNC symbol. */
4919 if (!info->relocatable
4920 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4921 {
4922 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4923 rel, FALSE);
4924 if (h == NULL)
4925 abort ();
4926
4927 /* Set STT_GNU_IFUNC symbol value. */
4928 h->root.u.def.value = sym->st_value;
4929 h->root.u.def.section = sec;
4930 }
4931 }
4932 else
4933 {
4934 bfd_boolean warned, ignored;
4935
4936 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4937 r_symndx, symtab_hdr, sym_hashes,
4938 h, sec, relocation,
4939 unresolved_reloc, warned, ignored);
4940
4941 sym_type = h->type;
4942 }
4943
4944 if (sec != NULL && discarded_section (sec))
4945 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4946 rel, 1, relend, howto, 0, contents);
4947
4948 if (info->relocatable)
4949 continue;
4950
4951 if (h != NULL)
4952 name = h->root.root.string;
4953 else
4954 {
4955 name = (bfd_elf_string_from_elf_section
4956 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4957 if (name == NULL || *name == '\0')
4958 name = bfd_section_name (input_bfd, sec);
4959 }
4960
4961 if (r_symndx != 0
4962 && r_type != R_AARCH64_NONE
4963 && r_type != R_AARCH64_NULL
4964 && (h == NULL
4965 || h->root.type == bfd_link_hash_defined
4966 || h->root.type == bfd_link_hash_defweak)
4967 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4968 {
4969 (*_bfd_error_handler)
4970 ((sym_type == STT_TLS
4971 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4972 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4973 input_bfd,
4974 input_section, (long) rel->r_offset, howto->name, name);
4975 }
4976
4977 /* We relax only if we can see that there can be a valid transition
4978 from one reloc type to another.
4979 We call elfNN_aarch64_final_link_relocate unless we're completely
4980 done, i.e., the relaxation produced the final output we want. */
4981
4982 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4983 h, r_symndx);
4984 if (relaxed_bfd_r_type != bfd_r_type)
4985 {
4986 bfd_r_type = relaxed_bfd_r_type;
4987 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4988 BFD_ASSERT (howto != NULL);
4989 r_type = howto->type;
4990 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4991 unresolved_reloc = 0;
4992 }
4993 else
4994 r = bfd_reloc_continue;
4995
4996 /* There may be multiple consecutive relocations for the
4997 same offset. In that case we are supposed to treat the
4998 output of each relocation as the addend for the next. */
4999 if (rel + 1 < relend
5000 && rel->r_offset == rel[1].r_offset
5001 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5002 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5003 save_addend = TRUE;
5004 else
5005 save_addend = FALSE;
5006
5007 if (r == bfd_reloc_continue)
5008 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5009 input_section, contents, rel,
5010 relocation, info, sec,
5011 h, &unresolved_reloc,
5012 save_addend, &addend, sym);
5013
5014 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5015 {
5016 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5017 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5018 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5019 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5020 {
5021 bfd_boolean need_relocs = FALSE;
5022 bfd_byte *loc;
5023 int indx;
5024 bfd_vma off;
5025
5026 off = symbol_got_offset (input_bfd, h, r_symndx);
5027 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5028
5029 need_relocs =
5030 (info->shared || indx != 0) &&
5031 (h == NULL
5032 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5033 || h->root.type != bfd_link_hash_undefweak);
5034
5035 BFD_ASSERT (globals->root.srelgot != NULL);
5036
5037 if (need_relocs)
5038 {
5039 Elf_Internal_Rela rela;
5040 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5041 rela.r_addend = 0;
5042 rela.r_offset = globals->root.sgot->output_section->vma +
5043 globals->root.sgot->output_offset + off;
5044
5045
5046 loc = globals->root.srelgot->contents;
5047 loc += globals->root.srelgot->reloc_count++
5048 * RELOC_SIZE (globals);
5049 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5050
5051 if (indx == 0)
5052 {
5053 bfd_put_NN (output_bfd,
5054 relocation - dtpoff_base (info),
5055 globals->root.sgot->contents + off
5056 + GOT_ENTRY_SIZE);
5057 }
5058 else
5059 {
5060 /* This TLS symbol is global. We emit a
5061 relocation to fixup the tls offset at load
5062 time. */
5063 rela.r_info =
5064 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5065 rela.r_addend = 0;
5066 rela.r_offset =
5067 (globals->root.sgot->output_section->vma
5068 + globals->root.sgot->output_offset + off
5069 + GOT_ENTRY_SIZE);
5070
5071 loc = globals->root.srelgot->contents;
5072 loc += globals->root.srelgot->reloc_count++
5073 * RELOC_SIZE (globals);
5074 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5075 bfd_put_NN (output_bfd, (bfd_vma) 0,
5076 globals->root.sgot->contents + off
5077 + GOT_ENTRY_SIZE);
5078 }
5079 }
5080 else
5081 {
5082 bfd_put_NN (output_bfd, (bfd_vma) 1,
5083 globals->root.sgot->contents + off);
5084 bfd_put_NN (output_bfd,
5085 relocation - dtpoff_base (info),
5086 globals->root.sgot->contents + off
5087 + GOT_ENTRY_SIZE);
5088 }
5089
5090 symbol_got_offset_mark (input_bfd, h, r_symndx);
5091 }
5092 break;
5093
5094 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5095 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5096 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5097 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5098 {
5099 bfd_boolean need_relocs = FALSE;
5100 bfd_byte *loc;
5101 int indx;
5102 bfd_vma off;
5103
5104 off = symbol_got_offset (input_bfd, h, r_symndx);
5105
5106 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5107
5108 need_relocs =
5109 (info->shared || indx != 0) &&
5110 (h == NULL
5111 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5112 || h->root.type != bfd_link_hash_undefweak);
5113
5114 BFD_ASSERT (globals->root.srelgot != NULL);
5115
5116 if (need_relocs)
5117 {
5118 Elf_Internal_Rela rela;
5119
5120 if (indx == 0)
5121 rela.r_addend = relocation - dtpoff_base (info);
5122 else
5123 rela.r_addend = 0;
5124
5125 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5126 rela.r_offset = globals->root.sgot->output_section->vma +
5127 globals->root.sgot->output_offset + off;
5128
5129 loc = globals->root.srelgot->contents;
5130 loc += globals->root.srelgot->reloc_count++
5131 * RELOC_SIZE (globals);
5132
5133 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5134
5135 bfd_put_NN (output_bfd, rela.r_addend,
5136 globals->root.sgot->contents + off);
5137 }
5138 else
5139 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5140 globals->root.sgot->contents + off);
5141
5142 symbol_got_offset_mark (input_bfd, h, r_symndx);
5143 }
5144 break;
5145
5146 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5147 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5148 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5149 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5150 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5151 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5152 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5153 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5154 break;
5155
5156 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5157 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5158 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5159 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5160 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5161 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5162 {
5163 bfd_boolean need_relocs = FALSE;
5164 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5165 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5166
5167 need_relocs = (h == NULL
5168 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5169 || h->root.type != bfd_link_hash_undefweak);
5170
5171 BFD_ASSERT (globals->root.srelgot != NULL);
5172 BFD_ASSERT (globals->root.sgot != NULL);
5173
5174 if (need_relocs)
5175 {
5176 bfd_byte *loc;
5177 Elf_Internal_Rela rela;
5178 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5179
5180 rela.r_addend = 0;
5181 rela.r_offset = (globals->root.sgotplt->output_section->vma
5182 + globals->root.sgotplt->output_offset
5183 + off + globals->sgotplt_jump_table_size);
5184
5185 if (indx == 0)
5186 rela.r_addend = relocation - dtpoff_base (info);
5187
5188 /* Allocate the next available slot in the PLT reloc
5189 section to hold our R_AARCH64_TLSDESC; the next
5190 available slot is determined from reloc_count,
5191 which we step. Note that reloc_count was
5192 artificially moved down while allocating slots for
5193 the real PLT relocs, so that all of the PLT relocs
5194 fit above the initial reloc_count and the
5195 extra stuff fits below. */
5196 loc = globals->root.srelplt->contents;
5197 loc += globals->root.srelplt->reloc_count++
5198 * RELOC_SIZE (globals);
5199
5200 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5201
5202 bfd_put_NN (output_bfd, (bfd_vma) 0,
5203 globals->root.sgotplt->contents + off +
5204 globals->sgotplt_jump_table_size);
5205 bfd_put_NN (output_bfd, (bfd_vma) 0,
5206 globals->root.sgotplt->contents + off +
5207 globals->sgotplt_jump_table_size +
5208 GOT_ENTRY_SIZE);
5209 }
5210
5211 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5212 }
5213 break;
5214 default:
5215 break;
5216 }
5217
5218 if (!save_addend)
5219 addend = 0;
5220
5221
5222 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5223 because such sections are not SEC_ALLOC and thus ld.so will
5224 not process them. */
5225 if (unresolved_reloc
5226 && !((input_section->flags & SEC_DEBUGGING) != 0
5227 && h->def_dynamic)
5228 && _bfd_elf_section_offset (output_bfd, info, input_section,
5229 rel->r_offset) != (bfd_vma) - 1)
5230 {
5231 (*_bfd_error_handler)
5232 (_
5233 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5234 input_bfd, input_section, (long) rel->r_offset, howto->name,
5235 h->root.root.string);
5236 return FALSE;
5237 }
5238
5239 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5240 {
5241 switch (r)
5242 {
5243 case bfd_reloc_overflow:
5244 /* If the overflowing reloc was to an undefined symbol,
5245 we have already printed one error message and there
5246 is no point complaining again. */
5247 if ((!h ||
5248 h->root.type != bfd_link_hash_undefined)
5249 && (!((*info->callbacks->reloc_overflow)
5250 (info, (h ? &h->root : NULL), name, howto->name,
5251 (bfd_vma) 0, input_bfd, input_section,
5252 rel->r_offset))))
5253 return FALSE;
5254 break;
5255
5256 case bfd_reloc_undefined:
5257 if (!((*info->callbacks->undefined_symbol)
5258 (info, name, input_bfd, input_section,
5259 rel->r_offset, TRUE)))
5260 return FALSE;
5261 break;
5262
5263 case bfd_reloc_outofrange:
5264 error_message = _("out of range");
5265 goto common_error;
5266
5267 case bfd_reloc_notsupported:
5268 error_message = _("unsupported relocation");
5269 goto common_error;
5270
5271 case bfd_reloc_dangerous:
5272 /* error_message should already be set. */
5273 goto common_error;
5274
5275 default:
5276 error_message = _("unknown error");
5277 /* Fall through. */
5278
5279 common_error:
5280 BFD_ASSERT (error_message != NULL);
5281 if (!((*info->callbacks->reloc_dangerous)
5282 (info, error_message, input_bfd, input_section,
5283 rel->r_offset)))
5284 return FALSE;
5285 break;
5286 }
5287 }
5288 }
5289
5290 return TRUE;
5291 }
5292
5293 /* Set the right machine number. */
5294
5295 static bfd_boolean
5296 elfNN_aarch64_object_p (bfd *abfd)
5297 {
5298 #if ARCH_SIZE == 32
5299 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5300 #else
5301 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5302 #endif
5303 return TRUE;
5304 }
5305
5306 /* Function to keep AArch64 specific flags in the ELF header. */
5307
5308 static bfd_boolean
5309 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5310 {
5311 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5312 {
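/* The flags have already been initialised and differ from FLAGS;
   keep the previously recorded value rather than overwriting it
   here.  */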
5313 }
5314 else
5315 {
5316 elf_elfheader (abfd)->e_flags = flags;
5317 elf_flags_init (abfd) = TRUE;
5318 }
5319
5320 return TRUE;
5321 }
5322
5323 /* Merge backend specific data from an object file to the output
5324 object file when linking. */
5325
5326 static bfd_boolean
5327 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5328 {
5329 flagword out_flags;
5330 flagword in_flags;
5331 bfd_boolean flags_compatible = TRUE;
5332 asection *sec;
5333
5334 /* Check if we have the same endianness.  */
5335 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5336 return FALSE;
5337
5338 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5339 return TRUE;
5340
5341 /* The input BFD must have had its flags initialised. */
5342 /* The following seems bogus to me -- The flags are initialized in
5343 the assembler but I don't think an elf_flags_init field is
5344 written into the object. */
5345 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5346
5347 in_flags = elf_elfheader (ibfd)->e_flags;
5348 out_flags = elf_elfheader (obfd)->e_flags;
5349
5350 if (!elf_flags_init (obfd))
5351 {
5352 /* If the input is the default architecture and had the default
5353 flags then do not bother setting the flags for the output
5354 architecture; instead allow future merges to do this.  If no
5355 future merges ever set these flags then they will retain their
5356 uninitialised values, which, surprise surprise, correspond
5357 to the default values.  */
5358 if (bfd_get_arch_info (ibfd)->the_default
5359 && elf_elfheader (ibfd)->e_flags == 0)
5360 return TRUE;
5361
5362 elf_flags_init (obfd) = TRUE;
5363 elf_elfheader (obfd)->e_flags = in_flags;
5364
5365 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5366 && bfd_get_arch_info (obfd)->the_default)
5367 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5368 bfd_get_mach (ibfd));
5369
5370 return TRUE;
5371 }
5372
5373 /* Identical flags must be compatible. */
5374 if (in_flags == out_flags)
5375 return TRUE;
5376
5377 /* Check to see if the input BFD actually contains any sections.  If
5378 not, its flags may not have been initialised either, but it
5379 cannot actually cause any incompatibility.  Do not short-circuit
5380 dynamic objects; their section list may be emptied by
5381 elf_link_add_object_symbols.
5382
5383 Also check to see if there are no code sections in the input.
5384 In this case there is no need to check for code specific flags.
5385 XXX - do we need to worry about floating-point format compatibility
5386 in data sections?  */
5387 if (!(ibfd->flags & DYNAMIC))
5388 {
5389 bfd_boolean null_input_bfd = TRUE;
5390 bfd_boolean only_data_sections = TRUE;
5391
5392 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5393 {
5394 if ((bfd_get_section_flags (ibfd, sec)
5395 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5396 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5397 only_data_sections = FALSE;
5398
5399 null_input_bfd = FALSE;
5400 break;
5401 }
5402
5403 if (null_input_bfd || only_data_sections)
5404 return TRUE;
5405 }
5406
5407 return flags_compatible;
5408 }
5409
5410 /* Display the flags field. */
5411
5412 static bfd_boolean
5413 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5414 {
5415 FILE *file = (FILE *) ptr;
5416 unsigned long flags;
5417
5418 BFD_ASSERT (abfd != NULL && ptr != NULL);
5419
5420 /* Print normal ELF private data. */
5421 _bfd_elf_print_private_bfd_data (abfd, ptr);
5422
5423 flags = elf_elfheader (abfd)->e_flags;
5424 /* Ignore init flag - it may not be set, despite the flags field
5425 containing valid data. */
5426
5427 /* xgettext:c-format */
5428 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5429
5430 if (flags)
5431 fprintf (file, _("<Unrecognised flag bits set>"));
5432
5433 fputc ('\n', file);
5434
5435 return TRUE;
5436 }
5437
5438 /* Update the got entry reference counts for the section being removed. */
5439
5440 static bfd_boolean
5441 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5442 struct bfd_link_info *info,
5443 asection *sec,
5444 const Elf_Internal_Rela * relocs)
5445 {
5446 struct elf_aarch64_link_hash_table *htab;
5447 Elf_Internal_Shdr *symtab_hdr;
5448 struct elf_link_hash_entry **sym_hashes;
5449 struct elf_aarch64_local_symbol *locals;
5450 const Elf_Internal_Rela *rel, *relend;
5451
5452 if (info->relocatable)
5453 return TRUE;
5454
5455 htab = elf_aarch64_hash_table (info);
5456
5457 if (htab == NULL)
5458 return FALSE;
5459
5460 elf_section_data (sec)->local_dynrel = NULL;
5461
5462 symtab_hdr = &elf_symtab_hdr (abfd);
5463 sym_hashes = elf_sym_hashes (abfd);
5464
5465 locals = elf_aarch64_locals (abfd);
5466
5467 relend = relocs + sec->reloc_count;
5468 for (rel = relocs; rel < relend; rel++)
5469 {
5470 unsigned long r_symndx;
5471 unsigned int r_type;
5472 struct elf_link_hash_entry *h = NULL;
5473
5474 r_symndx = ELFNN_R_SYM (rel->r_info);
5475
5476 if (r_symndx >= symtab_hdr->sh_info)
5477 {
5478
5479 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5480 while (h->root.type == bfd_link_hash_indirect
5481 || h->root.type == bfd_link_hash_warning)
5482 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5483 }
5484 else
5485 {
5486 Elf_Internal_Sym *isym;
5487
5488 /* A local symbol. */
5489 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5490 abfd, r_symndx);
5491
5492 /* Check relocation against local STT_GNU_IFUNC symbol. */
5493 if (isym != NULL
5494 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5495 {
5496 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5497 if (h == NULL)
5498 abort ();
5499 }
5500 }
5501
5502 if (h)
5503 {
5504 struct elf_aarch64_link_hash_entry *eh;
5505 struct elf_dyn_relocs **pp;
5506 struct elf_dyn_relocs *p;
5507
5508 eh = (struct elf_aarch64_link_hash_entry *) h;
5509
5510 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5511 if (p->sec == sec)
5512 {
5513 /* Everything must go for SEC. */
5514 *pp = p->next;
5515 break;
5516 }
5517 }
5518
5519 r_type = ELFNN_R_TYPE (rel->r_info);
5520 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5521 {
5522 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5523 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5524 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5525 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5526 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5527 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5528 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5529 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5530 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5531 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5532 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5533 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5534 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5535 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5536 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5537 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5538 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5539 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5540 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5541 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5542 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5543 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5544 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5545 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5546 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5547 if (h != NULL)
5548 {
5549 if (h->got.refcount > 0)
5550 h->got.refcount -= 1;
5551
5552 if (h->type == STT_GNU_IFUNC)
5553 {
5554 if (h->plt.refcount > 0)
5555 h->plt.refcount -= 1;
5556 }
5557 }
5558 else if (locals != NULL)
5559 {
5560 if (locals[r_symndx].got_refcount > 0)
5561 locals[r_symndx].got_refcount -= 1;
5562 }
5563 break;
5564
5565 case BFD_RELOC_AARCH64_CALL26:
5566 case BFD_RELOC_AARCH64_JUMP26:
5567 /* If this is a local symbol then we resolve it
5568 directly without creating a PLT entry. */
5569 if (h == NULL)
5570 continue;
5571
5572 if (h->plt.refcount > 0)
5573 h->plt.refcount -= 1;
5574 break;
5575
5576 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5577 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5578 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5579 case BFD_RELOC_AARCH64_MOVW_G3:
5580 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5581 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5582 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5583 case BFD_RELOC_AARCH64_NN:
5584 if (h != NULL && info->executable)
5585 {
5586 if (h->plt.refcount > 0)
5587 h->plt.refcount -= 1;
5588 }
5589 break;
5590
5591 default:
5592 break;
5593 }
5594 }
5595
5596 return TRUE;
5597 }
5598
5599 /* Adjust a symbol defined by a dynamic object and referenced by a
5600 regular object. The current definition is in some section of the
5601 dynamic object, but we're not including those sections. We have to
5602 change the definition to something the rest of the link can
5603 understand. */
5604
5605 static bfd_boolean
5606 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5607 struct elf_link_hash_entry *h)
5608 {
5609 struct elf_aarch64_link_hash_table *htab;
5610 asection *s;
5611
5612 /* If this is a function, put it in the procedure linkage table. We
5613 will fill in the contents of the procedure linkage table later,
5614 when we know the address of the .got section. */
5615 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5616 {
5617 if (h->plt.refcount <= 0
5618 || (h->type != STT_GNU_IFUNC
5619 && (SYMBOL_CALLS_LOCAL (info, h)
5620 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5621 && h->root.type == bfd_link_hash_undefweak))))
5622 {
5623 /* This case can occur if we saw a CALL26 reloc in
5624 an input file, but the symbol wasn't referred to
5625 by a dynamic object or all references were
5626 garbage collected.  In that case we don't need a
5627 PLT entry and can resolve the call directly.  */
5628 h->plt.offset = (bfd_vma) - 1;
5629 h->needs_plt = 0;
5630 }
5631
5632 return TRUE;
5633 }
5634 else
5635 /* It's possible that we incorrectly decided a .plt reloc was
5636 needed for a PC-relative reloc to a non-function sym in
5637 check_relocs.  We can't decide accurately between function and
5638 non-function syms in check_relocs; objects loaded later in
5639 the link may change h->type.  So fix it now.  */
5640 h->plt.offset = (bfd_vma) - 1;
5641
5642
5643 /* If this is a weak symbol, and there is a real definition, the
5644 processor independent code will have arranged for us to see the
5645 real definition first, and we can just use the same value. */
5646 if (h->u.weakdef != NULL)
5647 {
5648 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5649 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5650 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5651 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5652 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5653 h->non_got_ref = h->u.weakdef->non_got_ref;
5654 return TRUE;
5655 }
5656
5657 /* If we are creating a shared library, we must presume that the
5658 only references to the symbol are via the global offset table.
5659 For such cases we need not do anything here; the relocations will
5660 be handled correctly by relocate_section. */
5661 if (info->shared)
5662 return TRUE;
5663
5664 /* If there are no references to this symbol that do not use the
5665 GOT, we don't need to generate a copy reloc. */
5666 if (!h->non_got_ref)
5667 return TRUE;
5668
5669 /* If -z nocopyreloc was given, we won't generate them either. */
5670 if (info->nocopyreloc)
5671 {
5672 h->non_got_ref = 0;
5673 return TRUE;
5674 }
5675
5676 /* We must allocate the symbol in our .dynbss section, which will
5677 become part of the .bss section of the executable. There will be
5678 an entry for this symbol in the .dynsym section. The dynamic
5679 object will contain position independent code, so all references
5680 from the dynamic object to this symbol will go through the global
5681 offset table. The dynamic linker will use the .dynsym entry to
5682 determine the address it must put in the global offset table, so
5683 both the dynamic object and the regular object will refer to the
5684 same memory location for the variable. */
5685
5686 htab = elf_aarch64_hash_table (info);
5687
5688 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5689 to copy the initial value out of the dynamic object and into the
5690 runtime process image. */
5691 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5692 {
5693 htab->srelbss->size += RELOC_SIZE (htab);
5694 h->needs_copy = 1;
5695 }
5696
5697 s = htab->sdynbss;
5698
5699 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5700
5701 }
5702
5703 static bfd_boolean
5704 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5705 {
5706 struct elf_aarch64_local_symbol *locals;
5707 locals = elf_aarch64_locals (abfd);
5708 if (locals == NULL)
5709 {
5710 locals = (struct elf_aarch64_local_symbol *)
5711 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5712 if (locals == NULL)
5713 return FALSE;
5714 elf_aarch64_locals (abfd) = locals;
5715 }
5716 return TRUE;
5717 }
5718
5719 /* Create the .got section to hold the global offset table. */
5720
5721 static bfd_boolean
5722 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5723 {
5724 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5725 flagword flags;
5726 asection *s;
5727 struct elf_link_hash_entry *h;
5728 struct elf_link_hash_table *htab = elf_hash_table (info);
5729
5730 /* This function may be called more than once. */
5731 s = bfd_get_linker_section (abfd, ".got");
5732 if (s != NULL)
5733 return TRUE;
5734
5735 flags = bed->dynamic_sec_flags;
5736
5737 s = bfd_make_section_anyway_with_flags (abfd,
5738 (bed->rela_plts_and_copies_p
5739 ? ".rela.got" : ".rel.got"),
5740 (bed->dynamic_sec_flags
5741 | SEC_READONLY));
5742 if (s == NULL
5743 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5744 return FALSE;
5745 htab->srelgot = s;
5746
5747 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5748 if (s == NULL
5749 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5750 return FALSE;
5751 htab->sgot = s;
5752 htab->sgot->size += GOT_ENTRY_SIZE;
5753
5754 if (bed->want_got_sym)
5755 {
5756 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5757 (or .got.plt) section. We don't do this in the linker script
5758 because we don't want to define the symbol if we are not creating
5759 a global offset table. */
5760 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5761 "_GLOBAL_OFFSET_TABLE_");
5762 elf_hash_table (info)->hgot = h;
5763 if (h == NULL)
5764 return FALSE;
5765 }
5766
5767 if (bed->want_got_plt)
5768 {
5769 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5770 if (s == NULL
5771 || !bfd_set_section_alignment (abfd, s,
5772 bed->s->log_file_align))
5773 return FALSE;
5774 htab->sgotplt = s;
5775 }
5776
5777 /* The first bit of the global offset table is the header. */
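/* For this backend the header is presumably the three reserved
   entries GOT[0..2] (see elf_backend_got_header_size): GOT[0]
   conventionally holds the address of _DYNAMIC, while GOT[1] and
   GOT[2] are filled in by the dynamic linker.  */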
5778 s->size += bed->got_header_size;
5779
5780 return TRUE;
5781 }
5782
5783 /* Look through the relocs for a section during the first phase. */
5784
5785 static bfd_boolean
5786 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5787 asection *sec, const Elf_Internal_Rela *relocs)
5788 {
5789 Elf_Internal_Shdr *symtab_hdr;
5790 struct elf_link_hash_entry **sym_hashes;
5791 const Elf_Internal_Rela *rel;
5792 const Elf_Internal_Rela *rel_end;
5793 asection *sreloc;
5794
5795 struct elf_aarch64_link_hash_table *htab;
5796
5797 if (info->relocatable)
5798 return TRUE;
5799
5800 BFD_ASSERT (is_aarch64_elf (abfd));
5801
5802 htab = elf_aarch64_hash_table (info);
5803 sreloc = NULL;
5804
5805 symtab_hdr = &elf_symtab_hdr (abfd);
5806 sym_hashes = elf_sym_hashes (abfd);
5807
5808 rel_end = relocs + sec->reloc_count;
5809 for (rel = relocs; rel < rel_end; rel++)
5810 {
5811 struct elf_link_hash_entry *h;
5812 unsigned long r_symndx;
5813 unsigned int r_type;
5814 bfd_reloc_code_real_type bfd_r_type;
5815 Elf_Internal_Sym *isym;
5816
5817 r_symndx = ELFNN_R_SYM (rel->r_info);
5818 r_type = ELFNN_R_TYPE (rel->r_info);
5819
5820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5821 {
5822 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5823 r_symndx);
5824 return FALSE;
5825 }
5826
5827 if (r_symndx < symtab_hdr->sh_info)
5828 {
5829 /* A local symbol. */
5830 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5831 abfd, r_symndx);
5832 if (isym == NULL)
5833 return FALSE;
5834
5835 /* Check relocation against local STT_GNU_IFUNC symbol. */
5836 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5837 {
5838 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5839 TRUE);
5840 if (h == NULL)
5841 return FALSE;
5842
5843 /* Fake a STT_GNU_IFUNC symbol. */
5844 h->type = STT_GNU_IFUNC;
5845 h->def_regular = 1;
5846 h->ref_regular = 1;
5847 h->forced_local = 1;
5848 h->root.type = bfd_link_hash_defined;
5849 }
5850 else
5851 h = NULL;
5852 }
5853 else
5854 {
5855 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5856 while (h->root.type == bfd_link_hash_indirect
5857 || h->root.type == bfd_link_hash_warning)
5858 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5859
5860 /* PR15323, ref flags aren't set for references in the same
5861 object. */
5862 h->root.non_ir_ref = 1;
5863 }
5864
5865 /* Could be done earlier, if h were already available. */
5866 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5867
5868 if (h != NULL)
5869 {
5870 /* Create the ifunc sections for static executables.  If we
5871 never see an indirect function symbol and we are not building
5872 a static executable, those sections will be empty and
5873 won't appear in the output.  */
5874 switch (bfd_r_type)
5875 {
5876 default:
5877 break;
5878
5879 case BFD_RELOC_AARCH64_NN:
5880 case BFD_RELOC_AARCH64_CALL26:
5881 case BFD_RELOC_AARCH64_JUMP26:
5882 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5883 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5884 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5885 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5886 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5887 case BFD_RELOC_AARCH64_ADD_LO12:
5888 if (htab->root.dynobj == NULL)
5889 htab->root.dynobj = abfd;
5890 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5891 return FALSE;
5892 break;
5893 }
5894
5895 /* It is referenced by a non-shared object. */
5896 h->ref_regular = 1;
5897 h->root.non_ir_ref = 1;
5898 }
5899
5900 switch (bfd_r_type)
5901 {
5902 case BFD_RELOC_AARCH64_NN:
5903
5904 /* We don't need to handle relocs into sections not going into
5905 the "real" output. */
5906 if ((sec->flags & SEC_ALLOC) == 0)
5907 break;
5908
5909 if (h != NULL)
5910 {
5911 if (!info->shared)
5912 h->non_got_ref = 1;
5913
5914 h->plt.refcount += 1;
5915 h->pointer_equality_needed = 1;
5916 }
5917
5918 /* No need to do anything if we're not creating a shared
5919 object. */
5920 if (! info->shared)
5921 break;
5922
5923 {
5924 struct elf_dyn_relocs *p;
5925 struct elf_dyn_relocs **head;
5926
5927 /* We must copy these reloc types into the output file.
5928 Create a reloc section in dynobj and make room for
5929 this reloc. */
5930 if (sreloc == NULL)
5931 {
5932 if (htab->root.dynobj == NULL)
5933 htab->root.dynobj = abfd;
5934
5935 sreloc = _bfd_elf_make_dynamic_reloc_section
5936 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5937
5938 if (sreloc == NULL)
5939 return FALSE;
5940 }
5941
5942 /* If this is a global symbol, we count the number of
5943 relocations we need for this symbol. */
5944 if (h != NULL)
5945 {
5946 struct elf_aarch64_link_hash_entry *eh;
5947 eh = (struct elf_aarch64_link_hash_entry *) h;
5948 head = &eh->dyn_relocs;
5949 }
5950 else
5951 {
5952 /* Track dynamic relocs needed for local syms too.
5953 We really need local syms available to do this
5954 easily. Oh well. */
5955
5956 asection *s;
5957 void **vpp;
5958
5959 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5960 abfd, r_symndx);
5961 if (isym == NULL)
5962 return FALSE;
5963
5964 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5965 if (s == NULL)
5966 s = sec;
5967
5968 /* Beware of type punned pointers vs strict aliasing
5969 rules. */
5970 vpp = &(elf_section_data (s)->local_dynrel);
5971 head = (struct elf_dyn_relocs **) vpp;
5972 }
5973
5974 p = *head;
5975 if (p == NULL || p->sec != sec)
5976 {
5977 bfd_size_type amt = sizeof *p;
5978 p = ((struct elf_dyn_relocs *)
5979 bfd_zalloc (htab->root.dynobj, amt));
5980 if (p == NULL)
5981 return FALSE;
5982 p->next = *head;
5983 *head = p;
5984 p->sec = sec;
5985 }
5986
5987 p->count += 1;
5988
5989 }
5990 break;
5991
5992 /* RR: We probably want to keep a consistency check that
5993 there are no dangling GOT_PAGE relocs. */
5994 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5995 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5996 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5997 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5998 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5999 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6000 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6001 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6002 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6003 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6004 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6005 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6006 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6007 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6008 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6009 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6010 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6011 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6012 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6013 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6014 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6015 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6016 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6017 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6018 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6019 {
6020 unsigned got_type;
6021 unsigned old_got_type;
6022
6023 got_type = aarch64_reloc_got_type (bfd_r_type);
6024
6025 if (h)
6026 {
6027 h->got.refcount += 1;
6028 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6029 }
6030 else
6031 {
6032 struct elf_aarch64_local_symbol *locals;
6033
6034 if (!elfNN_aarch64_allocate_local_symbols
6035 (abfd, symtab_hdr->sh_info))
6036 return FALSE;
6037
6038 locals = elf_aarch64_locals (abfd);
6039 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6040 locals[r_symndx].got_refcount += 1;
6041 old_got_type = locals[r_symndx].got_type;
6042 }
6043
6044 /* If a variable is accessed with both general dynamic TLS
6045 methods, two slots may be created. */
6046 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6047 got_type |= old_got_type;
6048
6049 /* We will already have issued an error message if there
6050 is a TLS/non-TLS mismatch, based on the symbol type.
6051 So just combine any TLS types needed. */
6052 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6053 && got_type != GOT_NORMAL)
6054 got_type |= old_got_type;
6055
6056 /* If the symbol is accessed by both IE and GD methods, we
6057 are able to relax. Turn off the GD flag, without
6058 messing up with any other kind of TLS types that may be
6059 involved. */
6060 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6061 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
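/* For example, a symbol first referenced through a TLSDESC (GD)
   sequence and later through an IE sequence ends up here with just
   GOT_TLS_IE set, so only a single IE-style GOT slot is allocated
   for it later on.  */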
6062
6063 if (old_got_type != got_type)
6064 {
6065 if (h != NULL)
6066 elf_aarch64_hash_entry (h)->got_type = got_type;
6067 else
6068 {
6069 struct elf_aarch64_local_symbol *locals;
6070 locals = elf_aarch64_locals (abfd);
6071 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6072 locals[r_symndx].got_type = got_type;
6073 }
6074 }
6075
6076 if (htab->root.dynobj == NULL)
6077 htab->root.dynobj = abfd;
6078 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6079 return FALSE;
6080 break;
6081 }
6082
6083 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6084 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6085 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6086 case BFD_RELOC_AARCH64_MOVW_G3:
6087 if (info->shared)
6088 {
6089 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6090 (*_bfd_error_handler)
6091 (_("%B: relocation %s against `%s' can not be used when making "
6092 "a shared object; recompile with -fPIC"),
6093 abfd, elfNN_aarch64_howto_table[howto_index].name,
6094 (h) ? h->root.root.string : "a local symbol");
6095 bfd_set_error (bfd_error_bad_value);
6096 return FALSE;
6097 }
6098
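/* Fall through.  Outside of a shared object these absolute MOVW
   relocations are handled like the PC-relative ADR relocations
   below.  */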
6099 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6100 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6101 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6102 if (h != NULL && info->executable)
6103 {
6104 /* If this reloc is in a read-only section, we might
6105 need a copy reloc. We can't check reliably at this
6106 stage whether the section is read-only, as input
6107 sections have not yet been mapped to output sections.
6108 Tentatively set the flag for now, and correct in
6109 adjust_dynamic_symbol. */
6110 h->non_got_ref = 1;
6111 h->plt.refcount += 1;
6112 h->pointer_equality_needed = 1;
6113 }
6114 /* FIXME: RR - these need to be handled in shared libraries;
6115 essentially we should bomb out, as these are non-PIC
6116 relocations when used in shared libraries.  */
6117 break;
6118
6119 case BFD_RELOC_AARCH64_CALL26:
6120 case BFD_RELOC_AARCH64_JUMP26:
6121 /* If this is a local symbol then we resolve it
6122 directly without creating a PLT entry. */
6123 if (h == NULL)
6124 continue;
6125
6126 h->needs_plt = 1;
6127 if (h->plt.refcount <= 0)
6128 h->plt.refcount = 1;
6129 else
6130 h->plt.refcount += 1;
6131 break;
6132
6133 default:
6134 break;
6135 }
6136 }
6137
6138 return TRUE;
6139 }
6140
6141 /* Treat mapping symbols as special target symbols. */
6142
6143 static bfd_boolean
6144 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6145 asymbol *sym)
6146 {
6147 return bfd_is_aarch64_special_symbol_name (sym->name,
6148 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6149 }
6150
6151 /* This is a copy of elf_find_function () from elf.c except that
6152 AArch64 mapping symbols are ignored when looking for function names. */
6153
6154 static bfd_boolean
6155 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6156 asymbol **symbols,
6157 asection *section,
6158 bfd_vma offset,
6159 const char **filename_ptr,
6160 const char **functionname_ptr)
6161 {
6162 const char *filename = NULL;
6163 asymbol *func = NULL;
6164 bfd_vma low_func = 0;
6165 asymbol **p;
6166
6167 for (p = symbols; *p != NULL; p++)
6168 {
6169 elf_symbol_type *q;
6170
6171 q = (elf_symbol_type *) *p;
6172
6173 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6174 {
6175 default:
6176 break;
6177 case STT_FILE:
6178 filename = bfd_asymbol_name (&q->symbol);
6179 break;
6180 case STT_FUNC:
6181 case STT_NOTYPE:
6182 /* Skip mapping symbols. */
6183 if ((q->symbol.flags & BSF_LOCAL)
6184 && (bfd_is_aarch64_special_symbol_name
6185 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6186 continue;
6187 /* Fall through. */
6188 if (bfd_get_section (&q->symbol) == section
6189 && q->symbol.value >= low_func && q->symbol.value <= offset)
6190 {
6191 func = (asymbol *) q;
6192 low_func = q->symbol.value;
6193 }
6194 break;
6195 }
6196 }
6197
6198 if (func == NULL)
6199 return FALSE;
6200
6201 if (filename_ptr)
6202 *filename_ptr = filename;
6203 if (functionname_ptr)
6204 *functionname_ptr = bfd_asymbol_name (func);
6205
6206 return TRUE;
6207 }
6208
6209
6210 /* Find the nearest line to a particular section and offset, for error
6211 reporting. This code is a duplicate of the code in elf.c, except
6212 that it uses aarch64_elf_find_function. */
6213
6214 static bfd_boolean
6215 elfNN_aarch64_find_nearest_line (bfd *abfd,
6216 asymbol **symbols,
6217 asection *section,
6218 bfd_vma offset,
6219 const char **filename_ptr,
6220 const char **functionname_ptr,
6221 unsigned int *line_ptr,
6222 unsigned int *discriminator_ptr)
6223 {
6224 bfd_boolean found = FALSE;
6225
6226 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6227 filename_ptr, functionname_ptr,
6228 line_ptr, discriminator_ptr,
6229 dwarf_debug_sections, 0,
6230 &elf_tdata (abfd)->dwarf2_find_line_info))
6231 {
6232 if (!*functionname_ptr)
6233 aarch64_elf_find_function (abfd, symbols, section, offset,
6234 *filename_ptr ? NULL : filename_ptr,
6235 functionname_ptr);
6236
6237 return TRUE;
6238 }
6239
6240 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6241 toolchain uses DWARF1. */
6242
6243 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6244 &found, filename_ptr,
6245 functionname_ptr, line_ptr,
6246 &elf_tdata (abfd)->line_info))
6247 return FALSE;
6248
6249 if (found && (*functionname_ptr || *line_ptr))
6250 return TRUE;
6251
6252 if (symbols == NULL)
6253 return FALSE;
6254
6255 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6256 filename_ptr, functionname_ptr))
6257 return FALSE;
6258
6259 *line_ptr = 0;
6260 return TRUE;
6261 }
6262
6263 static bfd_boolean
6264 elfNN_aarch64_find_inliner_info (bfd *abfd,
6265 const char **filename_ptr,
6266 const char **functionname_ptr,
6267 unsigned int *line_ptr)
6268 {
6269 bfd_boolean found;
6270 found = _bfd_dwarf2_find_inliner_info
6271 (abfd, filename_ptr,
6272 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6273 return found;
6274 }
6275
6276
6277 static void
6278 elfNN_aarch64_post_process_headers (bfd *abfd,
6279 struct bfd_link_info *link_info)
6280 {
6281 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6282
6283 i_ehdrp = elf_elfheader (abfd);
6284 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6285
6286 _bfd_elf_post_process_headers (abfd, link_info);
6287 }
6288
6289 static enum elf_reloc_type_class
6290 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6291 const asection *rel_sec ATTRIBUTE_UNUSED,
6292 const Elf_Internal_Rela *rela)
6293 {
6294 switch ((int) ELFNN_R_TYPE (rela->r_info))
6295 {
6296 case AARCH64_R (RELATIVE):
6297 return reloc_class_relative;
6298 case AARCH64_R (JUMP_SLOT):
6299 return reloc_class_plt;
6300 case AARCH64_R (COPY):
6301 return reloc_class_copy;
6302 default:
6303 return reloc_class_normal;
6304 }
6305 }
6306
6307 /* Handle an AArch64 specific section when reading an object file. This is
6308 called when bfd_section_from_shdr finds a section with an unknown
6309 type. */
6310
6311 static bfd_boolean
6312 elfNN_aarch64_section_from_shdr (bfd *abfd,
6313 Elf_Internal_Shdr *hdr,
6314 const char *name, int shindex)
6315 {
6316 /* There ought to be a place to keep ELF backend specific flags, but
6317 at the moment there isn't one. We just keep track of the
6318 sections by their name, instead. Fortunately, the ABI gives
6319 names for all the AArch64 specific sections, so we will probably get
6320 away with this. */
6321 switch (hdr->sh_type)
6322 {
6323 case SHT_AARCH64_ATTRIBUTES:
6324 break;
6325
6326 default:
6327 return FALSE;
6328 }
6329
6330 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6331 return FALSE;
6332
6333 return TRUE;
6334 }
6335
6336 /* A structure used to record a list of sections, independently
6337 of the next and prev fields in the asection structure. */
6338 typedef struct section_list
6339 {
6340 asection *sec;
6341 struct section_list *next;
6342 struct section_list *prev;
6343 }
6344 section_list;
6345
6346 /* Unfortunately we need to keep a list of sections for which
6347 an _aarch64_elf_section_data structure has been allocated. This
6348 is because it is possible for functions like elfNN_aarch64_write_section
6349 to be called on a section which has had an elf_data_structure
6350 allocated for it (and so the used_by_bfd field is valid) but
6351 for which the AArch64 extended version of this structure - the
6352 _aarch64_elf_section_data structure - has not been allocated. */
6353 static section_list *sections_with_aarch64_elf_section_data = NULL;
6354
6355 static void
6356 record_section_with_aarch64_elf_section_data (asection *sec)
6357 {
6358 struct section_list *entry;
6359
6360 entry = bfd_malloc (sizeof (*entry));
6361 if (entry == NULL)
6362 return;
6363 entry->sec = sec;
6364 entry->next = sections_with_aarch64_elf_section_data;
6365 entry->prev = NULL;
6366 if (entry->next != NULL)
6367 entry->next->prev = entry;
6368 sections_with_aarch64_elf_section_data = entry;
6369 }
6370
6371 static struct section_list *
6372 find_aarch64_elf_section_entry (asection *sec)
6373 {
6374 struct section_list *entry;
6375 static struct section_list *last_entry = NULL;
6376
6377 /* This is a short cut for the typical case where the sections are added
6378 to the sections_with_aarch64_elf_section_data list in forward order and
6379 then looked up here in backwards order. This makes a real difference
6380 to the ld-srec/sec64k.exp linker test. */
6381 entry = sections_with_aarch64_elf_section_data;
6382 if (last_entry != NULL)
6383 {
6384 if (last_entry->sec == sec)
6385 entry = last_entry;
6386 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6387 entry = last_entry->next;
6388 }
6389
6390 for (; entry; entry = entry->next)
6391 if (entry->sec == sec)
6392 break;
6393
6394 if (entry)
6395 /* Record the entry prior to this one - it is the entry we are
6396 most likely to want to locate next time. Also this way if we
6397 have been called from
6398 unrecord_section_with_aarch64_elf_section_data () we will not
6399 be caching a pointer that is about to be freed. */
6400 last_entry = entry->prev;
6401
6402 return entry;
6403 }
6404
6405 static void
6406 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6407 {
6408 struct section_list *entry;
6409
6410 entry = find_aarch64_elf_section_entry (sec);
6411
6412 if (entry)
6413 {
6414 if (entry->prev != NULL)
6415 entry->prev->next = entry->next;
6416 if (entry->next != NULL)
6417 entry->next->prev = entry->prev;
6418 if (entry == sections_with_aarch64_elf_section_data)
6419 sections_with_aarch64_elf_section_data = entry->next;
6420 free (entry);
6421 }
6422 }
6423
6424
6425 typedef struct
6426 {
6427 void *finfo;
6428 struct bfd_link_info *info;
6429 asection *sec;
6430 int sec_shndx;
6431 int (*func) (void *, const char *, Elf_Internal_Sym *,
6432 asection *, struct elf_link_hash_entry *);
6433 } output_arch_syminfo;
6434
6435 enum map_symbol_type
6436 {
6437 AARCH64_MAP_INSN,
6438 AARCH64_MAP_DATA
6439 };
6440
6441
6442 /* Output a single mapping symbol. */
6443
6444 static bfd_boolean
6445 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6446 enum map_symbol_type type, bfd_vma offset)
6447 {
6448 static const char *names[2] = { "$x", "$d" };
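/* "$x" marks the start of a sequence of A64 instructions and "$d"
   the start of a sequence of data, following the AArch64 ELF
   mapping symbol convention.  */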
6449 Elf_Internal_Sym sym;
6450
6451 sym.st_value = (osi->sec->output_section->vma
6452 + osi->sec->output_offset + offset);
6453 sym.st_size = 0;
6454 sym.st_other = 0;
6455 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6456 sym.st_shndx = osi->sec_shndx;
6457 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6458 }
6459
6460
6461
6462 /* Output mapping symbols for PLT entries associated with H. */
6463
6464 static bfd_boolean
6465 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6466 {
6467 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6468 bfd_vma addr;
6469
6470 if (h->root.type == bfd_link_hash_indirect)
6471 return TRUE;
6472
6473 if (h->root.type == bfd_link_hash_warning)
6474 /* When warning symbols are created, they **replace** the "real"
6475 entry in the hash table, thus we never get to see the real
6476 symbol in a hash traversal. So look at it now. */
6477 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6478
6479 if (h->plt.offset == (bfd_vma) - 1)
6480 return TRUE;
6481
6482 addr = h->plt.offset;
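/* An offset of 32 is taken to be the first PLT entry, immediately
   after the (presumed) 32-byte PLT header; only that entry needs a
   fresh "$x" mapping symbol, as the remaining entries are
   contiguous code.  */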
6483 if (addr == 32)
6484 {
6485 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6486 return FALSE;
6487 }
6488 return TRUE;
6489 }
6490
6491
6492 /* Output a single local symbol for a generated stub. */
6493
6494 static bfd_boolean
6495 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6496 bfd_vma offset, bfd_vma size)
6497 {
6498 Elf_Internal_Sym sym;
6499
6500 sym.st_value = (osi->sec->output_section->vma
6501 + osi->sec->output_offset + offset);
6502 sym.st_size = size;
6503 sym.st_other = 0;
6504 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6505 sym.st_shndx = osi->sec_shndx;
6506 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6507 }
6508
6509 static bfd_boolean
6510 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6511 {
6512 struct elf_aarch64_stub_hash_entry *stub_entry;
6513 asection *stub_sec;
6514 bfd_vma addr;
6515 char *stub_name;
6516 output_arch_syminfo *osi;
6517
6518 /* Massage our args to the form they really have. */
6519 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6520 osi = (output_arch_syminfo *) in_arg;
6521
6522 stub_sec = stub_entry->stub_sec;
6523
6524 /* Ensure this stub is attached to the current section being
6525 processed. */
6526 if (stub_sec != osi->sec)
6527 return TRUE;
6528
6529 addr = (bfd_vma) stub_entry->stub_offset;
6530
6531 stub_name = stub_entry->output_name;
6532
6533 switch (stub_entry->stub_type)
6534 {
6535 case aarch64_stub_adrp_branch:
6536 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6537 sizeof (aarch64_adrp_branch_stub)))
6538 return FALSE;
6539 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6540 return FALSE;
6541 break;
6542 case aarch64_stub_long_branch:
6543 if (!elfNN_aarch64_output_stub_sym
6544 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6545 return FALSE;
6546 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6547 return FALSE;
6548 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6549 return FALSE;
6550 break;
6551 case aarch64_stub_erratum_835769_veneer:
6552 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6553 sizeof (aarch64_erratum_835769_stub)))
6554 return FALSE;
6555 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6556 return FALSE;
6557 break;
6558 default:
6559 abort ();
6560 }
6561
6562 return TRUE;
6563 }
6564
6565 /* Output mapping symbols for linker generated sections. */
6566
6567 static bfd_boolean
6568 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6569 struct bfd_link_info *info,
6570 void *finfo,
6571 int (*func) (void *, const char *,
6572 Elf_Internal_Sym *,
6573 asection *,
6574 struct elf_link_hash_entry
6575 *))
6576 {
6577 output_arch_syminfo osi;
6578 struct elf_aarch64_link_hash_table *htab;
6579
6580 htab = elf_aarch64_hash_table (info);
6581
6582 osi.finfo = finfo;
6583 osi.info = info;
6584 osi.func = func;
6585
6586 /* Long calls stubs. */
6587 if (htab->stub_bfd && htab->stub_bfd->sections)
6588 {
6589 asection *stub_sec;
6590
6591 for (stub_sec = htab->stub_bfd->sections;
6592 stub_sec != NULL; stub_sec = stub_sec->next)
6593 {
6594 /* Ignore non-stub sections. */
6595 if (!strstr (stub_sec->name, STUB_SUFFIX))
6596 continue;
6597
6598 osi.sec = stub_sec;
6599
6600 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6601 (output_bfd, osi.sec->output_section);
6602
6603 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6604 &osi);
6605 }
6606 }
6607
6608 /* Finally, output mapping symbols for the PLT. */
6609 if (!htab->root.splt || htab->root.splt->size == 0)
6610 return TRUE;
6611
6612 /* For now, live with only a minimal set of mapping symbols for the plt (emitted by elfNN_aarch64_output_plt_map below).  */
6613 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6614 (output_bfd, htab->root.splt->output_section);
6615 osi.sec = htab->root.splt;
6616
6617 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6618 (void *) &osi);
6619
6620 return TRUE;
6621
6622 }
6623
6624 /* Allocate target specific section data. */
6625
6626 static bfd_boolean
6627 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6628 {
6629 if (!sec->used_by_bfd)
6630 {
6631 _aarch64_elf_section_data *sdata;
6632 bfd_size_type amt = sizeof (*sdata);
6633
6634 sdata = bfd_zalloc (abfd, amt);
6635 if (sdata == NULL)
6636 return FALSE;
6637 sec->used_by_bfd = sdata;
6638 }
6639
6640 record_section_with_aarch64_elf_section_data (sec);
6641
6642 return _bfd_elf_new_section_hook (abfd, sec);
6643 }
6644
6645
6646 static void
6647 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6648 asection *sec,
6649 void *ignore ATTRIBUTE_UNUSED)
6650 {
6651 unrecord_section_with_aarch64_elf_section_data (sec);
6652 }
6653
6654 static bfd_boolean
6655 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6656 {
6657 if (abfd->sections)
6658 bfd_map_over_sections (abfd,
6659 unrecord_section_via_map_over_sections, NULL);
6660
6661 return _bfd_elf_close_and_cleanup (abfd);
6662 }
6663
6664 static bfd_boolean
6665 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6666 {
6667 if (abfd->sections)
6668 bfd_map_over_sections (abfd,
6669 unrecord_section_via_map_over_sections, NULL);
6670
6671 return _bfd_free_cached_info (abfd);
6672 }
6673
6674 /* Create dynamic sections. This is different from the ARM backend in that
6675 the got, plt, gotplt and their relocation sections are all created in the
6676 standard part of the bfd elf backend. */
6677
6678 static bfd_boolean
6679 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6680 struct bfd_link_info *info)
6681 {
6682 struct elf_aarch64_link_hash_table *htab;
6683
6684 /* We need to create .got section. */
6685 if (!aarch64_elf_create_got_section (dynobj, info))
6686 return FALSE;
6687
6688 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6689 return FALSE;
6690
6691 htab = elf_aarch64_hash_table (info);
6692 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6693 if (!info->shared)
6694 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6695
6696 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6697 abort ();
6698
6699 return TRUE;
6700 }
6701
6702
6703 /* Allocate space in .plt, .got and associated reloc sections for
6704 dynamic relocs. */
6705
6706 static bfd_boolean
6707 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6708 {
6709 struct bfd_link_info *info;
6710 struct elf_aarch64_link_hash_table *htab;
6711 struct elf_aarch64_link_hash_entry *eh;
6712 struct elf_dyn_relocs *p;
6713
6714 /* An example of a bfd_link_hash_indirect symbol is versioned
6715 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6716 -> __gxx_personality_v0(bfd_link_hash_defined)
6717
6718 There is no need to process bfd_link_hash_indirect symbols here
6719 because we will also be presented with the concrete instance of
6720 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6721 called to copy all relevant data from the generic to the concrete
6722 symbol instance.
6723 */
6724 if (h->root.type == bfd_link_hash_indirect)
6725 return TRUE;
6726
6727 if (h->root.type == bfd_link_hash_warning)
6728 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6729
6730 info = (struct bfd_link_info *) inf;
6731 htab = elf_aarch64_hash_table (info);
6732
6733 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6734 here if it is defined and referenced in a non-shared object. */
6735 if (h->type == STT_GNU_IFUNC
6736 && h->def_regular)
6737 return TRUE;
6738 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6739 {
6740 /* Make sure this symbol is output as a dynamic symbol.
6741 Undefined weak syms won't yet be marked as dynamic. */
6742 if (h->dynindx == -1 && !h->forced_local)
6743 {
6744 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6745 return FALSE;
6746 }
6747
6748 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6749 {
6750 asection *s = htab->root.splt;
6751
6752 /* If this is the first .plt entry, make room for the special
6753 first entry. */
6754 if (s->size == 0)
6755 s->size += htab->plt_header_size;
6756
6757 h->plt.offset = s->size;
6758
6759 /* If this symbol is not defined in a regular file, and we are
6760 not generating a shared library, then set the symbol to this
6761 location in the .plt. This is required to make function
6762 pointers compare as equal between the normal executable and
6763 the shared library. */
6764 if (!info->shared && !h->def_regular)
6765 {
6766 h->root.u.def.section = s;
6767 h->root.u.def.value = h->plt.offset;
6768 }
6769
6770 /* Make room for this entry. For now we only create the
6771 small model PLT entries. We later need to find a way
6772 of relaxing into these from the large model PLT entries. */
6773 s->size += PLT_SMALL_ENTRY_SIZE;
6774
6775 /* We also need to make an entry in the .got.plt section, which
6776 will be placed in the .got section by the linker script. */
6777 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6778
6779 /* We also need to make an entry in the .rela.plt section. */
6780 htab->root.srelplt->size += RELOC_SIZE (htab);
6781
6782 /* We need to ensure that all GOT entries that serve the PLT
6783 are consecutive with the special GOT slots [0], [1] and
6784 [2].  Any additional relocations, such as
6785 R_AARCH64_TLSDESC, must be placed after the PLT related
6786 entries.  We abuse reloc_count such that during
6787 sizing we adjust reloc_count to indicate the number of
6788 PLT related reserved entries.  In subsequent phases, when
6789 filling in the contents of the reloc entries, PLT related
6790 entries are placed by computing their PLT index (0
6791 .. reloc_count), while other non-PLT relocs are placed
6792 at the slot indicated by reloc_count, and reloc_count is
6793 then updated.  */
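/* A rough sketch of the resulting .rela.plt layout (exact counts
   depend on the link):

     [0 .. reloc_count-1]  JUMP_SLOT relocs, one per PLT entry
     [reloc_count ..]      R_AARCH64_TLSDESC and other late relocs

   reloc_count is bumped below for the jump slot; the TLSDESC slots
   are claimed later, when the relocations are written out.  */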
6794
6795 htab->root.srelplt->reloc_count++;
6796 }
6797 else
6798 {
6799 h->plt.offset = (bfd_vma) - 1;
6800 h->needs_plt = 0;
6801 }
6802 }
6803 else
6804 {
6805 h->plt.offset = (bfd_vma) - 1;
6806 h->needs_plt = 0;
6807 }
6808
6809 eh = (struct elf_aarch64_link_hash_entry *) h;
6810 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6811
6812 if (h->got.refcount > 0)
6813 {
6814 bfd_boolean dyn;
6815 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6816
6817 h->got.offset = (bfd_vma) - 1;
6818
6819 dyn = htab->root.dynamic_sections_created;
6820
6821 /* Make sure this symbol is output as a dynamic symbol.
6822 Undefined weak syms won't yet be marked as dynamic. */
6823 if (dyn && h->dynindx == -1 && !h->forced_local)
6824 {
6825 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6826 return FALSE;
6827 }
6828
6829 if (got_type == GOT_UNKNOWN)
6830 {
6831 }
6832 else if (got_type == GOT_NORMAL)
6833 {
6834 h->got.offset = htab->root.sgot->size;
6835 htab->root.sgot->size += GOT_ENTRY_SIZE;
6836 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6837 || h->root.type != bfd_link_hash_undefweak)
6838 && (info->shared
6839 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6840 {
6841 htab->root.srelgot->size += RELOC_SIZE (htab);
6842 }
6843 }
6844 else
6845 {
6846 int indx;
6847 if (got_type & GOT_TLSDESC_GD)
6848 {
6849 eh->tlsdesc_got_jump_table_offset =
6850 (htab->root.sgotplt->size
6851 - aarch64_compute_jump_table_size (htab));
6852 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6853 h->got.offset = (bfd_vma) - 2;
6854 }
6855
6856 if (got_type & GOT_TLS_GD)
6857 {
6858 h->got.offset = htab->root.sgot->size;
6859 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6860 }
6861
6862 if (got_type & GOT_TLS_IE)
6863 {
6864 h->got.offset = htab->root.sgot->size;
6865 htab->root.sgot->size += GOT_ENTRY_SIZE;
6866 }
6867
6868 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6869 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6870 || h->root.type != bfd_link_hash_undefweak)
6871 && (info->shared
6872 || indx != 0
6873 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6874 {
6875 if (got_type & GOT_TLSDESC_GD)
6876 {
6877 htab->root.srelplt->size += RELOC_SIZE (htab);
6878 /* Note reloc_count not incremented here! We have
6879 already adjusted reloc_count for this relocation
6880 type. */
6881
6882 /* TLSDESC PLT is now needed, but not yet determined. */
6883 htab->tlsdesc_plt = (bfd_vma) - 1;
6884 }
6885
6886 if (got_type & GOT_TLS_GD)
6887 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6888
6889 if (got_type & GOT_TLS_IE)
6890 htab->root.srelgot->size += RELOC_SIZE (htab);
6891 }
6892 }
6893 }
6894 else
6895 {
6896 h->got.offset = (bfd_vma) - 1;
6897 }
6898
6899 if (eh->dyn_relocs == NULL)
6900 return TRUE;
6901
6902 /* In the shared -Bsymbolic case, discard space allocated for
6903 dynamic pc-relative relocs against symbols which turn out to be
6904 defined in regular objects. For the normal shared case, discard
6905 space for pc-relative relocs that have become local due to symbol
6906 visibility changes. */
6907
6908 if (info->shared)
6909 {
6910 /* Relocs that use pc_count are those that appear on a call
6911 insn, or certain REL relocs that can be generated via assembly.
6912 We want calls to protected symbols to resolve directly to the
6913 function rather than going via the plt. If people want
6914 function pointer comparisons to work as expected then they
6915 should avoid writing weird assembly. */
6916 if (SYMBOL_CALLS_LOCAL (info, h))
6917 {
6918 struct elf_dyn_relocs **pp;
6919
6920 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6921 {
6922 p->count -= p->pc_count;
6923 p->pc_count = 0;
6924 if (p->count == 0)
6925 *pp = p->next;
6926 else
6927 pp = &p->next;
6928 }
6929 }
6930
6931 /* Also discard relocs on undefined weak syms with non-default
6932 visibility. */
6933 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6934 {
6935 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6936 eh->dyn_relocs = NULL;
6937
6938 /* Make sure undefined weak symbols are output as a dynamic
6939 symbol in PIEs. */
6940 else if (h->dynindx == -1
6941 && !h->forced_local
6942 && !bfd_elf_link_record_dynamic_symbol (info, h))
6943 return FALSE;
6944 }
6945
6946 }
6947 else if (ELIMINATE_COPY_RELOCS)
6948 {
6949 /* For the non-shared case, discard space for relocs against
6950 symbols which turn out to need copy relocs or are not
6951 dynamic. */
6952
6953 if (!h->non_got_ref
6954 && ((h->def_dynamic
6955 && !h->def_regular)
6956 || (htab->root.dynamic_sections_created
6957 && (h->root.type == bfd_link_hash_undefweak
6958 || h->root.type == bfd_link_hash_undefined))))
6959 {
6960 /* Make sure this symbol is output as a dynamic symbol.
6961 Undefined weak syms won't yet be marked as dynamic. */
6962 if (h->dynindx == -1
6963 && !h->forced_local
6964 && !bfd_elf_link_record_dynamic_symbol (info, h))
6965 return FALSE;
6966
6967 /* If that succeeded, we know we'll be keeping all the
6968 relocs. */
6969 if (h->dynindx != -1)
6970 goto keep;
6971 }
6972
6973 eh->dyn_relocs = NULL;
6974
6975 keep:;
6976 }
6977
6978 /* Finally, allocate space. */
6979 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6980 {
6981 asection *sreloc;
6982
6983 sreloc = elf_section_data (p->sec)->sreloc;
6984
6985 BFD_ASSERT (sreloc != NULL);
6986
6987 sreloc->size += p->count * RELOC_SIZE (htab);
6988 }
6989
6990 return TRUE;
6991 }
6992
6993 /* Allocate space in .plt, .got and associated reloc sections for
6994 ifunc dynamic relocs. */
6995
6996 static bfd_boolean
6997 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6998 void *inf)
6999 {
7000 struct bfd_link_info *info;
7001 struct elf_aarch64_link_hash_table *htab;
7002 struct elf_aarch64_link_hash_entry *eh;
7003
7004 /* An example of a bfd_link_hash_indirect symbol is versioned
7005 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7006 -> __gxx_personality_v0(bfd_link_hash_defined)
7007
7008 There is no need to process bfd_link_hash_indirect symbols here
7009 because we will also be presented with the concrete instance of
7010 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7011 called to copy all relevant data from the generic to the concrete
7012 symbol instance.
7013 */
7014 if (h->root.type == bfd_link_hash_indirect)
7015 return TRUE;
7016
7017 if (h->root.type == bfd_link_hash_warning)
7018 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7019
7020 info = (struct bfd_link_info *) inf;
7021 htab = elf_aarch64_hash_table (info);
7022
7023 eh = (struct elf_aarch64_link_hash_entry *) h;
7024
7025 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7026 here if it is defined and referenced in a non-shared object. */
7027 if (h->type == STT_GNU_IFUNC
7028 && h->def_regular)
7029 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7030 &eh->dyn_relocs,
7031 htab->plt_entry_size,
7032 htab->plt_header_size,
7033 GOT_ENTRY_SIZE);
7034 return TRUE;
7035 }
7036
7037 /* Allocate space in .plt, .got and associated reloc sections for
7038 local dynamic relocs. */
7039
7040 static bfd_boolean
7041 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7042 {
7043 struct elf_link_hash_entry *h
7044 = (struct elf_link_hash_entry *) *slot;
7045
7046 if (h->type != STT_GNU_IFUNC
7047 || !h->def_regular
7048 || !h->ref_regular
7049 || !h->forced_local
7050 || h->root.type != bfd_link_hash_defined)
7051 abort ();
7052
7053 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7054 }
7055
7056 /* Allocate space in .plt, .got and associated reloc sections for
7057 local ifunc dynamic relocs. */
7058
7059 static bfd_boolean
7060 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7061 {
7062 struct elf_link_hash_entry *h
7063 = (struct elf_link_hash_entry *) *slot;
7064
7065 if (h->type != STT_GNU_IFUNC
7066 || !h->def_regular
7067 || !h->ref_regular
7068 || !h->forced_local
7069 || h->root.type != bfd_link_hash_defined)
7070 abort ();
7071
7072 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7073 }
7074
7075 /* This is the most important function of all.  Innocuously named
7076 though!  */
7077 static bfd_boolean
7078 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7079 struct bfd_link_info *info)
7080 {
7081 struct elf_aarch64_link_hash_table *htab;
7082 bfd *dynobj;
7083 asection *s;
7084 bfd_boolean relocs;
7085 bfd *ibfd;
7086
7087 htab = elf_aarch64_hash_table (info);
7088 dynobj = htab->root.dynobj;
7089
7090 BFD_ASSERT (dynobj != NULL);
7091
7092 if (htab->root.dynamic_sections_created)
7093 {
7094 if (info->executable)
7095 {
7096 s = bfd_get_linker_section (dynobj, ".interp");
7097 if (s == NULL)
7098 abort ();
7099 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7100 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7101 }
7102 }
7103
7104 /* Set up .got offsets for local syms, and space for local dynamic
7105 relocs. */
7106 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7107 {
7108 struct elf_aarch64_local_symbol *locals = NULL;
7109 Elf_Internal_Shdr *symtab_hdr;
7110 asection *srel;
7111 unsigned int i;
7112
7113 if (!is_aarch64_elf (ibfd))
7114 continue;
7115
7116 for (s = ibfd->sections; s != NULL; s = s->next)
7117 {
7118 struct elf_dyn_relocs *p;
7119
7120 for (p = (struct elf_dyn_relocs *)
7121 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7122 {
7123 if (!bfd_is_abs_section (p->sec)
7124 && bfd_is_abs_section (p->sec->output_section))
7125 {
7126 /* Input section has been discarded, either because
7127 it is a copy of a linkonce section or due to
7128 linker script /DISCARD/, so we'll be discarding
7129 the relocs too. */
7130 }
7131 else if (p->count != 0)
7132 {
7133 srel = elf_section_data (p->sec)->sreloc;
7134 srel->size += p->count * RELOC_SIZE (htab);
7135 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7136 info->flags |= DF_TEXTREL;
7137 }
7138 }
7139 }
7140
7141 locals = elf_aarch64_locals (ibfd);
7142 if (!locals)
7143 continue;
7144
7145 symtab_hdr = &elf_symtab_hdr (ibfd);
7146 srel = htab->root.srelgot;
7147 for (i = 0; i < symtab_hdr->sh_info; i++)
7148 {
7149 locals[i].got_offset = (bfd_vma) - 1;
7150 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7151 if (locals[i].got_refcount > 0)
7152 {
7153 unsigned got_type = locals[i].got_type;
7154 if (got_type & GOT_TLSDESC_GD)
7155 {
7156 locals[i].tlsdesc_got_jump_table_offset =
7157 (htab->root.sgotplt->size
7158 - aarch64_compute_jump_table_size (htab));
7159 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7160 locals[i].got_offset = (bfd_vma) - 2;
7161 }
7162
7163 if (got_type & GOT_TLS_GD)
7164 {
7165 locals[i].got_offset = htab->root.sgot->size;
7166 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7167 }
7168
7169 if (got_type & GOT_TLS_IE)
7170 {
7171 locals[i].got_offset = htab->root.sgot->size;
7172 htab->root.sgot->size += GOT_ENTRY_SIZE;
7173 }
7174
7175 if (got_type == GOT_UNKNOWN)
7176 {
7177 }
7178
7179 if (got_type == GOT_NORMAL)
7180 {
7181 }
7182
7183 if (info->shared)
7184 {
7185 if (got_type & GOT_TLSDESC_GD)
7186 {
7187 htab->root.srelplt->size += RELOC_SIZE (htab);
7188 /* Note RELOC_COUNT not incremented here! */
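		      /* Setting -1 here only marks that a TLSDESC PLT is
			 needed; the real offset into .plt is computed
			 further below, once the PLT size is known.  */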
7189 htab->tlsdesc_plt = (bfd_vma) - 1;
7190 }
7191
7192 if (got_type & GOT_TLS_GD)
7193 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7194
7195 if (got_type & GOT_TLS_IE)
7196 htab->root.srelgot->size += RELOC_SIZE (htab);
7197 }
7198 }
7199 else
7200 {
7201 locals[i].got_refcount = (bfd_vma) - 1;
7202 }
7203 }
7204 }
7205
7206
7207 /* Allocate global sym .plt and .got entries, and space for global
7208 sym dynamic relocs. */
7209 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7210 info);
7211
7212 /* Allocate global ifunc sym .plt and .got entries, and space for global
7213 ifunc sym dynamic relocs. */
7214 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7215 info);
7216
7217 /* Allocate .plt and .got entries, and space for local symbols. */
7218 htab_traverse (htab->loc_hash_table,
7219 elfNN_aarch64_allocate_local_dynrelocs,
7220 info);
7221
7222 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7223 htab_traverse (htab->loc_hash_table,
7224 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7225 info);
7226
7227 /* For every jump slot reserved in the sgotplt, reloc_count is
7228 incremented. However, when we reserve space for TLS descriptors,
7229 it's not incremented, so in order to compute the space reserved
7230 for them, it suffices to multiply the reloc count by the jump
7231 slot size. */
7232
7233 if (htab->root.srelplt)
7234 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7235
7236 if (htab->tlsdesc_plt)
7237 {
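      /* Even if no regular PLT entries were allocated, reserve room for
	 the PLT0 header; the TLSDESC trampoline sized below is placed
	 just after it.  */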
7238 if (htab->root.splt->size == 0)
7239 htab->root.splt->size += PLT_ENTRY_SIZE;
7240
7241 htab->tlsdesc_plt = htab->root.splt->size;
7242 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7243
7244 /* If we're not using lazy TLS relocations, don't generate the GOT
7245 entry it would otherwise require. */
7246 if (!(info->flags & DF_BIND_NOW))
7247 {
7248 htab->dt_tlsdesc_got = htab->root.sgot->size;
7249 htab->root.sgot->size += GOT_ENTRY_SIZE;
7250 }
7251 }
7252
7253 /* Initialize mapping symbol information, used later to distinguish
7254 between code and data while scanning for erratum 835769. */
7255 if (htab->fix_erratum_835769)
7256 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7257 {
7258 if (!is_aarch64_elf (ibfd))
7259 continue;
7260 bfd_elfNN_aarch64_init_maps (ibfd);
7261 }
7262
7263 /* We now have determined the sizes of the various dynamic sections.
7264 Allocate memory for them. */
7265 relocs = FALSE;
7266 for (s = dynobj->sections; s != NULL; s = s->next)
7267 {
7268 if ((s->flags & SEC_LINKER_CREATED) == 0)
7269 continue;
7270
7271 if (s == htab->root.splt
7272 || s == htab->root.sgot
7273 || s == htab->root.sgotplt
7274 || s == htab->root.iplt
7275 || s == htab->root.igotplt || s == htab->sdynbss)
7276 {
7277 /* Strip this section if we don't need it; see the
7278 comment below. */
7279 }
7280 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7281 {
7282 if (s->size != 0 && s != htab->root.srelplt)
7283 relocs = TRUE;
7284
7285 /* We use the reloc_count field as a counter if we need
7286 to copy relocs into the output file. */
7287 if (s != htab->root.srelplt)
7288 s->reloc_count = 0;
7289 }
7290 else
7291 {
7292 /* It's not one of our sections, so don't allocate space. */
7293 continue;
7294 }
7295
7296 if (s->size == 0)
7297 {
7298 /* If we don't need this section, strip it from the
7299 output file. This is mostly to handle .rela.bss and
7300 .rela.plt. We must create both sections in
7301 create_dynamic_sections, because they must be created
7302 before the linker maps input sections to output
7303 sections. The linker does that before
7304 adjust_dynamic_symbol is called, and it is that
7305 function which decides whether anything needs to go
7306 into these sections. */
7307
7308 s->flags |= SEC_EXCLUDE;
7309 continue;
7310 }
7311
7312 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7313 continue;
7314
7315 /* Allocate memory for the section contents. We use bfd_zalloc
7316 here in case unused entries are not reclaimed before the
7317 section's contents are written out. This should not happen,
7318 but this way if it does, we get a R_AARCH64_NONE reloc instead
7319 of garbage. */
7320 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7321 if (s->contents == NULL)
7322 return FALSE;
7323 }
7324
7325 if (htab->root.dynamic_sections_created)
7326 {
7327 /* Add some entries to the .dynamic section. We fill in the
7328 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7329 must add the entries now so that we get the correct size for
7330 the .dynamic section. The DT_DEBUG entry is filled in by the
7331 dynamic linker and used by the debugger. */
7332 #define add_dynamic_entry(TAG, VAL) \
7333 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7334
7335 if (info->executable)
7336 {
7337 if (!add_dynamic_entry (DT_DEBUG, 0))
7338 return FALSE;
7339 }
7340
7341 if (htab->root.splt->size != 0)
7342 {
7343 if (!add_dynamic_entry (DT_PLTGOT, 0)
7344 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7345 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7346 || !add_dynamic_entry (DT_JMPREL, 0))
7347 return FALSE;
7348
7349 if (htab->tlsdesc_plt
7350 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7351 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7352 return FALSE;
7353 }
7354
7355 if (relocs)
7356 {
7357 if (!add_dynamic_entry (DT_RELA, 0)
7358 || !add_dynamic_entry (DT_RELASZ, 0)
7359 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7360 return FALSE;
7361
7362 /* If any dynamic relocs apply to a read-only section,
7363 then we need a DT_TEXTREL entry. */
7364 if ((info->flags & DF_TEXTREL) != 0)
7365 {
7366 if (!add_dynamic_entry (DT_TEXTREL, 0))
7367 return FALSE;
7368 }
7369 }
7370 }
7371 #undef add_dynamic_entry
7372
7373 return TRUE;
7374 }
7375
7376 static inline void
7377 elf_aarch64_update_plt_entry (bfd *output_bfd,
7378 bfd_reloc_code_real_type r_type,
7379 bfd_byte *plt_entry, bfd_vma value)
7380 {
7381 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7382
7383 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7384 }
7385
7386 static void
7387 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7388 struct elf_aarch64_link_hash_table
7389 *htab, bfd *output_bfd,
7390 struct bfd_link_info *info)
7391 {
7392 bfd_byte *plt_entry;
7393 bfd_vma plt_index;
7394 bfd_vma got_offset;
7395 bfd_vma gotplt_entry_address;
7396 bfd_vma plt_entry_address;
7397 Elf_Internal_Rela rela;
7398 bfd_byte *loc;
7399 asection *plt, *gotplt, *relplt;
7400
7401 /* When building a static executable, use .iplt, .igot.plt and
7402 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7403 if (htab->root.splt != NULL)
7404 {
7405 plt = htab->root.splt;
7406 gotplt = htab->root.sgotplt;
7407 relplt = htab->root.srelplt;
7408 }
7409 else
7410 {
7411 plt = htab->root.iplt;
7412 gotplt = htab->root.igotplt;
7413 relplt = htab->root.irelplt;
7414 }
7415
7416 /* Get the index in the procedure linkage table which
7417 corresponds to this symbol. This is the index of this symbol
7418 in all the symbols for which we are making plt entries. The
7419 first entry in the procedure linkage table is reserved.
7420
7421 Get the offset into the .got table of the entry that
7422 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7423 bytes. The first three are reserved for the dynamic linker.
7424
7425 For static executables, we don't reserve anything. */
7426
7427 if (plt == htab->root.splt)
7428 {
7429 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7430 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
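	  /* For example, the first PLT entry (plt_index 0) therefore uses
	     the GOT slot at offset 3 * GOT_ENTRY_SIZE, immediately after
	     the reserved entries.  */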
7431 }
7432 else
7433 {
7434 plt_index = h->plt.offset / htab->plt_entry_size;
7435 got_offset = plt_index * GOT_ENTRY_SIZE;
7436 }
7437
7438 plt_entry = plt->contents + h->plt.offset;
7439 plt_entry_address = plt->output_section->vma
7440 + plt->output_offset + h->plt.offset;
7441 gotplt_entry_address = gotplt->output_section->vma +
7442 gotplt->output_offset + got_offset;
7443
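   /* A sketch of what the small PLTn stub is expected to look like in
      ELF64 (the first three instructions are the ones patched below):
	adrp x16, PLT_GOT + n * 8
	ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
	add  x16, x16, #:lo12:PLT_GOT + n * 8
	br   x17  */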
7444 /* Copy in the boiler-plate for the PLTn entry. */
7445 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7446
7447 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7448 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7449 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7450 plt_entry,
7451 PG (gotplt_entry_address) -
7452 PG (plt_entry_address));
7453
7454 /* Fill in the lo12 bits for the load from the pltgot. */
7455 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7456 plt_entry + 4,
7457 PG_OFFSET (gotplt_entry_address));
7458
7459 /* Fill in the lo12 bits for the add from the pltgot entry. */
7460 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7461 plt_entry + 8,
7462 PG_OFFSET (gotplt_entry_address));
7463
7464 /* All the GOTPLT entries are essentially initialized to PLT0. */
7465 bfd_put_NN (output_bfd,
7466 plt->output_section->vma + plt->output_offset,
7467 gotplt->contents + got_offset);
7468
7469 rela.r_offset = gotplt_entry_address;
7470
7471 if (h->dynindx == -1
7472 || ((info->executable
7473 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7474 && h->def_regular
7475 && h->type == STT_GNU_IFUNC))
7476 {
7477 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7478 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7479 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7480 rela.r_addend = (h->root.u.def.value
7481 + h->root.u.def.section->output_section->vma
7482 + h->root.u.def.section->output_offset);
7483 }
7484 else
7485 {
7486 /* Fill in the entry in the .rela.plt section. */
7487 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7488 rela.r_addend = 0;
7489 }
7490
7491 /* Compute the relocation entry to use based on the PLT index and do
7492 not adjust reloc_count. The reloc_count has already been adjusted
7493 to account for this entry. */
7494 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7495 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7496 }
7497
7498 /* Size sections even though they're not dynamic. We use this hook
7499 to set up _TLS_MODULE_BASE_, if needed. */
7500
7501 static bfd_boolean
7502 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7503 struct bfd_link_info *info)
7504 {
7505 asection *tls_sec;
7506
7507 if (info->relocatable)
7508 return TRUE;
7509
7510 tls_sec = elf_hash_table (info)->tls_sec;
7511
7512 if (tls_sec)
7513 {
7514 struct elf_link_hash_entry *tlsbase;
7515
7516 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7517 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7518
7519 if (tlsbase)
7520 {
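	  /* Define _TLS_MODULE_BASE_ at offset 0 of the TLS segment and
	     hide it, so that it does not end up in the dynamic symbol
	     table.  */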
7521 struct bfd_link_hash_entry *h = NULL;
7522 const struct elf_backend_data *bed =
7523 get_elf_backend_data (output_bfd);
7524
7525 if (!(_bfd_generic_link_add_one_symbol
7526 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7527 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7528 return FALSE;
7529
7530 tlsbase->type = STT_TLS;
7531 tlsbase = (struct elf_link_hash_entry *) h;
7532 tlsbase->def_regular = 1;
7533 tlsbase->other = STV_HIDDEN;
7534 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7535 }
7536 }
7537
7538 return TRUE;
7539 }
7540
7541 /* Finish up dynamic symbol handling. We set the contents of various
7542 dynamic sections here. */
7543 static bfd_boolean
7544 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7545 struct bfd_link_info *info,
7546 struct elf_link_hash_entry *h,
7547 Elf_Internal_Sym *sym)
7548 {
7549 struct elf_aarch64_link_hash_table *htab;
7550 htab = elf_aarch64_hash_table (info);
7551
7552 if (h->plt.offset != (bfd_vma) - 1)
7553 {
7554 asection *plt, *gotplt, *relplt;
7555
7556 /* This symbol has an entry in the procedure linkage table. Set
7557 it up. */
7558
7559 /* When building a static executable, use .iplt, .igot.plt and
7560 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7561 if (htab->root.splt != NULL)
7562 {
7563 plt = htab->root.splt;
7564 gotplt = htab->root.sgotplt;
7565 relplt = htab->root.srelplt;
7566 }
7567 else
7568 {
7569 plt = htab->root.iplt;
7570 gotplt = htab->root.igotplt;
7571 relplt = htab->root.irelplt;
7572 }
7573
7574 /* This symbol has an entry in the procedure linkage table. Set
7575 it up. */
7576 if ((h->dynindx == -1
7577 && !((h->forced_local || info->executable)
7578 && h->def_regular
7579 && h->type == STT_GNU_IFUNC))
7580 || plt == NULL
7581 || gotplt == NULL
7582 || relplt == NULL)
7583 abort ();
7584
7585 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7586 if (!h->def_regular)
7587 {
7588 /* Mark the symbol as undefined, rather than as defined in
7589 the .plt section. */
7590 sym->st_shndx = SHN_UNDEF;
7591 /* If the symbol is weak we need to clear the value.
7592 Otherwise, the PLT entry would provide a definition for
7593 the symbol even if the symbol wasn't defined anywhere,
7594 and so the symbol would never be NULL. Leave the value if
7595 there were any relocations where pointer equality matters
7596 (this is a clue for the dynamic linker, to make function
7597 pointer comparisons work between an application and shared
7598 library). */
7599 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7600 sym->st_value = 0;
7601 }
7602 }
7603
7604 if (h->got.offset != (bfd_vma) - 1
7605 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7606 {
7607 Elf_Internal_Rela rela;
7608 bfd_byte *loc;
7609
7610 /* This symbol has an entry in the global offset table. Set it
7611 up. */
7612 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7613 abort ();
7614
7615 rela.r_offset = (htab->root.sgot->output_section->vma
7616 + htab->root.sgot->output_offset
7617 + (h->got.offset & ~(bfd_vma) 1));
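      /* Bit 0 of h->got.offset is used as a flag elsewhere in this
	 backend (see the BFD_ASSERTs below); mask it off to get the
	 real GOT offset.  */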
7618
7619 if (h->def_regular
7620 && h->type == STT_GNU_IFUNC)
7621 {
7622 if (info->shared)
7623 {
7624 /* Generate R_AARCH64_GLOB_DAT. */
7625 goto do_glob_dat;
7626 }
7627 else
7628 {
7629 asection *plt;
7630
7631 if (!h->pointer_equality_needed)
7632 abort ();
7633
7634 /* For a non-shared object we can't use .got.plt, which contains
7635 the real function address, if we need pointer equality. We
7636 load the GOT entry with the address of the PLT entry instead. */
7637 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7638 bfd_put_NN (output_bfd, (plt->output_section->vma
7639 + plt->output_offset
7640 + h->plt.offset),
7641 htab->root.sgot->contents
7642 + (h->got.offset & ~(bfd_vma) 1));
7643 return TRUE;
7644 }
7645 }
7646 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7647 {
7648 if (!h->def_regular)
7649 return FALSE;
7650
7651 BFD_ASSERT ((h->got.offset & 1) != 0);
7652 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7653 rela.r_addend = (h->root.u.def.value
7654 + h->root.u.def.section->output_section->vma
7655 + h->root.u.def.section->output_offset);
7656 }
7657 else
7658 {
7659 do_glob_dat:
7660 BFD_ASSERT ((h->got.offset & 1) == 0);
7661 bfd_put_NN (output_bfd, (bfd_vma) 0,
7662 htab->root.sgot->contents + h->got.offset);
7663 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7664 rela.r_addend = 0;
7665 }
7666
7667 loc = htab->root.srelgot->contents;
7668 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7669 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7670 }
7671
7672 if (h->needs_copy)
7673 {
7674 Elf_Internal_Rela rela;
7675 bfd_byte *loc;
7676
7677 /* This symbol needs a copy reloc. Set it up. */
7678
7679 if (h->dynindx == -1
7680 || (h->root.type != bfd_link_hash_defined
7681 && h->root.type != bfd_link_hash_defweak)
7682 || htab->srelbss == NULL)
7683 abort ();
7684
7685 rela.r_offset = (h->root.u.def.value
7686 + h->root.u.def.section->output_section->vma
7687 + h->root.u.def.section->output_offset);
7688 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7689 rela.r_addend = 0;
7690 loc = htab->srelbss->contents;
7691 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7692 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7693 }
7694
7695 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7696 be NULL for local symbols. */
7697 if (sym != NULL
7698 && (h == elf_hash_table (info)->hdynamic
7699 || h == elf_hash_table (info)->hgot))
7700 sym->st_shndx = SHN_ABS;
7701
7702 return TRUE;
7703 }
7704
7705 /* Finish up local dynamic symbol handling. We set the contents of
7706 various dynamic sections here. */
7707
7708 static bfd_boolean
7709 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7710 {
7711 struct elf_link_hash_entry *h
7712 = (struct elf_link_hash_entry *) *slot;
7713 struct bfd_link_info *info
7714 = (struct bfd_link_info *) inf;
7715
7716 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7717 info, h, NULL);
7718 }
7719
7720 static void
7721 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7722 struct elf_aarch64_link_hash_table
7723 *htab)
7724 {
7725 /* Fill in PLT0. FIXME (RR): Note this doesn't distinguish between
7726 small and large PLTs and at the moment just generates
7727 the small PLT. */
7728
7729 /* PLT0 of the small PLT looks like this in ELF64 -
7730 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7731 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7732 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7733 // symbol resolver
7734 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7735 // GOTPLT entry for this.
7736 br x17
7737 PLT0 will be slightly different in ELF32 due to the different GOT
7738 entry size.
7739 */
7740 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7741 bfd_vma plt_base;
7742
7743
7744 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7745 PLT_ENTRY_SIZE);
7746 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7747 PLT_ENTRY_SIZE;
7748
7749 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7750 + htab->root.sgotplt->output_offset
7751 + GOT_ENTRY_SIZE * 2);
7752
7753 plt_base = htab->root.splt->output_section->vma +
7754 htab->root.splt->output_offset;
7755
7756 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7757 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7758 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7759 htab->root.splt->contents + 4,
7760 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7761
7762 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7763 htab->root.splt->contents + 8,
7764 PG_OFFSET (plt_got_2nd_ent));
7765
7766 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7767 htab->root.splt->contents + 12,
7768 PG_OFFSET (plt_got_2nd_ent));
7769 }
7770
7771 static bfd_boolean
7772 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7773 struct bfd_link_info *info)
7774 {
7775 struct elf_aarch64_link_hash_table *htab;
7776 bfd *dynobj;
7777 asection *sdyn;
7778
7779 htab = elf_aarch64_hash_table (info);
7780 dynobj = htab->root.dynobj;
7781 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7782
7783 if (htab->root.dynamic_sections_created)
7784 {
7785 ElfNN_External_Dyn *dyncon, *dynconend;
7786
7787 if (sdyn == NULL || htab->root.sgot == NULL)
7788 abort ();
7789
7790 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7791 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7792 for (; dyncon < dynconend; dyncon++)
7793 {
7794 Elf_Internal_Dyn dyn;
7795 asection *s;
7796
7797 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7798
7799 switch (dyn.d_tag)
7800 {
7801 default:
7802 continue;
7803
7804 case DT_PLTGOT:
7805 s = htab->root.sgotplt;
7806 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7807 break;
7808
7809 case DT_JMPREL:
7810 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7811 break;
7812
7813 case DT_PLTRELSZ:
7814 s = htab->root.srelplt;
7815 dyn.d_un.d_val = s->size;
7816 break;
7817
7818 case DT_RELASZ:
7819 /* The procedure linkage table relocs (DT_JMPREL) should
7820 not be included in the overall relocs (DT_RELA).
7821 Therefore, we override the DT_RELASZ entry here to
7822 make it not include the JMPREL relocs. Since the
7823 linker script arranges for .rela.plt to follow all
7824 other relocation sections, we don't have to worry
7825 about changing the DT_RELA entry. */
7826 if (htab->root.srelplt != NULL)
7827 {
7828 s = htab->root.srelplt;
7829 dyn.d_un.d_val -= s->size;
7830 }
7831 break;
7832
7833 case DT_TLSDESC_PLT:
7834 s = htab->root.splt;
7835 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7836 + htab->tlsdesc_plt;
7837 break;
7838
7839 case DT_TLSDESC_GOT:
7840 s = htab->root.sgot;
7841 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7842 + htab->dt_tlsdesc_got;
7843 break;
7844 }
7845
7846 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7847 }
7848
7849 }
7850
7851 /* Fill in the special first entry in the procedure linkage table. */
7852 if (htab->root.splt && htab->root.splt->size > 0)
7853 {
7854 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7855
7856 elf_section_data (htab->root.splt->output_section)->
7857 this_hdr.sh_entsize = htab->plt_entry_size;
7858
7859
7860 if (htab->tlsdesc_plt)
7861 {
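	  /* Zero the reserved TLSDESC GOT slot; presumably the dynamic
	     linker fills it in at run time when lazy TLSDESC resolution
	     is used.  */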
7862 bfd_put_NN (output_bfd, (bfd_vma) 0,
7863 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7864
7865 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7866 elfNN_aarch64_tlsdesc_small_plt_entry,
7867 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
7868
7869 {
7870 bfd_vma adrp1_addr =
7871 htab->root.splt->output_section->vma
7872 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7873
7874 bfd_vma adrp2_addr = adrp1_addr + 4;
7875
7876 bfd_vma got_addr =
7877 htab->root.sgot->output_section->vma
7878 + htab->root.sgot->output_offset;
7879
7880 bfd_vma pltgot_addr =
7881 htab->root.sgotplt->output_section->vma
7882 + htab->root.sgotplt->output_offset;
7883
7884 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7885
7886 bfd_byte *plt_entry =
7887 htab->root.splt->contents + htab->tlsdesc_plt;
7888
7889 /* adrp x2, DT_TLSDESC_GOT */
7890 elf_aarch64_update_plt_entry (output_bfd,
7891 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7892 plt_entry + 4,
7893 (PG (dt_tlsdesc_got)
7894 - PG (adrp1_addr)));
7895
7896 /* adrp x3, 0 */
7897 elf_aarch64_update_plt_entry (output_bfd,
7898 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7899 plt_entry + 8,
7900 (PG (pltgot_addr)
7901 - PG (adrp2_addr)));
7902
7903 /* ldr x2, [x2, #0] */
7904 elf_aarch64_update_plt_entry (output_bfd,
7905 BFD_RELOC_AARCH64_LDSTNN_LO12,
7906 plt_entry + 12,
7907 PG_OFFSET (dt_tlsdesc_got));
7908
7909 /* add x3, x3, 0 */
7910 elf_aarch64_update_plt_entry (output_bfd,
7911 BFD_RELOC_AARCH64_ADD_LO12,
7912 plt_entry + 16,
7913 PG_OFFSET (pltgot_addr));
7914 }
7915 }
7916 }
7917
7918 if (htab->root.sgotplt)
7919 {
7920 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7921 {
7922 (*_bfd_error_handler)
7923 (_("discarded output section: `%A'"), htab->root.sgotplt);
7924 return FALSE;
7925 }
7926
7927 /* Fill in the first three entries in the global offset table. */
7928 if (htab->root.sgotplt->size > 0)
7929 {
7930 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7931
7932 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7933 bfd_put_NN (output_bfd,
7934 (bfd_vma) 0,
7935 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7936 bfd_put_NN (output_bfd,
7937 (bfd_vma) 0,
7938 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
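	  /* GOT[1] and GOT[2] are left as zero here; conventionally the
	     dynamic linker stores its link map and resolver entry point
	     in these two slots at run time.  */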
7939 }
7940
7941 if (htab->root.sgot)
7942 {
7943 if (htab->root.sgot->size > 0)
7944 {
7945 bfd_vma addr =
7946 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7947 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7948 }
7949 }
7950
7951 elf_section_data (htab->root.sgotplt->output_section)->
7952 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7953 }
7954
7955 if (htab->root.sgot && htab->root.sgot->size > 0)
7956 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7957 = GOT_ENTRY_SIZE;
7958
7959 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7960 htab_traverse (htab->loc_hash_table,
7961 elfNN_aarch64_finish_local_dynamic_symbol,
7962 info);
7963
7964 return TRUE;
7965 }
7966
7967 /* Return address for Ith PLT stub in section PLT, for relocation REL
7968 or (bfd_vma) -1 if it should not be included. */
7969
7970 static bfd_vma
7971 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7972 const arelent *rel ATTRIBUTE_UNUSED)
7973 {
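  /* Skip the PLT0 header; the PLTn stubs follow it contiguously at
     PLT_SMALL_ENTRY_SIZE intervals.  */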
7974 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7975 }
7976
7977
7978 /* We use this so we can override certain functions
7979 (though currently we don't). */
7980
7981 const struct elf_size_info elfNN_aarch64_size_info =
7982 {
7983 sizeof (ElfNN_External_Ehdr),
7984 sizeof (ElfNN_External_Phdr),
7985 sizeof (ElfNN_External_Shdr),
7986 sizeof (ElfNN_External_Rel),
7987 sizeof (ElfNN_External_Rela),
7988 sizeof (ElfNN_External_Sym),
7989 sizeof (ElfNN_External_Dyn),
7990 sizeof (Elf_External_Note),
7991 4, /* Hash table entry size. */
7992 1, /* Internal relocs per external relocs. */
7993 ARCH_SIZE, /* Arch size. */
7994 LOG_FILE_ALIGN, /* Log_file_align. */
7995 ELFCLASSNN, EV_CURRENT,
7996 bfd_elfNN_write_out_phdrs,
7997 bfd_elfNN_write_shdrs_and_ehdr,
7998 bfd_elfNN_checksum_contents,
7999 bfd_elfNN_write_relocs,
8000 bfd_elfNN_swap_symbol_in,
8001 bfd_elfNN_swap_symbol_out,
8002 bfd_elfNN_slurp_reloc_table,
8003 bfd_elfNN_slurp_symbol_table,
8004 bfd_elfNN_swap_dyn_in,
8005 bfd_elfNN_swap_dyn_out,
8006 bfd_elfNN_swap_reloc_in,
8007 bfd_elfNN_swap_reloc_out,
8008 bfd_elfNN_swap_reloca_in,
8009 bfd_elfNN_swap_reloca_out
8010 };
8011
8012 #define ELF_ARCH bfd_arch_aarch64
8013 #define ELF_MACHINE_CODE EM_AARCH64
8014 #define ELF_MAXPAGESIZE 0x10000
8015 #define ELF_MINPAGESIZE 0x1000
8016 #define ELF_COMMONPAGESIZE 0x1000
8017
8018 #define bfd_elfNN_close_and_cleanup \
8019 elfNN_aarch64_close_and_cleanup
8020
8021 #define bfd_elfNN_bfd_free_cached_info \
8022 elfNN_aarch64_bfd_free_cached_info
8023
8024 #define bfd_elfNN_bfd_is_target_special_symbol \
8025 elfNN_aarch64_is_target_special_symbol
8026
8027 #define bfd_elfNN_bfd_link_hash_table_create \
8028 elfNN_aarch64_link_hash_table_create
8029
8030 #define bfd_elfNN_bfd_merge_private_bfd_data \
8031 elfNN_aarch64_merge_private_bfd_data
8032
8033 #define bfd_elfNN_bfd_print_private_bfd_data \
8034 elfNN_aarch64_print_private_bfd_data
8035
8036 #define bfd_elfNN_bfd_reloc_type_lookup \
8037 elfNN_aarch64_reloc_type_lookup
8038
8039 #define bfd_elfNN_bfd_reloc_name_lookup \
8040 elfNN_aarch64_reloc_name_lookup
8041
8042 #define bfd_elfNN_bfd_set_private_flags \
8043 elfNN_aarch64_set_private_flags
8044
8045 #define bfd_elfNN_find_inliner_info \
8046 elfNN_aarch64_find_inliner_info
8047
8048 #define bfd_elfNN_find_nearest_line \
8049 elfNN_aarch64_find_nearest_line
8050
8051 #define bfd_elfNN_mkobject \
8052 elfNN_aarch64_mkobject
8053
8054 #define bfd_elfNN_new_section_hook \
8055 elfNN_aarch64_new_section_hook
8056
8057 #define elf_backend_adjust_dynamic_symbol \
8058 elfNN_aarch64_adjust_dynamic_symbol
8059
8060 #define elf_backend_always_size_sections \
8061 elfNN_aarch64_always_size_sections
8062
8063 #define elf_backend_check_relocs \
8064 elfNN_aarch64_check_relocs
8065
8066 #define elf_backend_copy_indirect_symbol \
8067 elfNN_aarch64_copy_indirect_symbol
8068
8069 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up
8070 shortcuts to them in our hash table. */
8071 #define elf_backend_create_dynamic_sections \
8072 elfNN_aarch64_create_dynamic_sections
8073
8074 #define elf_backend_init_index_section \
8075 _bfd_elf_init_2_index_sections
8076
8077 #define elf_backend_finish_dynamic_sections \
8078 elfNN_aarch64_finish_dynamic_sections
8079
8080 #define elf_backend_finish_dynamic_symbol \
8081 elfNN_aarch64_finish_dynamic_symbol
8082
8083 #define elf_backend_gc_sweep_hook \
8084 elfNN_aarch64_gc_sweep_hook
8085
8086 #define elf_backend_object_p \
8087 elfNN_aarch64_object_p
8088
8089 #define elf_backend_output_arch_local_syms \
8090 elfNN_aarch64_output_arch_local_syms
8091
8092 #define elf_backend_plt_sym_val \
8093 elfNN_aarch64_plt_sym_val
8094
8095 #define elf_backend_post_process_headers \
8096 elfNN_aarch64_post_process_headers
8097
8098 #define elf_backend_relocate_section \
8099 elfNN_aarch64_relocate_section
8100
8101 #define elf_backend_reloc_type_class \
8102 elfNN_aarch64_reloc_type_class
8103
8104 #define elf_backend_section_from_shdr \
8105 elfNN_aarch64_section_from_shdr
8106
8107 #define elf_backend_size_dynamic_sections \
8108 elfNN_aarch64_size_dynamic_sections
8109
8110 #define elf_backend_size_info \
8111 elfNN_aarch64_size_info
8112
8113 #define elf_backend_write_section \
8114 elfNN_aarch64_write_section
8115
8116 #define elf_backend_can_refcount 1
8117 #define elf_backend_can_gc_sections 1
8118 #define elf_backend_plt_readonly 1
8119 #define elf_backend_want_got_plt 1
8120 #define elf_backend_want_plt_sym 0
8121 #define elf_backend_may_use_rel_p 0
8122 #define elf_backend_may_use_rela_p 1
8123 #define elf_backend_default_use_rela_p 1
8124 #define elf_backend_rela_normal 1
8125 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8126 #define elf_backend_default_execstack 0
8127
8128 #undef elf_backend_obj_attrs_section
8129 #define elf_backend_obj_attrs_section ".ARM.attributes"
8130
8131 #include "elfNN-target.h"