[AArch64, ILP32] 3/6 Support for ELF32 relocs and refactor reloc handling
[deliverable/binutils-gdb.git] / bfd / elfnn-aarch64.c
1 /* AArch64-specific support for NN-bit ELF.
2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35       add   x0, x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanims.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62   global and local TLS symbols.  Note that this is different from
63   non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68   on the module entry.  The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73   will subsequently fix up the offset.  For local TLS symbols the static
74   linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101   spotted.  The local symbol data structures are created once, when
102   the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109   For each global with a positive reference count we allocate a double
110   GOT slot.  For a traditional TLS symbol we allocate space for two
111   relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117   Iterate over all input BFDs, look in the local symbol data structures
118   constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128   relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
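/* A minimal sketch (not code from this file) of the LSB-flag idiom
   described in the notes above.  The helper names are placeholders for
   illustration only, not functions defined in this backend:

     bfd_vma off = got_offset_recorded_for (sym);
     if ((off & 1) == 0)
       {
         emit_got_relocs_for (sym);
         record_got_offset_for (sym, off | 1);
       }
     off &= ~(bfd_vma) 1;
     ... use OFF as the real GOT offset ...  */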
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "elf/aarch64.h"
146
147 #define ARCH_SIZE NN
148
149 #if ARCH_SIZE == 64
150 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
151 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
152 #define HOWTO64(...) HOWTO (__VA_ARGS__)
153 #define HOWTO32(...) EMPTY_HOWTO (0)
154 #define LOG_FILE_ALIGN 3
155 #endif
156
157 #if ARCH_SIZE == 32
158 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
159 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
160 #define HOWTO64(...) EMPTY_HOWTO (0)
161 #define HOWTO32(...) HOWTO (__VA_ARGS__)
162 #define LOG_FILE_ALIGN 2
163 #endif
164
165 static bfd_reloc_status_type
166 bfd_elf_aarch64_put_addend (bfd *abfd,
167 bfd_byte *address,
168 reloc_howto_type *howto, bfd_signed_vma addend);
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
190 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
191
192 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
193 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
205
206 #define ELIMINATE_COPY_RELOCS 0
207
208 /* Return size of a relocation entry. HTAB is the bfd's
209    elf_aarch64_link_hash_table.  */
210 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
211
212 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
213 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
214 #define PLT_ENTRY_SIZE (32)
215 #define PLT_SMALL_ENTRY_SIZE (16)
216 #define PLT_TLSDESC_ENTRY_SIZE (32)
217
218 /* Take the PAGE component of an address or offset. */
219 #define PG(x) ((x) & ~ (bfd_vma) 0xfff)
220 #define PG_OFFSET(x) ((x) & (bfd_vma) 0xfff)
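/* For example (illustrative only), PG (0x12345) is 0x12000 and
   PG_OFFSET (0x12345) is 0x345; an ADRP/ADD or ADRP/LDR pair
   materialises an address from exactly these two components.  */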
221
222 /* Encoding of the nop instruction.  */
223 #define INSN_NOP 0xd503201f
224
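/* Size in bytes of the GOT slots that back the PLT jump table: one
   GOT_ENTRY_SIZE slot per PLT relocation.  */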
225 #define aarch64_compute_jump_table_size(htab) \
226 (((htab)->root.srelplt == NULL) ? 0 \
227 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
228
229 /* The first entry in a procedure linkage table looks like this.
230    If the distance between the PLTGOT and the PLT is < 4GB, use
231 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
232 in x16 and needs to work out PLTGOT[1] by using an address of
233 [x16,#-GOT_ENTRY_SIZE]. */
234 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
235 {
236 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
237 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
238 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
239 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
240 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
244 };
245
246 /* The per-function entry in a procedure linkage table looks like this.
247    If the distance between the PLTGOT and the PLT is < 4GB, use
248 these PLT entries. */
249 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
250 {
251 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
255 };
256
257 static const bfd_byte
258 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
259 {
260 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
261 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
262 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
263 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
264 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
265 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
266 0x1f, 0x20, 0x03, 0xd5, /* nop */
267 0x1f, 0x20, 0x03, 0xd5, /* nop */
268 };
269
270 #define elf_info_to_howto elfNN_aarch64_info_to_howto
271 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
272
273 #define AARCH64_ELF_ABI_VERSION 0
274
275 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
276 #define ALL_ONES (~ (bfd_vma) 0)
277
278 /* Indexed by the bfd internal reloc enumerators.
279 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
280 in reloc.c. */
281
282 static reloc_howto_type elfNN_aarch64_howto_table[] =
283 {
284 EMPTY_HOWTO (0),
285
286 /* Basic data relocations. */
287
288 #if ARCH_SIZE == 64
289 HOWTO (R_AARCH64_NULL, /* type */
290 0, /* rightshift */
291 0, /* size (0 = byte, 1 = short, 2 = long) */
292 0, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_dont, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_NULL", /* name */
298 FALSE, /* partial_inplace */
299 0, /* src_mask */
300 0, /* dst_mask */
301 FALSE), /* pcrel_offset */
302 #else
303 HOWTO (R_AARCH64_NONE, /* type */
304 0, /* rightshift */
305 0, /* size (0 = byte, 1 = short, 2 = long) */
306 0, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_dont, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_NONE", /* name */
312 FALSE, /* partial_inplace */
313 0, /* src_mask */
314 0, /* dst_mask */
315 FALSE), /* pcrel_offset */
316 #endif
317
318 /* .xword: (S+A) */
319 HOWTO64 (AARCH64_R (ABS64), /* type */
320 0, /* rightshift */
321 4, /* size (4 = long long) */
322 64, /* bitsize */
323 FALSE, /* pc_relative */
324 0, /* bitpos */
325 complain_overflow_unsigned, /* complain_on_overflow */
326 bfd_elf_generic_reloc, /* special_function */
327 AARCH64_R_STR (ABS64), /* name */
328 FALSE, /* partial_inplace */
329 ALL_ONES, /* src_mask */
330 ALL_ONES, /* dst_mask */
331 FALSE), /* pcrel_offset */
332
333 /* .word: (S+A) */
334 HOWTO (AARCH64_R (ABS32), /* type */
335 0, /* rightshift */
336 2, /* size (0 = byte, 1 = short, 2 = long) */
337 32, /* bitsize */
338 FALSE, /* pc_relative */
339 0, /* bitpos */
340 complain_overflow_unsigned, /* complain_on_overflow */
341 bfd_elf_generic_reloc, /* special_function */
342 AARCH64_R_STR (ABS32), /* name */
343 FALSE, /* partial_inplace */
344 0xffffffff, /* src_mask */
345 0xffffffff, /* dst_mask */
346 FALSE), /* pcrel_offset */
347
348 /* .half: (S+A) */
349 HOWTO (AARCH64_R (ABS16), /* type */
350 0, /* rightshift */
351 1, /* size (0 = byte, 1 = short, 2 = long) */
352 16, /* bitsize */
353 FALSE, /* pc_relative */
354 0, /* bitpos */
355 complain_overflow_unsigned, /* complain_on_overflow */
356 bfd_elf_generic_reloc, /* special_function */
357 AARCH64_R_STR (ABS16), /* name */
358 FALSE, /* partial_inplace */
359 0xffff, /* src_mask */
360 0xffff, /* dst_mask */
361 FALSE), /* pcrel_offset */
362
363 /* .xword: (S+A-P) */
364 HOWTO64 (AARCH64_R (PREL64), /* type */
365 0, /* rightshift */
366 4, /* size (4 = long long) */
367 64, /* bitsize */
368 TRUE, /* pc_relative */
369 0, /* bitpos */
370 complain_overflow_signed, /* complain_on_overflow */
371 bfd_elf_generic_reloc, /* special_function */
372 AARCH64_R_STR (PREL64), /* name */
373 FALSE, /* partial_inplace */
374 ALL_ONES, /* src_mask */
375 ALL_ONES, /* dst_mask */
376 TRUE), /* pcrel_offset */
377
378 /* .word: (S+A-P) */
379 HOWTO (AARCH64_R (PREL32), /* type */
380 0, /* rightshift */
381 2, /* size (0 = byte, 1 = short, 2 = long) */
382 32, /* bitsize */
383 TRUE, /* pc_relative */
384 0, /* bitpos */
385 complain_overflow_signed, /* complain_on_overflow */
386 bfd_elf_generic_reloc, /* special_function */
387 AARCH64_R_STR (PREL32), /* name */
388 FALSE, /* partial_inplace */
389 0xffffffff, /* src_mask */
390 0xffffffff, /* dst_mask */
391 TRUE), /* pcrel_offset */
392
393 /* .half: (S+A-P) */
394 HOWTO (AARCH64_R (PREL16), /* type */
395 0, /* rightshift */
396 1, /* size (0 = byte, 1 = short, 2 = long) */
397 16, /* bitsize */
398 TRUE, /* pc_relative */
399 0, /* bitpos */
400 complain_overflow_signed, /* complain_on_overflow */
401 bfd_elf_generic_reloc, /* special_function */
402 AARCH64_R_STR (PREL16), /* name */
403 FALSE, /* partial_inplace */
404 0xffff, /* src_mask */
405 0xffff, /* dst_mask */
406 TRUE), /* pcrel_offset */
407
408 /* Group relocations to create a 16, 32, 48 or 64 bit
409 unsigned data or abs address inline. */
410
411 /* MOVZ: ((S+A) >> 0) & 0xffff */
412 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 16, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_unsigned, /* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 AARCH64_R_STR (MOVW_UABS_G0), /* name */
421 FALSE, /* partial_inplace */
422 0xffff, /* src_mask */
423 0xffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
427 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
428 0, /* rightshift */
429 2, /* size (0 = byte, 1 = short, 2 = long) */
430 16, /* bitsize */
431 FALSE, /* pc_relative */
432 0, /* bitpos */
433 complain_overflow_dont, /* complain_on_overflow */
434 bfd_elf_generic_reloc, /* special_function */
435 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
436 FALSE, /* partial_inplace */
437 0xffff, /* src_mask */
438 0xffff, /* dst_mask */
439 FALSE), /* pcrel_offset */
440
441 /* MOVZ: ((S+A) >> 16) & 0xffff */
442 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
443 16, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 16, /* bitsize */
446 FALSE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_unsigned, /* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 AARCH64_R_STR (MOVW_UABS_G1), /* name */
451 FALSE, /* partial_inplace */
452 0xffff, /* src_mask */
453 0xffff, /* dst_mask */
454 FALSE), /* pcrel_offset */
455
456 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
457 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
458 16, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 16, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_dont, /* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
466 FALSE, /* partial_inplace */
467 0xffff, /* src_mask */
468 0xffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 /* MOVZ: ((S+A) >> 32) & 0xffff */
472 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
473 32, /* rightshift */
474 2, /* size (0 = byte, 1 = short, 2 = long) */
475 16, /* bitsize */
476 FALSE, /* pc_relative */
477 0, /* bitpos */
478 complain_overflow_unsigned, /* complain_on_overflow */
479 bfd_elf_generic_reloc, /* special_function */
480 AARCH64_R_STR (MOVW_UABS_G2), /* name */
481 FALSE, /* partial_inplace */
482 0xffff, /* src_mask */
483 0xffff, /* dst_mask */
484 FALSE), /* pcrel_offset */
485
486 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
487 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
488 32, /* rightshift */
489 2, /* size (0 = byte, 1 = short, 2 = long) */
490 16, /* bitsize */
491 FALSE, /* pc_relative */
492 0, /* bitpos */
493 complain_overflow_dont, /* complain_on_overflow */
494 bfd_elf_generic_reloc, /* special_function */
495 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
496 FALSE, /* partial_inplace */
497 0xffff, /* src_mask */
498 0xffff, /* dst_mask */
499 FALSE), /* pcrel_offset */
500
501 /* MOVZ: ((S+A) >> 48) & 0xffff */
502 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
503 48, /* rightshift */
504 2, /* size (0 = byte, 1 = short, 2 = long) */
505 16, /* bitsize */
506 FALSE, /* pc_relative */
507 0, /* bitpos */
508 complain_overflow_unsigned, /* complain_on_overflow */
509 bfd_elf_generic_reloc, /* special_function */
510 AARCH64_R_STR (MOVW_UABS_G3), /* name */
511 FALSE, /* partial_inplace */
512 0xffff, /* src_mask */
513 0xffff, /* dst_mask */
514 FALSE), /* pcrel_offset */
515
516 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
517 signed data or abs address inline. Will change instruction
518 to MOVN or MOVZ depending on sign of calculated value. */
519
520 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
521 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
522 0, /* rightshift */
523 2, /* size (0 = byte, 1 = short, 2 = long) */
524 16, /* bitsize */
525 FALSE, /* pc_relative */
526 0, /* bitpos */
527 complain_overflow_signed, /* complain_on_overflow */
528 bfd_elf_generic_reloc, /* special_function */
529 AARCH64_R_STR (MOVW_SABS_G0), /* name */
530 FALSE, /* partial_inplace */
531 0xffff, /* src_mask */
532 0xffff, /* dst_mask */
533 FALSE), /* pcrel_offset */
534
535 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
536 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
537 16, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 16, /* bitsize */
540 FALSE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_signed, /* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 AARCH64_R_STR (MOVW_SABS_G1), /* name */
545 FALSE, /* partial_inplace */
546 0xffff, /* src_mask */
547 0xffff, /* dst_mask */
548 FALSE), /* pcrel_offset */
549
550 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
551 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
552 32, /* rightshift */
553 2, /* size (0 = byte, 1 = short, 2 = long) */
554 16, /* bitsize */
555 FALSE, /* pc_relative */
556 0, /* bitpos */
557 complain_overflow_signed, /* complain_on_overflow */
558 bfd_elf_generic_reloc, /* special_function */
559 AARCH64_R_STR (MOVW_SABS_G2), /* name */
560 FALSE, /* partial_inplace */
561 0xffff, /* src_mask */
562 0xffff, /* dst_mask */
563 FALSE), /* pcrel_offset */
564
565 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
566 addresses: PG(x) is (x & ~0xfff). */
567
568 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
569 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
570 2, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 19, /* bitsize */
573 TRUE, /* pc_relative */
574 0, /* bitpos */
575 complain_overflow_signed, /* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 AARCH64_R_STR (LD_PREL_LO19), /* name */
578 FALSE, /* partial_inplace */
579 0x7ffff, /* src_mask */
580 0x7ffff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 /* ADR: (S+A-P) & 0x1fffff */
584 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
585 0, /* rightshift */
586 2, /* size (0 = byte, 1 = short, 2 = long) */
587 21, /* bitsize */
588 TRUE, /* pc_relative */
589 0, /* bitpos */
590 complain_overflow_signed, /* complain_on_overflow */
591 bfd_elf_generic_reloc, /* special_function */
592 AARCH64_R_STR (ADR_PREL_LO21), /* name */
593 FALSE, /* partial_inplace */
594 0x1fffff, /* src_mask */
595 0x1fffff, /* dst_mask */
596 TRUE), /* pcrel_offset */
597
598 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
599 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
600 12, /* rightshift */
601 2, /* size (0 = byte, 1 = short, 2 = long) */
602 21, /* bitsize */
603 TRUE, /* pc_relative */
604 0, /* bitpos */
605 complain_overflow_signed, /* complain_on_overflow */
606 bfd_elf_generic_reloc, /* special_function */
607 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
608 FALSE, /* partial_inplace */
609 0x1fffff, /* src_mask */
610 0x1fffff, /* dst_mask */
611 TRUE), /* pcrel_offset */
612
613 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
614 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
615 12, /* rightshift */
616 2, /* size (0 = byte, 1 = short, 2 = long) */
617 21, /* bitsize */
618 TRUE, /* pc_relative */
619 0, /* bitpos */
620 complain_overflow_dont, /* complain_on_overflow */
621 bfd_elf_generic_reloc, /* special_function */
622 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
623 FALSE, /* partial_inplace */
624 0x1fffff, /* src_mask */
625 0x1fffff, /* dst_mask */
626 TRUE), /* pcrel_offset */
627
628 /* ADD: (S+A) & 0xfff [no overflow check] */
629 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
630 0, /* rightshift */
631 2, /* size (0 = byte, 1 = short, 2 = long) */
632 12, /* bitsize */
633 FALSE, /* pc_relative */
634 10, /* bitpos */
635 complain_overflow_dont, /* complain_on_overflow */
636 bfd_elf_generic_reloc, /* special_function */
637 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
638 FALSE, /* partial_inplace */
639 0x3ffc00, /* src_mask */
640 0x3ffc00, /* dst_mask */
641 FALSE), /* pcrel_offset */
642
643 /* LD/ST8: (S+A) & 0xfff */
644 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
645 0, /* rightshift */
646 2, /* size (0 = byte, 1 = short, 2 = long) */
647 12, /* bitsize */
648 FALSE, /* pc_relative */
649 0, /* bitpos */
650 complain_overflow_dont, /* complain_on_overflow */
651 bfd_elf_generic_reloc, /* special_function */
652 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
653 FALSE, /* partial_inplace */
654 0xfff, /* src_mask */
655 0xfff, /* dst_mask */
656 FALSE), /* pcrel_offset */
657
658 /* Relocations for control-flow instructions. */
659
660 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
661 HOWTO (AARCH64_R (TSTBR14), /* type */
662 2, /* rightshift */
663 2, /* size (0 = byte, 1 = short, 2 = long) */
664 14, /* bitsize */
665 TRUE, /* pc_relative */
666 0, /* bitpos */
667 complain_overflow_signed, /* complain_on_overflow */
668 bfd_elf_generic_reloc, /* special_function */
669 AARCH64_R_STR (TSTBR14), /* name */
670 FALSE, /* partial_inplace */
671 0x3fff, /* src_mask */
672 0x3fff, /* dst_mask */
673 TRUE), /* pcrel_offset */
674
675 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (AARCH64_R (CONDBR19), /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 AARCH64_R_STR (CONDBR19), /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* B: ((S+A-P) >> 2) & 0x3ffffff */
691 HOWTO (AARCH64_R (JUMP26), /* type */
692 2, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 26, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 AARCH64_R_STR (JUMP26), /* name */
700 FALSE, /* partial_inplace */
701 0x3ffffff, /* src_mask */
702 0x3ffffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
706 HOWTO (AARCH64_R (CALL26), /* type */
707 2, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 26, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 AARCH64_R_STR (CALL26), /* name */
715 FALSE, /* partial_inplace */
716 0x3ffffff, /* src_mask */
717 0x3ffffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* LD/ST16: (S+A) & 0xffe */
721 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
722 1, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 12, /* bitsize */
725 FALSE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
730 FALSE, /* partial_inplace */
731 0xffe, /* src_mask */
732 0xffe, /* dst_mask */
733 FALSE), /* pcrel_offset */
734
735 /* LD/ST32: (S+A) & 0xffc */
736 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
737 2, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
745 FALSE, /* partial_inplace */
746 0xffc, /* src_mask */
747 0xffc, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST64: (S+A) & 0xff8 */
751 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
752 3, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
760 FALSE, /* partial_inplace */
761 0xff8, /* src_mask */
762 0xff8, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* LD/ST128: (S+A) & 0xff0 */
766 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
767 4, /* rightshift */
768 2, /* size (0 = byte, 1 = short, 2 = long) */
769 12, /* bitsize */
770 FALSE, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
775 FALSE, /* partial_inplace */
776 0xff0, /* src_mask */
777 0xff0, /* dst_mask */
778 FALSE), /* pcrel_offset */
779
780 /* Set a load-literal immediate field to bits
781 0x1FFFFC of G(S)-P */
782 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
783 2, /* rightshift */
784 2, /* size (0 = byte,1 = short,2 = long) */
785 19, /* bitsize */
786 TRUE, /* pc_relative */
787 0, /* bitpos */
788 complain_overflow_signed, /* complain_on_overflow */
789 bfd_elf_generic_reloc, /* special_function */
790 AARCH64_R_STR (GOT_LD_PREL19), /* name */
791 FALSE, /* partial_inplace */
792 0xffffe0, /* src_mask */
793 0xffffe0, /* dst_mask */
794 TRUE), /* pcrel_offset */
795
796 /* Get to the page for the GOT entry for the symbol
797 (G(S) - P) using an ADRP instruction. */
798 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
799 12, /* rightshift */
800 2, /* size (0 = byte, 1 = short, 2 = long) */
801 21, /* bitsize */
802 TRUE, /* pc_relative */
803 0, /* bitpos */
804 complain_overflow_dont, /* complain_on_overflow */
805 bfd_elf_generic_reloc, /* special_function */
806 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
807 FALSE, /* partial_inplace */
808 0x1fffff, /* src_mask */
809 0x1fffff, /* dst_mask */
810 TRUE), /* pcrel_offset */
811
812 /* LD64: GOT offset G(S) & 0xff8 */
813 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
814 3, /* rightshift */
815 2, /* size (0 = byte, 1 = short, 2 = long) */
816 12, /* bitsize */
817 FALSE, /* pc_relative */
818 0, /* bitpos */
819 complain_overflow_dont, /* complain_on_overflow */
820 bfd_elf_generic_reloc, /* special_function */
821 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
822 FALSE, /* partial_inplace */
823 0xff8, /* src_mask */
824 0xff8, /* dst_mask */
825 FALSE), /* pcrel_offset */
826
827 /* LD32: GOT offset G(S) & 0xffc */
828 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
829 2, /* rightshift */
830 2, /* size (0 = byte, 1 = short, 2 = long) */
831 12, /* bitsize */
832 FALSE, /* pc_relative */
833 0, /* bitpos */
834 complain_overflow_dont, /* complain_on_overflow */
835 bfd_elf_generic_reloc, /* special_function */
836 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
837 FALSE, /* partial_inplace */
838 0xffc, /* src_mask */
839 0xffc, /* dst_mask */
840 FALSE), /* pcrel_offset */
841
842 /* Get to the page for the GOT entry for the symbol
843 (G(S) - P) using an ADRP instruction. */
844 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
845 12, /* rightshift */
846 2, /* size (0 = byte, 1 = short, 2 = long) */
847 21, /* bitsize */
848 TRUE, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_dont, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
853 FALSE, /* partial_inplace */
854 0x1fffff, /* src_mask */
855 0x1fffff, /* dst_mask */
856 TRUE), /* pcrel_offset */
857
858 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
859 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
860 0, /* rightshift */
861 2, /* size (0 = byte, 1 = short, 2 = long) */
862 12, /* bitsize */
863 FALSE, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_dont, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
868 FALSE, /* partial_inplace */
869 0xfff, /* src_mask */
870 0xfff, /* dst_mask */
871 FALSE), /* pcrel_offset */
872
873 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
874 16, /* rightshift */
875 2, /* size (0 = byte, 1 = short, 2 = long) */
876 16, /* bitsize */
877 FALSE, /* pc_relative */
878 0, /* bitpos */
879 complain_overflow_dont, /* complain_on_overflow */
880 bfd_elf_generic_reloc, /* special_function */
881 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
882 FALSE, /* partial_inplace */
883 0xffff, /* src_mask */
884 0xffff, /* dst_mask */
885 FALSE), /* pcrel_offset */
886
887 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
888 0, /* rightshift */
889 2, /* size (0 = byte, 1 = short, 2 = long) */
890 32, /* bitsize */
891 FALSE, /* pc_relative */
892 0, /* bitpos */
893 complain_overflow_dont, /* complain_on_overflow */
894 bfd_elf_generic_reloc, /* special_function */
895 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
896 FALSE, /* partial_inplace */
897 0xffff, /* src_mask */
898 0xffff, /* dst_mask */
899 FALSE), /* pcrel_offset */
900
901 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
902 12, /* rightshift */
903 2, /* size (0 = byte, 1 = short, 2 = long) */
904 21, /* bitsize */
905 FALSE, /* pc_relative */
906 0, /* bitpos */
907 complain_overflow_dont, /* complain_on_overflow */
908 bfd_elf_generic_reloc, /* special_function */
909 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
910 FALSE, /* partial_inplace */
911 0x1fffff, /* src_mask */
912 0x1fffff, /* dst_mask */
913 FALSE), /* pcrel_offset */
914
915 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
916 3, /* rightshift */
917 2, /* size (0 = byte, 1 = short, 2 = long) */
918 12, /* bitsize */
919 FALSE, /* pc_relative */
920 0, /* bitpos */
921 complain_overflow_dont, /* complain_on_overflow */
922 bfd_elf_generic_reloc, /* special_function */
923 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
924 FALSE, /* partial_inplace */
925 0xff8, /* src_mask */
926 0xff8, /* dst_mask */
927 FALSE), /* pcrel_offset */
928
929 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
930 2, /* rightshift */
931 2, /* size (0 = byte, 1 = short, 2 = long) */
932 12, /* bitsize */
933 FALSE, /* pc_relative */
934 0, /* bitpos */
935 complain_overflow_dont, /* complain_on_overflow */
936 bfd_elf_generic_reloc, /* special_function */
937 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
938 FALSE, /* partial_inplace */
939 0xffc, /* src_mask */
940 0xffc, /* dst_mask */
941 FALSE), /* pcrel_offset */
942
943 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
944 2, /* rightshift */
945 2, /* size (0 = byte, 1 = short, 2 = long) */
946 21, /* bitsize */
947 FALSE, /* pc_relative */
948 0, /* bitpos */
949 complain_overflow_dont, /* complain_on_overflow */
950 bfd_elf_generic_reloc, /* special_function */
951 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
952 FALSE, /* partial_inplace */
953 0x1ffffc, /* src_mask */
954 0x1ffffc, /* dst_mask */
955 FALSE), /* pcrel_offset */
956
957 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
958 32, /* rightshift */
959 2, /* size (0 = byte, 1 = short, 2 = long) */
960 12, /* bitsize */
961 FALSE, /* pc_relative */
962 0, /* bitpos */
963 complain_overflow_dont, /* complain_on_overflow */
964 bfd_elf_generic_reloc, /* special_function */
965 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
966 FALSE, /* partial_inplace */
967 0xffff, /* src_mask */
968 0xffff, /* dst_mask */
969 FALSE), /* pcrel_offset */
970
971 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
972 16, /* rightshift */
973 2, /* size (0 = byte, 1 = short, 2 = long) */
974 12, /* bitsize */
975 FALSE, /* pc_relative */
976 0, /* bitpos */
977 complain_overflow_dont, /* complain_on_overflow */
978 bfd_elf_generic_reloc, /* special_function */
979 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
980 FALSE, /* partial_inplace */
981 0xffff, /* src_mask */
982 0xffff, /* dst_mask */
983 FALSE), /* pcrel_offset */
984
985 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
986 16, /* rightshift */
987 2, /* size (0 = byte, 1 = short, 2 = long) */
988 12, /* bitsize */
989 FALSE, /* pc_relative */
990 0, /* bitpos */
991 complain_overflow_dont, /* complain_on_overflow */
992 bfd_elf_generic_reloc, /* special_function */
993 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
994 FALSE, /* partial_inplace */
995 0xffff, /* src_mask */
996 0xffff, /* dst_mask */
997 FALSE), /* pcrel_offset */
998
999 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1000 0, /* rightshift */
1001 2, /* size (0 = byte, 1 = short, 2 = long) */
1002 12, /* bitsize */
1003 FALSE, /* pc_relative */
1004 0, /* bitpos */
1005 complain_overflow_dont, /* complain_on_overflow */
1006 bfd_elf_generic_reloc, /* special_function */
1007 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1008 FALSE, /* partial_inplace */
1009 0xffff, /* src_mask */
1010 0xffff, /* dst_mask */
1011 FALSE), /* pcrel_offset */
1012
1013 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1014 0, /* rightshift */
1015 2, /* size (0 = byte, 1 = short, 2 = long) */
1016 12, /* bitsize */
1017 FALSE, /* pc_relative */
1018 0, /* bitpos */
1019 complain_overflow_dont, /* complain_on_overflow */
1020 bfd_elf_generic_reloc, /* special_function */
1021 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1022 FALSE, /* partial_inplace */
1023 0xffff, /* src_mask */
1024 0xffff, /* dst_mask */
1025 FALSE), /* pcrel_offset */
1026
1027 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1028 12, /* rightshift */
1029 2, /* size (0 = byte, 1 = short, 2 = long) */
1030 12, /* bitsize */
1031 FALSE, /* pc_relative */
1032 0, /* bitpos */
1033 complain_overflow_dont, /* complain_on_overflow */
1034 bfd_elf_generic_reloc, /* special_function */
1035 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1036 FALSE, /* partial_inplace */
1037 0xfff, /* src_mask */
1038 0xfff, /* dst_mask */
1039 FALSE), /* pcrel_offset */
1040
1041 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1042 0, /* rightshift */
1043 2, /* size (0 = byte, 1 = short, 2 = long) */
1044 12, /* bitsize */
1045 FALSE, /* pc_relative */
1046 0, /* bitpos */
1047 complain_overflow_dont, /* complain_on_overflow */
1048 bfd_elf_generic_reloc, /* special_function */
1049 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1050 FALSE, /* partial_inplace */
1051 0xfff, /* src_mask */
1052 0xfff, /* dst_mask */
1053 FALSE), /* pcrel_offset */
1054
1055 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1056 0, /* rightshift */
1057 2, /* size (0 = byte, 1 = short, 2 = long) */
1058 12, /* bitsize */
1059 FALSE, /* pc_relative */
1060 0, /* bitpos */
1061 complain_overflow_dont, /* complain_on_overflow */
1062 bfd_elf_generic_reloc, /* special_function */
1063 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1064 FALSE, /* partial_inplace */
1065 0xfff, /* src_mask */
1066 0xfff, /* dst_mask */
1067 FALSE), /* pcrel_offset */
1068
1069 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1070 2, /* rightshift */
1071 2, /* size (0 = byte, 1 = short, 2 = long) */
1072 21, /* bitsize */
1073 TRUE, /* pc_relative */
1074 0, /* bitpos */
1075 complain_overflow_dont, /* complain_on_overflow */
1076 bfd_elf_generic_reloc, /* special_function */
1077 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1078 FALSE, /* partial_inplace */
1079 0x1ffffc, /* src_mask */
1080 0x1ffffc, /* dst_mask */
1081 TRUE), /* pcrel_offset */
1082
1083 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1084 0, /* rightshift */
1085 2, /* size (0 = byte, 1 = short, 2 = long) */
1086 21, /* bitsize */
1087 TRUE, /* pc_relative */
1088 0, /* bitpos */
1089 complain_overflow_dont, /* complain_on_overflow */
1090 bfd_elf_generic_reloc, /* special_function */
1091 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1092 FALSE, /* partial_inplace */
1093 0x1fffff, /* src_mask */
1094 0x1fffff, /* dst_mask */
1095 TRUE), /* pcrel_offset */
1096
1097 /* Get to the page for the GOT entry for the symbol
1098 (G(S) - P) using an ADRP instruction. */
1099 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1100 12, /* rightshift */
1101 2, /* size (0 = byte, 1 = short, 2 = long) */
1102 21, /* bitsize */
1103 TRUE, /* pc_relative */
1104 0, /* bitpos */
1105 complain_overflow_dont, /* complain_on_overflow */
1106 bfd_elf_generic_reloc, /* special_function */
1107 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1108 FALSE, /* partial_inplace */
1109 0x1fffff, /* src_mask */
1110 0x1fffff, /* dst_mask */
1111 TRUE), /* pcrel_offset */
1112
1113 /* LD64: GOT offset G(S) & 0xff8. */
1114 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1115 3, /* rightshift */
1116 2, /* size (0 = byte, 1 = short, 2 = long) */
1117 12, /* bitsize */
1118 FALSE, /* pc_relative */
1119 0, /* bitpos */
1120 complain_overflow_dont, /* complain_on_overflow */
1121 bfd_elf_generic_reloc, /* special_function */
1122 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1123 FALSE, /* partial_inplace */
1124 0xff8, /* src_mask */
1125 0xff8, /* dst_mask */
1126 FALSE), /* pcrel_offset */
1127
1128 /* LD32: GOT offset G(S) & 0xffc. */
1129 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1130 2, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 12, /* bitsize */
1133 FALSE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont, /* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1138 FALSE, /* partial_inplace */
1139 0xffc, /* src_mask */
1140 0xffc, /* dst_mask */
1141 FALSE), /* pcrel_offset */
1142
1143 /* ADD: GOT offset G(S) & 0xfff. */
1144 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1145 0, /* rightshift */
1146 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 12, /* bitsize */
1148 FALSE, /* pc_relative */
1149 0, /* bitpos */
1150 complain_overflow_dont, /* complain_on_overflow */
1151 bfd_elf_generic_reloc, /* special_function */
1152 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1153 FALSE, /* partial_inplace */
1154 0xfff, /* src_mask */
1155 0xfff, /* dst_mask */
1156 FALSE), /* pcrel_offset */
1157
1158 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1159 16, /* rightshift */
1160 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 12, /* bitsize */
1162 FALSE, /* pc_relative */
1163 0, /* bitpos */
1164 complain_overflow_dont, /* complain_on_overflow */
1165 bfd_elf_generic_reloc, /* special_function */
1166 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1167 FALSE, /* partial_inplace */
1168 0xffff, /* src_mask */
1169 0xffff, /* dst_mask */
1170 FALSE), /* pcrel_offset */
1171
1172 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1173 0, /* rightshift */
1174 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 12, /* bitsize */
1176 FALSE, /* pc_relative */
1177 0, /* bitpos */
1178 complain_overflow_dont, /* complain_on_overflow */
1179 bfd_elf_generic_reloc, /* special_function */
1180 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1181 FALSE, /* partial_inplace */
1182 0xffff, /* src_mask */
1183 0xffff, /* dst_mask */
1184 FALSE), /* pcrel_offset */
1185
1186 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1187 0, /* rightshift */
1188 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 12, /* bitsize */
1190 FALSE, /* pc_relative */
1191 0, /* bitpos */
1192 complain_overflow_dont, /* complain_on_overflow */
1193 bfd_elf_generic_reloc, /* special_function */
1194 AARCH64_R_STR (TLSDESC_LDR), /* name */
1195 FALSE, /* partial_inplace */
1196 0x0, /* src_mask */
1197 0x0, /* dst_mask */
1198 FALSE), /* pcrel_offset */
1199
1200 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1201 0, /* rightshift */
1202 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 12, /* bitsize */
1204 FALSE, /* pc_relative */
1205 0, /* bitpos */
1206 complain_overflow_dont, /* complain_on_overflow */
1207 bfd_elf_generic_reloc, /* special_function */
1208 AARCH64_R_STR (TLSDESC_ADD), /* name */
1209 FALSE, /* partial_inplace */
1210 0x0, /* src_mask */
1211 0x0, /* dst_mask */
1212 FALSE), /* pcrel_offset */
1213
1214 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1215 0, /* rightshift */
1216 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 12, /* bitsize */
1218 FALSE, /* pc_relative */
1219 0, /* bitpos */
1220 complain_overflow_dont, /* complain_on_overflow */
1221 bfd_elf_generic_reloc, /* special_function */
1222 AARCH64_R_STR (TLSDESC_CALL), /* name */
1223 FALSE, /* partial_inplace */
1224 0x0, /* src_mask */
1225 0x0, /* dst_mask */
1226 FALSE), /* pcrel_offset */
1227
1228 HOWTO (AARCH64_R (COPY), /* type */
1229 0, /* rightshift */
1230 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 64, /* bitsize */
1232 FALSE, /* pc_relative */
1233 0, /* bitpos */
1234 complain_overflow_bitfield, /* complain_on_overflow */
1235 bfd_elf_generic_reloc, /* special_function */
1236 AARCH64_R_STR (COPY), /* name */
1237 TRUE, /* partial_inplace */
1238 0xffffffff, /* src_mask */
1239 0xffffffff, /* dst_mask */
1240 FALSE), /* pcrel_offset */
1241
1242 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1243 0, /* rightshift */
1244 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 64, /* bitsize */
1246 FALSE, /* pc_relative */
1247 0, /* bitpos */
1248 complain_overflow_bitfield, /* complain_on_overflow */
1249 bfd_elf_generic_reloc, /* special_function */
1250 AARCH64_R_STR (GLOB_DAT), /* name */
1251 TRUE, /* partial_inplace */
1252 0xffffffff, /* src_mask */
1253 0xffffffff, /* dst_mask */
1254 FALSE), /* pcrel_offset */
1255
1256 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1257 0, /* rightshift */
1258 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 64, /* bitsize */
1260 FALSE, /* pc_relative */
1261 0, /* bitpos */
1262 complain_overflow_bitfield, /* complain_on_overflow */
1263 bfd_elf_generic_reloc, /* special_function */
1264 AARCH64_R_STR (JUMP_SLOT), /* name */
1265 TRUE, /* partial_inplace */
1266 0xffffffff, /* src_mask */
1267 0xffffffff, /* dst_mask */
1268 FALSE), /* pcrel_offset */
1269
1270 HOWTO (AARCH64_R (RELATIVE), /* type */
1271 0, /* rightshift */
1272 2, /* size (0 = byte, 1 = short, 2 = long) */
1273 64, /* bitsize */
1274 FALSE, /* pc_relative */
1275 0, /* bitpos */
1276 complain_overflow_bitfield, /* complain_on_overflow */
1277 bfd_elf_generic_reloc, /* special_function */
1278 AARCH64_R_STR (RELATIVE), /* name */
1279 TRUE, /* partial_inplace */
1280 ALL_ONES, /* src_mask */
1281 ALL_ONES, /* dst_mask */
1282 FALSE), /* pcrel_offset */
1283
1284 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1285 0, /* rightshift */
1286 2, /* size (0 = byte, 1 = short, 2 = long) */
1287 64, /* bitsize */
1288 FALSE, /* pc_relative */
1289 0, /* bitpos */
1290 complain_overflow_dont, /* complain_on_overflow */
1291 bfd_elf_generic_reloc, /* special_function */
1292 AARCH64_R_STR (TLS_DTPMOD), /* name */
1293 FALSE, /* partial_inplace */
1294 0, /* src_mask */
1295 ALL_ONES, /* dst_mask */
1296 	 FALSE),		/* pcrel_offset */
1297
1298 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1299 0, /* rightshift */
1300 2, /* size (0 = byte, 1 = short, 2 = long) */
1301 64, /* bitsize */
1302 FALSE, /* pc_relative */
1303 0, /* bitpos */
1304 complain_overflow_dont, /* complain_on_overflow */
1305 bfd_elf_generic_reloc, /* special_function */
1306 AARCH64_R_STR (TLS_DTPREL), /* name */
1307 FALSE, /* partial_inplace */
1308 0, /* src_mask */
1309 ALL_ONES, /* dst_mask */
1310 FALSE), /* pcrel_offset */
1311
1312 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1313 0, /* rightshift */
1314 2, /* size (0 = byte, 1 = short, 2 = long) */
1315 64, /* bitsize */
1316 FALSE, /* pc_relative */
1317 0, /* bitpos */
1318 complain_overflow_dont, /* complain_on_overflow */
1319 bfd_elf_generic_reloc, /* special_function */
1320 AARCH64_R_STR (TLS_TPREL), /* name */
1321 FALSE, /* partial_inplace */
1322 0, /* src_mask */
1323 ALL_ONES, /* dst_mask */
1324 FALSE), /* pcrel_offset */
1325
1326 HOWTO (AARCH64_R (TLSDESC), /* type */
1327 0, /* rightshift */
1328 2, /* size (0 = byte, 1 = short, 2 = long) */
1329 64, /* bitsize */
1330 FALSE, /* pc_relative */
1331 0, /* bitpos */
1332 complain_overflow_dont, /* complain_on_overflow */
1333 bfd_elf_generic_reloc, /* special_function */
1334 AARCH64_R_STR (TLSDESC), /* name */
1335 FALSE, /* partial_inplace */
1336 0, /* src_mask */
1337 ALL_ONES, /* dst_mask */
1338 FALSE), /* pcrel_offset */
1339
1340 HOWTO (AARCH64_R (IRELATIVE), /* type */
1341 0, /* rightshift */
1342 2, /* size (0 = byte, 1 = short, 2 = long) */
1343 64, /* bitsize */
1344 FALSE, /* pc_relative */
1345 0, /* bitpos */
1346 complain_overflow_bitfield, /* complain_on_overflow */
1347 bfd_elf_generic_reloc, /* special_function */
1348 AARCH64_R_STR (IRELATIVE), /* name */
1349 FALSE, /* partial_inplace */
1350 0, /* src_mask */
1351 ALL_ONES, /* dst_mask */
1352 FALSE), /* pcrel_offset */
1353
1354 EMPTY_HOWTO (0),
1355 };
1356
1357 static reloc_howto_type elfNN_aarch64_howto_none =
1358 HOWTO (R_AARCH64_NONE, /* type */
1359 0, /* rightshift */
1360 0, /* size (0 = byte, 1 = short, 2 = long) */
1361 0, /* bitsize */
1362 FALSE, /* pc_relative */
1363 0, /* bitpos */
1364 complain_overflow_dont,/* complain_on_overflow */
1365 bfd_elf_generic_reloc, /* special_function */
1366 "R_AARCH64_NONE", /* name */
1367 FALSE, /* partial_inplace */
1368 0, /* src_mask */
1369 0, /* dst_mask */
1370 FALSE); /* pcrel_offset */
1371
1372 /* Given HOWTO, return the bfd internal relocation enumerator. */
1373
1374 static bfd_reloc_code_real_type
1375 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1376 {
1377 const int size
1378 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1379 const ptrdiff_t offset
1380 = howto - elfNN_aarch64_howto_table;
1381
1382 if (offset > 0 && offset < size - 1)
1383 return BFD_RELOC_AARCH64_RELOC_START + offset;
1384
1385 if (howto == &elfNN_aarch64_howto_none)
1386 return BFD_RELOC_AARCH64_NONE;
1387
1388 return BFD_RELOC_AARCH64_RELOC_START;
1389 }
1390
1391 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1392
1393 static bfd_reloc_code_real_type
1394 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1395 {
1396 static bfd_boolean initialized_p = FALSE;
1397 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1398 static unsigned int offsets[R_AARCH64_end];
1399
1400 if (initialized_p == FALSE)
1401 {
1402 unsigned int i;
1403
1404 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1405 if (elfNN_aarch64_howto_table[i].type != 0)
1406 offsets[elfNN_aarch64_howto_table[i].type] = i;
1407
1408 initialized_p = TRUE;
1409 }
1410
1411 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1412 return BFD_RELOC_AARCH64_NONE;
1413
1414 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1415 }
1416
1417 struct elf_aarch64_reloc_map
1418 {
1419 bfd_reloc_code_real_type from;
1420 bfd_reloc_code_real_type to;
1421 };
1422
1423 /* Map bfd generic reloc to AArch64-specific reloc. */
1424 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1425 {
1426 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1427
1428 /* Basic data relocations. */
1429 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1430 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1431 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1432 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1433 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1434 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1435 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1436 };
1437
1438 /* Given the bfd internal relocation enumerator in CODE, return the
1439 corresponding howto entry. */
1440
1441 static reloc_howto_type *
1442 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1443 {
1444 unsigned int i;
1445
1446 /* Convert bfd generic reloc to AArch64-specific reloc. */
1447 if (code < BFD_RELOC_AARCH64_RELOC_START
1448 || code > BFD_RELOC_AARCH64_RELOC_END)
1449 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1450 if (elf_aarch64_reloc_map[i].from == code)
1451 {
1452 code = elf_aarch64_reloc_map[i].to;
1453 break;
1454 }
1455
1456 if (code > BFD_RELOC_AARCH64_RELOC_START
1457 && code < BFD_RELOC_AARCH64_RELOC_END)
1458 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1459 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1460
1461 return NULL;
1462 }
1463
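/* Given the ELF relocation type R_TYPE, return the corresponding howto
   entry, or NULL (with bfd_error_bad_value set) if the type is not
   recognised.  */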
1464 static reloc_howto_type *
1465 elfNN_aarch64_howto_from_type (unsigned int r_type)
1466 {
1467 bfd_reloc_code_real_type val;
1468 reloc_howto_type *howto;
1469
1470 #if ARCH_SIZE == 32
1471 if (r_type > 256)
1472 {
1473 bfd_set_error (bfd_error_bad_value);
1474 return NULL;
1475 }
1476 #endif
1477
1478 if (r_type == R_AARCH64_NONE)
1479 return &elfNN_aarch64_howto_none;
1480
1481 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1482 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1483
1484 if (howto != NULL)
1485 return howto;
1486
1487 bfd_set_error (bfd_error_bad_value);
1488 return NULL;
1489 }
1490
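/* Set the howto pointer of BFD_RELOC from the relocation type encoded
   in ELF_RELOC's r_info field.  */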
1491 static void
1492 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1493 Elf_Internal_Rela *elf_reloc)
1494 {
1495 unsigned int r_type;
1496
1497 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1498 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1499 }
1500
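/* Map the BFD relocation code CODE to a howto entry; set
   bfd_error_bad_value and return NULL if there is no match.  */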
1501 static reloc_howto_type *
1502 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1503 bfd_reloc_code_real_type code)
1504 {
1505 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1506
1507 if (howto != NULL)
1508 return howto;
1509
1510 bfd_set_error (bfd_error_bad_value);
1511 return NULL;
1512 }
1513
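/* Look up a howto entry by relocation name R_NAME (case-insensitive);
   return NULL if no entry matches.  */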
1514 static reloc_howto_type *
1515 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1516 const char *r_name)
1517 {
1518 unsigned int i;
1519
1520 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1521 if (elfNN_aarch64_howto_table[i].name != NULL
1522 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1523 return &elfNN_aarch64_howto_table[i];
1524
1525 return NULL;
1526 }
1527
1528 /* Support for core dump NOTE sections. */
1529
1530 static bfd_boolean
1531 elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1532 {
1533 int offset;
1534 size_t size;
1535
1536 switch (note->descsz)
1537 {
1538 default:
1539 return FALSE;
1540
1541 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1542 /* pr_cursig */
1543 elf_tdata (abfd)->core->signal
1544 = bfd_get_16 (abfd, note->descdata + 12);
1545
1546 /* pr_pid */
1547 elf_tdata (abfd)->core->lwpid
1548 = bfd_get_32 (abfd, note->descdata + 32);
1549
1550 /* pr_reg */
1551 offset = 112;
1552 size = 272;
1553
1554 break;
1555 }
1556
1557 /* Make a ".reg/999" section. */
1558 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1559 size, note->descpos + offset);
1560 }
1561
1562 #define TARGET_LITTLE_SYM bfd_elfNN_littleaarch64_vec
1563 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1564 #define TARGET_BIG_SYM bfd_elfNN_bigaarch64_vec
1565 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1566
1567 #define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1568
1569 typedef unsigned long int insn32;
1570
1571 /* The linker script knows the section names for placement.
1572 The entry_names are used to do simple name mangling on the stubs.
1573 Given a function name, and its type, the stub can be found. The
1574    name can be changed.  The only requirement is that the %s be present.  */
1575 #define STUB_ENTRY_NAME "__%s_veneer"
1576
1577 /* The name of the dynamic interpreter. This is put in the .interp
1578 section. */
1579 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1580
1581 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1582 (((1 << 25) - 1) << 2)
1583 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1584 (-((1 << 25) << 2))
1585
1586 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1587 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1588
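/* Return non-zero if an ADRP at PLACE can reach VALUE, i.e. if the page
   offset between the two fits in the signed 21-bit ADRP immediate
   (roughly +/-4GB).  */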
1589 static int
1590 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1591 {
1592 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1593 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1594 }
1595
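/* Return non-zero if a direct B or BL at PLACE can reach VALUE, i.e. if
   the byte offset fits in the signed 26-bit word-aligned branch
   immediate (roughly +/-128MB).  */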
1596 static int
1597 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1598 {
1599 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1600 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1601 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1602 }
1603
1604 static const uint32_t aarch64_adrp_branch_stub [] =
1605 {
1606 0x90000010, /* adrp ip0, X */
1607 /* R_AARCH64_ADR_HI21_PCREL(X) */
1608 0x91000210, /* add ip0, ip0, :lo12:X */
1609 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1610 0xd61f0200, /* br ip0 */
1611 };
1612
1613 static const uint32_t aarch64_long_branch_stub[] =
1614 {
1615 #if ARCH_SIZE == 64
1616 0x58000090, /* ldr ip0, 1f */
1617 #else
1618 0x18000090, /* ldr wip0, 1f */
1619 #endif
1620 0x10000011, /* adr ip1, #0 */
1621 0x8b110210, /* add ip0, ip0, ip1 */
1622 0xd61f0200, /* br ip0 */
1623 0x00000000, /* 1: .xword or .word
1624 R_AARCH64_PRELNN(X) + 12
1625 */
1626 0x00000000,
1627 };
1628
1629 /* Section name for stubs is the associated section name plus this
1630 string. */
1631 #define STUB_SUFFIX ".stub"
1632
1633 enum elf_aarch64_stub_type
1634 {
1635 aarch64_stub_none,
1636 aarch64_stub_adrp_branch,
1637 aarch64_stub_long_branch,
1638 };
1639
1640 struct elf_aarch64_stub_hash_entry
1641 {
1642 /* Base hash table entry structure. */
1643 struct bfd_hash_entry root;
1644
1645 /* The stub section. */
1646 asection *stub_sec;
1647
1648 /* Offset within stub_sec of the beginning of this stub. */
1649 bfd_vma stub_offset;
1650
1651 /* Given the symbol's value and its section we can determine its final
1652 value when building the stubs (so the stub knows where to jump). */
1653 bfd_vma target_value;
1654 asection *target_section;
1655
1656 enum elf_aarch64_stub_type stub_type;
1657
1658 /* The symbol table entry, if any, that this was derived from. */
1659 struct elf_aarch64_link_hash_entry *h;
1660
1661 /* Destination symbol type */
1662 unsigned char st_type;
1663
1664 /* Where this stub is being called from, or, in the case of combined
1665 stub sections, the first input section in the group. */
1666 asection *id_sec;
1667
1668 /* The name for the local symbol at the start of this stub. The
1669 stub name in the hash table has to be unique; this does not, so
1670 it can be friendlier. */
1671 char *output_name;
1672 };
1673
1674 /* Used to build a map of a section. This is required for mixed-endian
1675 code/data. */
1676
1677 typedef struct elf_elf_section_map
1678 {
1679 bfd_vma vma;
1680 char type;
1681 }
1682 elf_aarch64_section_map;
1683
1684
1685 typedef struct _aarch64_elf_section_data
1686 {
1687 struct bfd_elf_section_data elf;
1688 unsigned int mapcount;
1689 unsigned int mapsize;
1690 elf_aarch64_section_map *map;
1691 }
1692 _aarch64_elf_section_data;
1693
1694 #define elf_aarch64_section_data(sec) \
1695 ((_aarch64_elf_section_data *) elf_section_data (sec))
1696
1697 /* The size of the thread control block. */
1698 #define TCB_SIZE 16
1699
1700 struct elf_aarch64_local_symbol
1701 {
1702 unsigned int got_type;
1703 bfd_signed_vma got_refcount;
1704 bfd_vma got_offset;
1705
1706 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1707 offset is from the end of the jump table and reserved entries
1708 within the PLTGOT.
1709
1710    The magic value (bfd_vma) -1 indicates that an offset has not been
1711    allocated. */
1712 bfd_vma tlsdesc_got_jump_table_offset;
1713 };
1714
1715 struct elf_aarch64_obj_tdata
1716 {
1717 struct elf_obj_tdata root;
1718
1719 /* local symbol descriptors */
1720 struct elf_aarch64_local_symbol *locals;
1721
1722 /* Zero to warn when linking objects with incompatible enum sizes. */
1723 int no_enum_size_warning;
1724
1725 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1726 int no_wchar_size_warning;
1727 };
1728
1729 #define elf_aarch64_tdata(bfd) \
1730 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1731
1732 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1733
1734 #define is_aarch64_elf(bfd) \
1735 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1736 && elf_tdata (bfd) != NULL \
1737 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1738
1739 static bfd_boolean
1740 elfNN_aarch64_mkobject (bfd *abfd)
1741 {
1742 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1743 AARCH64_ELF_DATA);
1744 }
1745
1746 #define elf_aarch64_hash_entry(ent) \
1747 ((struct elf_aarch64_link_hash_entry *)(ent))
1748
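/* Kinds of GOT entry a symbol may require.  Except for GOT_UNKNOWN these
   are single-bit values, so a symbol's got_type can record more than one
   requirement at once (see GOT_TLS_GD_ANY_P below).  */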
1749 #define GOT_UNKNOWN 0
1750 #define GOT_NORMAL 1
1751 #define GOT_TLS_GD 2
1752 #define GOT_TLS_IE 4
1753 #define GOT_TLSDESC_GD 8
1754
1755 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1756
1757 /* AArch64 ELF linker hash entry. */
1758 struct elf_aarch64_link_hash_entry
1759 {
1760 struct elf_link_hash_entry root;
1761
1762 /* Track dynamic relocs copied for this symbol. */
1763 struct elf_dyn_relocs *dyn_relocs;
1764
1765 /* Since PLT entries have variable size, we need to record the
1766 index into .got.plt instead of recomputing it from the PLT
1767 offset. */
1768 bfd_signed_vma plt_got_offset;
1769
1770 /* Bit mask representing the type of GOT entry(s) if any required by
1771 this symbol. */
1772 unsigned int got_type;
1773
1774 /* A pointer to the most recently used stub hash entry against this
1775 symbol. */
1776 struct elf_aarch64_stub_hash_entry *stub_cache;
1777
1778 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1779 is from the end of the jump table and reserved entries within the PLTGOT.
1780
1781 The magic value (bfd_vma) -1 indicates that an offset has not
1782    been allocated. */
1783 bfd_vma tlsdesc_got_jump_table_offset;
1784 };
1785
1786 static unsigned int
1787 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1788 bfd *abfd,
1789 unsigned long r_symndx)
1790 {
1791 if (h)
1792 return elf_aarch64_hash_entry (h)->got_type;
1793
1794 if (! elf_aarch64_locals (abfd))
1795 return GOT_UNKNOWN;
1796
1797 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1798 }
1799
1800 /* Get the AArch64 elf linker hash table from a link_info structure. */
1801 #define elf_aarch64_hash_table(info) \
1802 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1803
1804 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1805 ((struct elf_aarch64_stub_hash_entry *) \
1806 bfd_hash_lookup ((table), (string), (create), (copy)))
1807
1808 /* AArch64 ELF linker hash table. */
1809 struct elf_aarch64_link_hash_table
1810 {
1811 /* The main hash table. */
1812 struct elf_link_hash_table root;
1813
1814 /* Nonzero to force PIC branch veneers. */
1815 int pic_veneer;
1816
1817 /* The number of bytes in the initial entry in the PLT. */
1818 bfd_size_type plt_header_size;
1819
1820   /* The number of bytes in the subsequent PLT entries. */
1821 bfd_size_type plt_entry_size;
1822
1823 /* Short-cuts to get to dynamic linker sections. */
1824 asection *sdynbss;
1825 asection *srelbss;
1826
1827 /* Small local sym cache. */
1828 struct sym_cache sym_cache;
1829
1830 /* For convenience in allocate_dynrelocs. */
1831 bfd *obfd;
1832
1833 /* The amount of space used by the reserved portion of the sgotplt
1834 section, plus whatever space is used by the jump slots. */
1835 bfd_vma sgotplt_jump_table_size;
1836
1837 /* The stub hash table. */
1838 struct bfd_hash_table stub_hash_table;
1839
1840 /* Linker stub bfd. */
1841 bfd *stub_bfd;
1842
1843 /* Linker call-backs. */
1844 asection *(*add_stub_section) (const char *, asection *);
1845 void (*layout_sections_again) (void);
1846
1847 /* Array to keep track of which stub sections have been created, and
1848 information on stub grouping. */
1849 struct map_stub
1850 {
1851 /* This is the section to which stubs in the group will be
1852 attached. */
1853 asection *link_sec;
1854 /* The stub section. */
1855 asection *stub_sec;
1856 } *stub_group;
1857
1858 /* Assorted information used by elfNN_aarch64_size_stubs. */
1859 unsigned int bfd_count;
1860 int top_index;
1861 asection **input_list;
1862
1863 /* The offset into splt of the PLT entry for the TLS descriptor
1864 resolver. Special values are 0, if not necessary (or not found
1865 to be necessary yet), and -1 if needed but not determined
1866 yet. */
1867 bfd_vma tlsdesc_plt;
1868
1869 /* The GOT offset for the lazy trampoline. Communicated to the
1870 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1871 indicates an offset is not allocated. */
1872 bfd_vma dt_tlsdesc_got;
1873 };
1874
1875
1876 /* Return non-zero if the indicated VALUE has overflowed the maximum
1877    range expressible by an unsigned number with the indicated number of
1878 BITS. */
1879
1880 static bfd_reloc_status_type
1881 aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1882 {
1883 bfd_vma lim;
1884 if (bits >= sizeof (bfd_vma) * 8)
1885 return bfd_reloc_ok;
1886 lim = (bfd_vma) 1 << bits;
1887 if (value >= lim)
1888 return bfd_reloc_overflow;
1889 return bfd_reloc_ok;
1890 }
1891
1892
1893 /* Return non-zero if the indicated VALUE has overflowed the maximum
1894    range expressible by a signed number with the indicated number of
1895 BITS. */
1896
1897 static bfd_reloc_status_type
1898 aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1899 {
1900 bfd_signed_vma svalue = (bfd_signed_vma) value;
1901 bfd_signed_vma lim;
1902
1903 if (bits >= sizeof (bfd_vma) * 8)
1904 return bfd_reloc_ok;
1905 lim = (bfd_signed_vma) 1 << (bits - 1);
1906 if (svalue < -lim || svalue >= lim)
1907 return bfd_reloc_overflow;
1908 return bfd_reloc_ok;
1909 }
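/* For example, aarch64_signed_overflow (v, 21) accepts values in
   [-0x100000, 0xfffff], the range of a 21-bit two's complement field,
   while aarch64_unsigned_overflow (v, 12) accepts [0, 0xfff].  */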
1910
1911 /* Create an entry in an AArch64 ELF linker hash table. */
1912
1913 static struct bfd_hash_entry *
1914 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1915 struct bfd_hash_table *table,
1916 const char *string)
1917 {
1918 struct elf_aarch64_link_hash_entry *ret =
1919 (struct elf_aarch64_link_hash_entry *) entry;
1920
1921 /* Allocate the structure if it has not already been allocated by a
1922 subclass. */
1923 if (ret == NULL)
1924 ret = bfd_hash_allocate (table,
1925 sizeof (struct elf_aarch64_link_hash_entry));
1926 if (ret == NULL)
1927 return (struct bfd_hash_entry *) ret;
1928
1929 /* Call the allocation method of the superclass. */
1930 ret = ((struct elf_aarch64_link_hash_entry *)
1931 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1932 table, string));
1933 if (ret != NULL)
1934 {
1935 ret->dyn_relocs = NULL;
1936 ret->got_type = GOT_UNKNOWN;
1937 ret->plt_got_offset = (bfd_vma) - 1;
1938 ret->stub_cache = NULL;
1939 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1940 }
1941
1942 return (struct bfd_hash_entry *) ret;
1943 }
1944
1945 /* Initialize an entry in the stub hash table. */
1946
1947 static struct bfd_hash_entry *
1948 stub_hash_newfunc (struct bfd_hash_entry *entry,
1949 struct bfd_hash_table *table, const char *string)
1950 {
1951 /* Allocate the structure if it has not already been allocated by a
1952 subclass. */
1953 if (entry == NULL)
1954 {
1955 entry = bfd_hash_allocate (table,
1956 sizeof (struct
1957 elf_aarch64_stub_hash_entry));
1958 if (entry == NULL)
1959 return entry;
1960 }
1961
1962 /* Call the allocation method of the superclass. */
1963 entry = bfd_hash_newfunc (entry, table, string);
1964 if (entry != NULL)
1965 {
1966 struct elf_aarch64_stub_hash_entry *eh;
1967
1968 /* Initialize the local fields. */
1969 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1970 eh->stub_sec = NULL;
1971 eh->stub_offset = 0;
1972 eh->target_value = 0;
1973 eh->target_section = NULL;
1974 eh->stub_type = aarch64_stub_none;
1975 eh->h = NULL;
1976 eh->id_sec = NULL;
1977 }
1978
1979 return entry;
1980 }
1981
1982
1983 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1984
1985 static void
1986 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
1987 struct elf_link_hash_entry *dir,
1988 struct elf_link_hash_entry *ind)
1989 {
1990 struct elf_aarch64_link_hash_entry *edir, *eind;
1991
1992 edir = (struct elf_aarch64_link_hash_entry *) dir;
1993 eind = (struct elf_aarch64_link_hash_entry *) ind;
1994
1995 if (eind->dyn_relocs != NULL)
1996 {
1997 if (edir->dyn_relocs != NULL)
1998 {
1999 struct elf_dyn_relocs **pp;
2000 struct elf_dyn_relocs *p;
2001
2002 /* Add reloc counts against the indirect sym to the direct sym
2003 list. Merge any entries against the same section. */
2004 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2005 {
2006 struct elf_dyn_relocs *q;
2007
2008 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2009 if (q->sec == p->sec)
2010 {
2011 q->pc_count += p->pc_count;
2012 q->count += p->count;
2013 *pp = p->next;
2014 break;
2015 }
2016 if (q == NULL)
2017 pp = &p->next;
2018 }
2019 *pp = edir->dyn_relocs;
2020 }
2021
2022 edir->dyn_relocs = eind->dyn_relocs;
2023 eind->dyn_relocs = NULL;
2024 }
2025
2026 if (ind->root.type == bfd_link_hash_indirect)
2027 {
2028 /* Copy over PLT info. */
2029 if (dir->got.refcount <= 0)
2030 {
2031 edir->got_type = eind->got_type;
2032 eind->got_type = GOT_UNKNOWN;
2033 }
2034 }
2035
2036 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2037 }
2038
2039 /* Create an AArch64 elf linker hash table. */
2040
2041 static struct bfd_link_hash_table *
2042 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2043 {
2044 struct elf_aarch64_link_hash_table *ret;
2045 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2046
2047 ret = bfd_zmalloc (amt);
2048 if (ret == NULL)
2049 return NULL;
2050
2051 if (!_bfd_elf_link_hash_table_init
2052 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2053 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2054 {
2055 free (ret);
2056 return NULL;
2057 }
2058
2059 ret->plt_header_size = PLT_ENTRY_SIZE;
2060 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2061 ret->obfd = abfd;
2062 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2063
2064 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2065 sizeof (struct elf_aarch64_stub_hash_entry)))
2066 {
2067 free (ret);
2068 return NULL;
2069 }
2070
2071 return &ret->root.root;
2072 }
2073
2074 /* Free the derived linker hash table. */
2075
2076 static void
2077 elfNN_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2078 {
2079 struct elf_aarch64_link_hash_table *ret
2080 = (struct elf_aarch64_link_hash_table *) hash;
2081
2082 bfd_hash_table_free (&ret->stub_hash_table);
2083 _bfd_elf_link_hash_table_free (hash);
2084 }
2085
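/* Given relocation type R_TYPE, the PLACE being relocated, the symbol
   VALUE and the ADDEND, compute the quantity that will be encoded into
   the instruction or data word.  PC-relative forms subtract PLACE,
   page-based forms (ADR_HI21, *_PAGE21) work on 4KB page bases via PG,
   and the LO12 forms keep only the page offset via PG_OFFSET.  When
   WEAK_UNDEF_P is set, PC-relative forms are resolved against PLACE
   itself so that the result stays within range.  */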
2086 static bfd_vma
2087 aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2088 bfd_vma addend, bfd_boolean weak_undef_p)
2089 {
2090 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
2091 {
2092 case BFD_RELOC_AARCH64_TLSDESC_CALL:
2093 case BFD_RELOC_AARCH64_NONE:
2094 break;
2095
2096 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
2097 case BFD_RELOC_AARCH64_BRANCH19:
2098 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
2099 case BFD_RELOC_AARCH64_16_PCREL:
2100 case BFD_RELOC_AARCH64_32_PCREL:
2101 case BFD_RELOC_AARCH64_64_PCREL:
2102 case BFD_RELOC_AARCH64_TSTBR14:
2103 if (weak_undef_p)
2104 value = place;
2105 value = value + addend - place;
2106 break;
2107
2108 case BFD_RELOC_AARCH64_CALL26:
2109 case BFD_RELOC_AARCH64_JUMP26:
2110 value = value + addend - place;
2111 break;
2112
2113 case BFD_RELOC_AARCH64_16:
2114 case BFD_RELOC_AARCH64_32:
2115 case BFD_RELOC_AARCH64_MOVW_G0_S:
2116 case BFD_RELOC_AARCH64_MOVW_G1_S:
2117 case BFD_RELOC_AARCH64_MOVW_G2_S:
2118 case BFD_RELOC_AARCH64_MOVW_G0:
2119 case BFD_RELOC_AARCH64_MOVW_G0_NC:
2120 case BFD_RELOC_AARCH64_MOVW_G1:
2121 case BFD_RELOC_AARCH64_MOVW_G1_NC:
2122 case BFD_RELOC_AARCH64_MOVW_G2:
2123 case BFD_RELOC_AARCH64_MOVW_G2_NC:
2124 case BFD_RELOC_AARCH64_MOVW_G3:
2125 value = value + addend;
2126 break;
2127
2128 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
2129 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
2130 if (weak_undef_p)
2131 value = PG (place);
2132 value = PG (value + addend) - PG (place);
2133 break;
2134
2135 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
2136 value = value + addend - place;
2137 break;
2138
2139 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
2140 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
2141 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
2142 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2143 value = PG (value + addend) - PG (place);
2144 break;
2145
2146 case BFD_RELOC_AARCH64_ADD_LO12:
2147 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
2148 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
2149 case BFD_RELOC_AARCH64_LDST8_LO12:
2150 case BFD_RELOC_AARCH64_LDST16_LO12:
2151 case BFD_RELOC_AARCH64_LDST32_LO12:
2152 case BFD_RELOC_AARCH64_LDST64_LO12:
2153 case BFD_RELOC_AARCH64_LDST128_LO12:
2154 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
2155 case BFD_RELOC_AARCH64_TLSDESC_ADD:
2156 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
2157 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
2158 case BFD_RELOC_AARCH64_TLSDESC_LDR:
2159 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
2160 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2161 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
2162 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
2163 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2164 value = PG_OFFSET (value + addend);
2165 break;
2166
2167 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
2168 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2169 value = (value + addend) & (bfd_vma) 0xffff0000;
2170 break;
2171 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
2172 value = (value + addend) & (bfd_vma) 0xfff000;
2173 break;
2174
2175 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
2176 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2177 value = (value + addend) & (bfd_vma) 0xffff;
2178 break;
2179
2180 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
2181 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2182 value -= place & ~(bfd_vma) 0xffffffff;
2183 break;
2184
2185 default:
2186 break;
2187 }
2188
2189 return value;
2190 }
2191
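/* Resolve and apply relocation R_TYPE at OFFSET within INPUT_SECTION of
   INPUT_BFD against VALUE (with a zero addend), patching the section
   contents in place.  Used here when emitting stub instructions.  */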
2192 static bfd_boolean
2193 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2194 bfd_vma offset, bfd_vma value)
2195 {
2196 reloc_howto_type *howto;
2197 bfd_vma place;
2198
2199 howto = elfNN_aarch64_howto_from_type (r_type);
2200 place = (input_section->output_section->vma + input_section->output_offset
2201 + offset);
2202 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2203 return bfd_elf_aarch64_put_addend (input_bfd,
2204 input_section->contents + offset,
2205 howto, value);
2206 }
2207
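/* Choose the cheapest stub able to reach VALUE from PLACE: the compact
   adrp/add/br stub when the target is within ADRP range, otherwise the
   long-branch (literal) stub.  */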
2208 static enum elf_aarch64_stub_type
2209 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2210 {
2211 if (aarch64_valid_for_adrp_p (value, place))
2212 return aarch64_stub_adrp_branch;
2213 return aarch64_stub_long_branch;
2214 }
2215
2216 /* Determine the type of stub needed, if any, for a call. */
2217
2218 static enum elf_aarch64_stub_type
2219 aarch64_type_of_stub (struct bfd_link_info *info,
2220 asection *input_sec,
2221 const Elf_Internal_Rela *rel,
2222 unsigned char st_type,
2223 struct elf_aarch64_link_hash_entry *hash,
2224 bfd_vma destination)
2225 {
2226 bfd_vma location;
2227 bfd_signed_vma branch_offset;
2228 unsigned int r_type;
2229 struct elf_aarch64_link_hash_table *globals;
2230 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2231 bfd_boolean via_plt_p;
2232
2233 if (st_type != STT_FUNC)
2234 return stub_type;
2235
2236 globals = elf_aarch64_hash_table (info);
2237 via_plt_p = (globals->root.splt != NULL && hash != NULL
2238 && hash->root.plt.offset != (bfd_vma) - 1);
2239
2240 if (via_plt_p)
2241 return stub_type;
2242
2243 /* Determine where the call point is. */
2244 location = (input_sec->output_offset
2245 + input_sec->output_section->vma + rel->r_offset);
2246
2247 branch_offset = (bfd_signed_vma) (destination - location);
2248
2249 r_type = ELFNN_R_TYPE (rel->r_info);
2250
2251 /* We don't want to redirect any old unconditional jump in this way,
2252 only one which is being used for a sibcall, where it is
2253 acceptable for the IP0 and IP1 registers to be clobbered. */
2254 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2255 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2256 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2257 {
2258 stub_type = aarch64_stub_long_branch;
2259 }
2260
2261 return stub_type;
2262 }
2263
2264 /* Build a name for an entry in the stub hash table. */
2265
2266 static char *
2267 elfNN_aarch64_stub_name (const asection *input_section,
2268 const asection *sym_sec,
2269 const struct elf_aarch64_link_hash_entry *hash,
2270 const Elf_Internal_Rela *rel)
2271 {
2272 char *stub_name;
2273 bfd_size_type len;
2274
2275 if (hash)
2276 {
2277 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2278 stub_name = bfd_malloc (len);
2279 if (stub_name != NULL)
2280 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2281 (unsigned int) input_section->id,
2282 hash->root.root.root.string,
2283 rel->r_addend);
2284 }
2285 else
2286 {
2287 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2288 stub_name = bfd_malloc (len);
2289 if (stub_name != NULL)
2290 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2291 (unsigned int) input_section->id,
2292 (unsigned int) sym_sec->id,
2293 (unsigned int) ELFNN_R_SYM (rel->r_info),
2294 rel->r_addend);
2295 }
2296
2297 return stub_name;
2298 }
2299
2300 /* Look up an entry in the stub hash. Stub entries are cached because
2301 creating the stub name takes a bit of time. */
2302
2303 static struct elf_aarch64_stub_hash_entry *
2304 elfNN_aarch64_get_stub_entry (const asection *input_section,
2305 const asection *sym_sec,
2306 struct elf_link_hash_entry *hash,
2307 const Elf_Internal_Rela *rel,
2308 struct elf_aarch64_link_hash_table *htab)
2309 {
2310 struct elf_aarch64_stub_hash_entry *stub_entry;
2311 struct elf_aarch64_link_hash_entry *h =
2312 (struct elf_aarch64_link_hash_entry *) hash;
2313 const asection *id_sec;
2314
2315 if ((input_section->flags & SEC_CODE) == 0)
2316 return NULL;
2317
2318 /* If this input section is part of a group of sections sharing one
2319 stub section, then use the id of the first section in the group.
2320 Stub names need to include a section id, as there may well be
2321    more than one stub used to reach, say, printf, and we need to
2322 distinguish between them. */
2323 id_sec = htab->stub_group[input_section->id].link_sec;
2324
2325 if (h != NULL && h->stub_cache != NULL
2326 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2327 {
2328 stub_entry = h->stub_cache;
2329 }
2330 else
2331 {
2332 char *stub_name;
2333
2334 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2335 if (stub_name == NULL)
2336 return NULL;
2337
2338 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2339 stub_name, FALSE, FALSE);
2340 if (h != NULL)
2341 h->stub_cache = stub_entry;
2342
2343 free (stub_name);
2344 }
2345
2346 return stub_entry;
2347 }
2348
2349 /* Add a new stub entry to the stub hash. Not all fields of the new
2350 stub entry are initialised. */
2351
2352 static struct elf_aarch64_stub_hash_entry *
2353 elfNN_aarch64_add_stub (const char *stub_name,
2354 asection *section,
2355 struct elf_aarch64_link_hash_table *htab)
2356 {
2357 asection *link_sec;
2358 asection *stub_sec;
2359 struct elf_aarch64_stub_hash_entry *stub_entry;
2360
2361 link_sec = htab->stub_group[section->id].link_sec;
2362 stub_sec = htab->stub_group[section->id].stub_sec;
2363 if (stub_sec == NULL)
2364 {
2365 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2366 if (stub_sec == NULL)
2367 {
2368 size_t namelen;
2369 bfd_size_type len;
2370 char *s_name;
2371
2372 namelen = strlen (link_sec->name);
2373 len = namelen + sizeof (STUB_SUFFIX);
2374 s_name = bfd_alloc (htab->stub_bfd, len);
2375 if (s_name == NULL)
2376 return NULL;
2377
2378 memcpy (s_name, link_sec->name, namelen);
2379 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2380 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2381 if (stub_sec == NULL)
2382 return NULL;
2383 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2384 }
2385 htab->stub_group[section->id].stub_sec = stub_sec;
2386 }
2387
2388 /* Enter this entry into the linker stub hash table. */
2389 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2390 TRUE, FALSE);
2391 if (stub_entry == NULL)
2392 {
2393 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2394 section->owner, stub_name);
2395 return NULL;
2396 }
2397
2398 stub_entry->stub_sec = stub_sec;
2399 stub_entry->stub_offset = 0;
2400 stub_entry->id_sec = link_sec;
2401
2402 return stub_entry;
2403 }
2404
2405 static bfd_boolean
2406 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2407 void *in_arg ATTRIBUTE_UNUSED)
2408 {
2409 struct elf_aarch64_stub_hash_entry *stub_entry;
2410 asection *stub_sec;
2411 bfd *stub_bfd;
2412 bfd_byte *loc;
2413 bfd_vma sym_value;
2414 unsigned int template_size;
2415 const uint32_t *template;
2416 unsigned int i;
2417
2418 /* Massage our args to the form they really have. */
2419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2420
2421 stub_sec = stub_entry->stub_sec;
2422
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry->stub_offset = stub_sec->size;
2425 loc = stub_sec->contents + stub_entry->stub_offset;
2426
2427 stub_bfd = stub_sec->owner;
2428
2429 /* This is the address of the stub destination. */
2430 sym_value = (stub_entry->target_value
2431 + stub_entry->target_section->output_offset
2432 + stub_entry->target_section->output_section->vma);
2433
2434 if (stub_entry->stub_type == aarch64_stub_long_branch)
2435 {
2436 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2437 + stub_sec->output_offset);
2438
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value, place))
2441 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2442 }
2443
2444 switch (stub_entry->stub_type)
2445 {
2446 case aarch64_stub_adrp_branch:
2447 template = aarch64_adrp_branch_stub;
2448 template_size = sizeof (aarch64_adrp_branch_stub);
2449 break;
2450 case aarch64_stub_long_branch:
2451 template = aarch64_long_branch_stub;
2452 template_size = sizeof (aarch64_long_branch_stub);
2453 break;
2454 default:
2455 BFD_FAIL ();
2456 return FALSE;
2457 }
2458
2459 for (i = 0; i < (template_size / sizeof template[0]); i++)
2460 {
2461 bfd_putl32 (template[i], loc);
2462 loc += 4;
2463 }
2464
2465 template_size = (template_size + 7) & ~7;
2466 stub_sec->size += template_size;
2467
2468 switch (stub_entry->stub_type)
2469 {
2470 case aarch64_stub_adrp_branch:
2471 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2472 stub_entry->stub_offset, sym_value))
2473 /* The stub would not have been relaxed if the offset was out
2474 of range. */
2475 BFD_FAIL ();
2476
2477 _bfd_final_link_relocate
2478 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2479 stub_bfd,
2480 stub_sec,
2481 stub_sec->contents,
2482 stub_entry->stub_offset + 4,
2483 sym_value,
2484 0);
2485 break;
2486
2487 case aarch64_stub_long_branch:
2488 /* We want the value relative to the address 12 bytes back from the
2489 value itself. */
2490 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2491 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2492 stub_sec->contents,
2493 stub_entry->stub_offset + 16,
2494 sym_value + 12, 0);
2495 break;
2496 default:
2497 break;
2498 }
2499
2500 return TRUE;
2501 }
2502
2503 /* As above, but don't actually build the stub. Just bump offset so
2504 we know stub section sizes. */
2505
2506 static bfd_boolean
2507 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2508 void *in_arg ATTRIBUTE_UNUSED)
2509 {
2510 struct elf_aarch64_stub_hash_entry *stub_entry;
2511 int size;
2512
2513 /* Massage our args to the form they really have. */
2514 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2515
2516 switch (stub_entry->stub_type)
2517 {
2518 case aarch64_stub_adrp_branch:
2519 size = sizeof (aarch64_adrp_branch_stub);
2520 break;
2521 case aarch64_stub_long_branch:
2522 size = sizeof (aarch64_long_branch_stub);
2523 break;
2524 default:
2525 BFD_FAIL ();
2526 return FALSE;
2527 break;
2528 }
2529
2530 size = (size + 7) & ~7;
2531 stub_entry->stub_sec->size += size;
2532 return TRUE;
2533 }
2534
2535 /* External entry points for sizing and building linker stubs. */
2536
2537 /* Set up various things so that we can make a list of input sections
2538 for each output section included in the link. Returns -1 on error,
2539 0 when no stubs will be needed, and 1 on success. */
2540
2541 int
2542 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2543 struct bfd_link_info *info)
2544 {
2545 bfd *input_bfd;
2546 unsigned int bfd_count;
2547 int top_id, top_index;
2548 asection *section;
2549 asection **input_list, **list;
2550 bfd_size_type amt;
2551 struct elf_aarch64_link_hash_table *htab =
2552 elf_aarch64_hash_table (info);
2553
2554 if (!is_elf_hash_table (htab))
2555 return 0;
2556
2557 /* Count the number of input BFDs and find the top input section id. */
2558 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2559 input_bfd != NULL; input_bfd = input_bfd->link_next)
2560 {
2561 bfd_count += 1;
2562 for (section = input_bfd->sections;
2563 section != NULL; section = section->next)
2564 {
2565 if (top_id < section->id)
2566 top_id = section->id;
2567 }
2568 }
2569 htab->bfd_count = bfd_count;
2570
2571 amt = sizeof (struct map_stub) * (top_id + 1);
2572 htab->stub_group = bfd_zmalloc (amt);
2573 if (htab->stub_group == NULL)
2574 return -1;
2575
2576 /* We can't use output_bfd->section_count here to find the top output
2577 section index as some sections may have been removed, and
2578 _bfd_strip_section_from_output doesn't renumber the indices. */
2579 for (section = output_bfd->sections, top_index = 0;
2580 section != NULL; section = section->next)
2581 {
2582 if (top_index < section->index)
2583 top_index = section->index;
2584 }
2585
2586 htab->top_index = top_index;
2587 amt = sizeof (asection *) * (top_index + 1);
2588 input_list = bfd_malloc (amt);
2589 htab->input_list = input_list;
2590 if (input_list == NULL)
2591 return -1;
2592
2593 /* For sections we aren't interested in, mark their entries with a
2594 value we can check later. */
2595 list = input_list + top_index;
2596 do
2597 *list = bfd_abs_section_ptr;
2598 while (list-- != input_list);
2599
2600 for (section = output_bfd->sections;
2601 section != NULL; section = section->next)
2602 {
2603 if ((section->flags & SEC_CODE) != 0)
2604 input_list[section->index] = NULL;
2605 }
2606
2607 return 1;
2608 }
2609
2610 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2611 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2612
2613 /* The linker repeatedly calls this function for each input section,
2614 in the order that input sections are linked into output sections.
2615 Build lists of input sections to determine groupings between which
2616 we may insert linker stubs. */
2617
2618 void
2619 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2620 {
2621 struct elf_aarch64_link_hash_table *htab =
2622 elf_aarch64_hash_table (info);
2623
2624 if (isec->output_section->index <= htab->top_index)
2625 {
2626 asection **list = htab->input_list + isec->output_section->index;
2627
2628 if (*list != bfd_abs_section_ptr)
2629 {
2630 /* Steal the link_sec pointer for our list. */
2631 /* This happens to make the list in reverse order,
2632 which is what we want. */
2633 PREV_SEC (isec) = *list;
2634 *list = isec;
2635 }
2636 }
2637 }
2638
2639 /* See whether we can group stub sections together. Grouping stub
2640 sections may result in fewer stubs. More importantly, we need to
2641 put all .init* and .fini* stubs at the beginning of the .init or
2642 .fini output sections respectively, because glibc splits the
2643 _init and _fini functions into multiple parts. Putting a stub in
2644 the middle of a function is not a good idea. */
2645
2646 static void
2647 group_sections (struct elf_aarch64_link_hash_table *htab,
2648 bfd_size_type stub_group_size,
2649 bfd_boolean stubs_always_before_branch)
2650 {
2651 asection **list = htab->input_list + htab->top_index;
2652
2653 do
2654 {
2655 asection *tail = *list;
2656
2657 if (tail == bfd_abs_section_ptr)
2658 continue;
2659
2660 while (tail != NULL)
2661 {
2662 asection *curr;
2663 asection *prev;
2664 bfd_size_type total;
2665
2666 curr = tail;
2667 total = tail->size;
2668 while ((prev = PREV_SEC (curr)) != NULL
2669 && ((total += curr->output_offset - prev->output_offset)
2670 < stub_group_size))
2671 curr = prev;
2672
2673 /* OK, the size from the start of CURR to the end is less
2674 than stub_group_size and thus can be handled by one stub
2675 section. (Or the tail section is itself larger than
2676 stub_group_size, in which case we may be toast.)
2677 We should really be keeping track of the total size of
2678 stubs added here, as stubs contribute to the final output
2679 section size. */
2680 do
2681 {
2682 prev = PREV_SEC (tail);
2683 /* Set up this stub group. */
2684 htab->stub_group[tail->id].link_sec = curr;
2685 }
2686 while (tail != curr && (tail = prev) != NULL);
2687
2688 /* But wait, there's more! Input sections up to stub_group_size
2689 bytes before the stub section can be handled by it too. */
2690 if (!stubs_always_before_branch)
2691 {
2692 total = 0;
2693 while (prev != NULL
2694 && ((total += tail->output_offset - prev->output_offset)
2695 < stub_group_size))
2696 {
2697 tail = prev;
2698 prev = PREV_SEC (tail);
2699 htab->stub_group[tail->id].link_sec = curr;
2700 }
2701 }
2702 tail = prev;
2703 }
2704 }
2705 while (list-- != htab->input_list);
2706
2707 free (htab->input_list);
2708 }
2709
2710 #undef PREV_SEC
2711
2712 /* Determine and set the size of the stub section for a final link.
2713
2714 The basic idea here is to examine all the relocations looking for
2715 PC-relative calls to a target that is unreachable with a "bl"
2716 instruction. */
2717
2718 bfd_boolean
2719 elfNN_aarch64_size_stubs (bfd *output_bfd,
2720 bfd *stub_bfd,
2721 struct bfd_link_info *info,
2722 bfd_signed_vma group_size,
2723 asection * (*add_stub_section) (const char *,
2724 asection *),
2725 void (*layout_sections_again) (void))
2726 {
2727 bfd_size_type stub_group_size;
2728 bfd_boolean stubs_always_before_branch;
2729 bfd_boolean stub_changed = 0;
2730 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
2731
2732 /* Propagate mach to stub bfd, because it may not have been
2733 finalized when we created stub_bfd. */
2734 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2735 bfd_get_mach (output_bfd));
2736
2737 /* Stash our params away. */
2738 htab->stub_bfd = stub_bfd;
2739 htab->add_stub_section = add_stub_section;
2740 htab->layout_sections_again = layout_sections_again;
2741 stubs_always_before_branch = group_size < 0;
2742 if (group_size < 0)
2743 stub_group_size = -group_size;
2744 else
2745 stub_group_size = group_size;
2746
2747 if (stub_group_size == 1)
2748 {
2749 /* Default values. */
2750 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
2751 stub_group_size = 127 * 1024 * 1024;
2752 }
2753
2754 group_sections (htab, stub_group_size, stubs_always_before_branch);
2755
2756 while (1)
2757 {
2758 bfd *input_bfd;
2759 unsigned int bfd_indx;
2760 asection *stub_sec;
2761
2762 for (input_bfd = info->input_bfds, bfd_indx = 0;
2763 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2764 {
2765 Elf_Internal_Shdr *symtab_hdr;
2766 asection *section;
2767 Elf_Internal_Sym *local_syms = NULL;
2768
2769 /* We'll need the symbol table in a second. */
2770 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2771 if (symtab_hdr->sh_info == 0)
2772 continue;
2773
2774 /* Walk over each section attached to the input bfd. */
2775 for (section = input_bfd->sections;
2776 section != NULL; section = section->next)
2777 {
2778 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2779
2780 /* If there aren't any relocs, then there's nothing more
2781 to do. */
2782 if ((section->flags & SEC_RELOC) == 0
2783 || section->reloc_count == 0
2784 || (section->flags & SEC_CODE) == 0)
2785 continue;
2786
2787 /* If this section is a link-once section that will be
2788 discarded, then don't create any stubs. */
2789 if (section->output_section == NULL
2790 || section->output_section->owner != output_bfd)
2791 continue;
2792
2793 /* Get the relocs. */
2794 internal_relocs
2795 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2796 NULL, info->keep_memory);
2797 if (internal_relocs == NULL)
2798 goto error_ret_free_local;
2799
2800 /* Now examine each relocation. */
2801 irela = internal_relocs;
2802 irelaend = irela + section->reloc_count;
2803 for (; irela < irelaend; irela++)
2804 {
2805 unsigned int r_type, r_indx;
2806 enum elf_aarch64_stub_type stub_type;
2807 struct elf_aarch64_stub_hash_entry *stub_entry;
2808 asection *sym_sec;
2809 bfd_vma sym_value;
2810 bfd_vma destination;
2811 struct elf_aarch64_link_hash_entry *hash;
2812 const char *sym_name;
2813 char *stub_name;
2814 const asection *id_sec;
2815 unsigned char st_type;
2816 bfd_size_type len;
2817
2818 r_type = ELFNN_R_TYPE (irela->r_info);
2819 r_indx = ELFNN_R_SYM (irela->r_info);
2820
2821 if (r_type >= (unsigned int) R_AARCH64_end)
2822 {
2823 bfd_set_error (bfd_error_bad_value);
2824 error_ret_free_internal:
2825 if (elf_section_data (section)->relocs == NULL)
2826 free (internal_relocs);
2827 goto error_ret_free_local;
2828 }
2829
2830 /* Only look for stubs on unconditional branch and
2831 branch and link instructions. */
2832 if (r_type != (unsigned int) AARCH64_R (CALL26)
2833 && r_type != (unsigned int) AARCH64_R (JUMP26))
2834 continue;
2835
2836 /* Now determine the call target, its name, value,
2837 section. */
2838 sym_sec = NULL;
2839 sym_value = 0;
2840 destination = 0;
2841 hash = NULL;
2842 sym_name = NULL;
2843 if (r_indx < symtab_hdr->sh_info)
2844 {
2845 /* It's a local symbol. */
2846 Elf_Internal_Sym *sym;
2847 Elf_Internal_Shdr *hdr;
2848
2849 if (local_syms == NULL)
2850 {
2851 local_syms
2852 = (Elf_Internal_Sym *) symtab_hdr->contents;
2853 if (local_syms == NULL)
2854 local_syms
2855 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2856 symtab_hdr->sh_info, 0,
2857 NULL, NULL, NULL);
2858 if (local_syms == NULL)
2859 goto error_ret_free_internal;
2860 }
2861
2862 sym = local_syms + r_indx;
2863 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2864 sym_sec = hdr->bfd_section;
2865 if (!sym_sec)
2866 /* This is an undefined symbol. It can never
2867 be resolved. */
2868 continue;
2869
2870 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2871 sym_value = sym->st_value;
2872 destination = (sym_value + irela->r_addend
2873 + sym_sec->output_offset
2874 + sym_sec->output_section->vma);
2875 st_type = ELF_ST_TYPE (sym->st_info);
2876 sym_name
2877 = bfd_elf_string_from_elf_section (input_bfd,
2878 symtab_hdr->sh_link,
2879 sym->st_name);
2880 }
2881 else
2882 {
2883 int e_indx;
2884
2885 e_indx = r_indx - symtab_hdr->sh_info;
2886 hash = ((struct elf_aarch64_link_hash_entry *)
2887 elf_sym_hashes (input_bfd)[e_indx]);
2888
2889 while (hash->root.root.type == bfd_link_hash_indirect
2890 || hash->root.root.type == bfd_link_hash_warning)
2891 hash = ((struct elf_aarch64_link_hash_entry *)
2892 hash->root.root.u.i.link);
2893
2894 if (hash->root.root.type == bfd_link_hash_defined
2895 || hash->root.root.type == bfd_link_hash_defweak)
2896 {
2897 struct elf_aarch64_link_hash_table *globals =
2898 elf_aarch64_hash_table (info);
2899 sym_sec = hash->root.root.u.def.section;
2900 sym_value = hash->root.root.u.def.value;
2901 /* For a destination in a shared library,
2902 use the PLT stub as target address to
2903 decide whether a branch stub is
2904 needed. */
2905 if (globals->root.splt != NULL && hash != NULL
2906 && hash->root.plt.offset != (bfd_vma) - 1)
2907 {
2908 sym_sec = globals->root.splt;
2909 sym_value = hash->root.plt.offset;
2910 if (sym_sec->output_section != NULL)
2911 destination = (sym_value
2912 + sym_sec->output_offset
2913 +
2914 sym_sec->output_section->vma);
2915 }
2916 else if (sym_sec->output_section != NULL)
2917 destination = (sym_value + irela->r_addend
2918 + sym_sec->output_offset
2919 + sym_sec->output_section->vma);
2920 }
2921 else if (hash->root.root.type == bfd_link_hash_undefined
2922 || (hash->root.root.type
2923 == bfd_link_hash_undefweak))
2924 {
2925 /* For a shared library, use the PLT stub as
2926 target address to decide whether a long
2927 branch stub is needed.
2928 For absolute code, they cannot be handled. */
2929 struct elf_aarch64_link_hash_table *globals =
2930 elf_aarch64_hash_table (info);
2931
2932 if (globals->root.splt != NULL && hash != NULL
2933 && hash->root.plt.offset != (bfd_vma) - 1)
2934 {
2935 sym_sec = globals->root.splt;
2936 sym_value = hash->root.plt.offset;
2937 if (sym_sec->output_section != NULL)
2938 destination = (sym_value
2939 + sym_sec->output_offset
2940 +
2941 sym_sec->output_section->vma);
2942 }
2943 else
2944 continue;
2945 }
2946 else
2947 {
2948 bfd_set_error (bfd_error_bad_value);
2949 goto error_ret_free_internal;
2950 }
2951 st_type = ELF_ST_TYPE (hash->root.type);
2952 sym_name = hash->root.root.root.string;
2953 }
2954
2955 /* Determine what (if any) linker stub is needed. */
2956 stub_type = aarch64_type_of_stub
2957 (info, section, irela, st_type, hash, destination);
2958 if (stub_type == aarch64_stub_none)
2959 continue;
2960
2961 /* Support for grouping stub sections. */
2962 id_sec = htab->stub_group[section->id].link_sec;
2963
2964 /* Get the name of this stub. */
2965 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
2966 irela);
2967 if (!stub_name)
2968 goto error_ret_free_internal;
2969
2970 stub_entry =
2971 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2972 stub_name, FALSE, FALSE);
2973 if (stub_entry != NULL)
2974 {
2975 /* The proper stub has already been created. */
2976 free (stub_name);
2977 continue;
2978 }
2979
2980 stub_entry = elfNN_aarch64_add_stub (stub_name, section,
2981 htab);
2982 if (stub_entry == NULL)
2983 {
2984 free (stub_name);
2985 goto error_ret_free_internal;
2986 }
2987
2988 stub_entry->target_value = sym_value;
2989 stub_entry->target_section = sym_sec;
2990 stub_entry->stub_type = stub_type;
2991 stub_entry->h = hash;
2992 stub_entry->st_type = st_type;
2993
2994 if (sym_name == NULL)
2995 sym_name = "unnamed";
2996 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
2997 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
2998 if (stub_entry->output_name == NULL)
2999 {
3000 free (stub_name);
3001 goto error_ret_free_internal;
3002 }
3003
3004 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3005 sym_name);
3006
3007 stub_changed = TRUE;
3008 }
3009
3010 /* We're done with the internal relocs, free them. */
3011 if (elf_section_data (section)->relocs == NULL)
3012 free (internal_relocs);
3013 }
3014 }
3015
3016 if (!stub_changed)
3017 break;
3018
3019 /* OK, we've added some stubs. Find out the new size of the
3020 stub sections. */
3021 for (stub_sec = htab->stub_bfd->sections;
3022 stub_sec != NULL; stub_sec = stub_sec->next)
3023 stub_sec->size = 0;
3024
3025 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3026
3027 /* Ask the linker to do its stuff. */
3028 (*htab->layout_sections_again) ();
3029 stub_changed = FALSE;
3030 }
3031
3032 return TRUE;
3033
3034 error_ret_free_local:
3035 return FALSE;
3036 }
3037
3038 /* Build all the stubs associated with the current output file. The
3039 stubs are kept in a hash table attached to the main linker hash
3040 table. We also set up the .plt entries for statically linked PIC
3041 functions here. This function is called via aarch64_elf_finish in the
3042 linker. */
3043
3044 bfd_boolean
3045 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3046 {
3047 asection *stub_sec;
3048 struct bfd_hash_table *table;
3049 struct elf_aarch64_link_hash_table *htab;
3050
3051 htab = elf_aarch64_hash_table (info);
3052
3053 for (stub_sec = htab->stub_bfd->sections;
3054 stub_sec != NULL; stub_sec = stub_sec->next)
3055 {
3056 bfd_size_type size;
3057
3058 /* Ignore non-stub sections. */
3059 if (!strstr (stub_sec->name, STUB_SUFFIX))
3060 continue;
3061
3062 /* Allocate memory to hold the linker stubs. */
3063 size = stub_sec->size;
3064 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3065 if (stub_sec->contents == NULL && size != 0)
3066 return FALSE;
3067 stub_sec->size = 0;
3068 }
3069
3070 /* Build the stubs as directed by the stub hash table. */
3071 table = &htab->stub_hash_table;
3072 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3073
3074 return TRUE;
3075 }
3076
3077
3078 /* Add an entry to the code/data map for section SEC. */
3079
3080 static void
3081 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3082 {
3083 struct _aarch64_elf_section_data *sec_data =
3084 elf_aarch64_section_data (sec);
3085 unsigned int newidx;
3086
3087 if (sec_data->map == NULL)
3088 {
3089 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3090 sec_data->mapcount = 0;
3091 sec_data->mapsize = 1;
3092 }
3093
3094 newidx = sec_data->mapcount++;
3095
3096 if (sec_data->mapcount > sec_data->mapsize)
3097 {
3098 sec_data->mapsize *= 2;
3099 sec_data->map = bfd_realloc_or_free
3100 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3101 }
3102
3103 if (sec_data->map)
3104 {
3105 sec_data->map[newidx].vma = vma;
3106 sec_data->map[newidx].type = type;
3107 }
3108 }
3109
3110
3111 /* Initialise maps of insn/data for input BFDs. */
3112 void
3113 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3114 {
3115 Elf_Internal_Sym *isymbuf;
3116 Elf_Internal_Shdr *hdr;
3117 unsigned int i, localsyms;
3118
3119 /* Make sure that we are dealing with an AArch64 elf binary. */
3120 if (!is_aarch64_elf (abfd))
3121 return;
3122
3123 if ((abfd->flags & DYNAMIC) != 0)
3124 return;
3125
3126 hdr = &elf_symtab_hdr (abfd);
3127 localsyms = hdr->sh_info;
3128
3129 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3130 should contain the number of local symbols, which should come before any
3131 global symbols. Mapping symbols are always local. */
3132 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3133
3134 /* No internal symbols read? Skip this BFD. */
3135 if (isymbuf == NULL)
3136 return;
3137
3138 for (i = 0; i < localsyms; i++)
3139 {
3140 Elf_Internal_Sym *isym = &isymbuf[i];
3141 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3142 const char *name;
3143
3144 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3145 {
3146 name = bfd_elf_string_from_elf_section (abfd,
3147 hdr->sh_link,
3148 isym->st_name);
3149
3150 if (bfd_is_aarch64_special_symbol_name
3151 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3152 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3153 }
3154 }
3155 }
3156
3157 /* Set option values needed during linking. */
3158 void
3159 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3160 struct bfd_link_info *link_info,
3161 int no_enum_warn,
3162 int no_wchar_warn, int pic_veneer)
3163 {
3164 struct elf_aarch64_link_hash_table *globals;
3165
3166 globals = elf_aarch64_hash_table (link_info);
3167 globals->pic_veneer = pic_veneer;
3168
3169 BFD_ASSERT (is_aarch64_elf (output_bfd));
3170 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3171 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3172 }
3173
3174 #define MASK(n) ((1u << (n)) - 1)
3175
3176 /* Decode the 26-bit offset of unconditional branch. */
3177 static inline uint32_t
3178 decode_branch_ofs_26 (uint32_t insn)
3179 {
3180 return insn & MASK (26);
3181 }
3182
3183 /* Decode the 19-bit offset of conditional branch and compare & branch. */
3184 static inline uint32_t
3185 decode_cond_branch_ofs_19 (uint32_t insn)
3186 {
3187 return (insn >> 5) & MASK (19);
3188 }
3189
3190 /* Decode the 19-bit offset of load literal. */
3191 static inline uint32_t
3192 decode_ld_lit_ofs_19 (uint32_t insn)
3193 {
3194 return (insn >> 5) & MASK (19);
3195 }
3196
3197 /* Decode the 14-bit offset of test & branch. */
3198 static inline uint32_t
3199 decode_tst_branch_ofs_14 (uint32_t insn)
3200 {
3201 return (insn >> 5) & MASK (14);
3202 }
3203
3204 /* Decode the 16-bit imm of move wide. */
3205 static inline uint32_t
3206 decode_movw_imm (uint32_t insn)
3207 {
3208 return (insn >> 5) & MASK (16);
3209 }
3210
3211 /* Decode the 21-bit imm of adr. */
3212 static inline uint32_t
3213 decode_adr_imm (uint32_t insn)
3214 {
3215 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3216 }
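/* Note that in the instruction encoding the 21-bit ADR/ADRP immediate is
   split into two fields: immlo occupies bits [30:29] and immhi occupies
   bits [23:5], which is why decode_adr_imm above and reencode_adr_imm
   below shuffle the two pieces separately.  */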
3217
3218 /* Decode the 12-bit imm of add immediate. */
3219 static inline uint32_t
3220 decode_add_imm (uint32_t insn)
3221 {
3222 return (insn >> 10) & MASK (12);
3223 }
3224
3225
3226 /* Encode the 26-bit offset of unconditional branch. */
3227 static inline uint32_t
3228 reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3229 {
3230 return (insn & ~MASK (26)) | (ofs & MASK (26));
3231 }
3232
3233 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3234 static inline uint32_t
3235 reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3236 {
3237 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3238 }
3239
3240 /* Encode the 19-bit offset of load literal. */
3241 static inline uint32_t
3242 reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3243 {
3244 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3245 }
3246
3247 /* Encode the 14-bit offset of test & branch. */
3248 static inline uint32_t
3249 reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3250 {
3251 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3252 }
3253
3254 /* Reencode the imm field of move wide. */
3255 static inline uint32_t
3256 reencode_movw_imm (uint32_t insn, uint32_t imm)
3257 {
3258 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3259 }
3260
3261 /* Reencode the imm field of adr. */
3262 static inline uint32_t
3263 reencode_adr_imm (uint32_t insn, uint32_t imm)
3264 {
3265 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3266 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3267 }
3268
3269 /* Reencode the imm field of ld/st pos immediate. */
3270 static inline uint32_t
3271 reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3272 {
3273 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3274 }
3275
3276 /* Reencode the imm field of add immediate. */
3277 static inline uint32_t
3278 reencode_add_imm (uint32_t insn, uint32_t imm)
3279 {
3280 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3281 }
3282
3283 /* Reencode mov[zn] to movz. */
3284 static inline uint32_t
3285 reencode_movzn_to_movz (uint32_t opcode)
3286 {
3287 return opcode | (1 << 30);
3288 }
3289
3290 /* Reencode mov[zn] to movn. */
3291 static inline uint32_t
3292 reencode_movzn_to_movn (uint32_t opcode)
3293 {
3294 return opcode & ~(1 << 30);
3295 }
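/* MOVZ and MOVN differ only in bit 30 of the opcode (1 for MOVZ, 0 for
   MOVN), which is the bit the two helpers above set or clear.  */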
3296
3297 /* Insert the addend/value into the instruction or data object being
3298 relocated. */
3299 static bfd_reloc_status_type
3300 bfd_elf_aarch64_put_addend (bfd *abfd,
3301 bfd_byte *address,
3302 reloc_howto_type *howto, bfd_signed_vma addend)
3303 {
3304 bfd_reloc_status_type status = bfd_reloc_ok;
3305 bfd_signed_vma old_addend = addend;
3306 bfd_vma contents;
3307 int size;
3308
3309 size = bfd_get_reloc_size (howto);
3310 switch (size)
3311 {
3312 case 2:
3313 contents = bfd_get_16 (abfd, address);
3314 break;
3315 case 4:
3316 if (howto->src_mask != 0xffffffff)
3317 /* Must be 32-bit instruction, always little-endian. */
3318 contents = bfd_getl32 (address);
3319 else
3320 /* Must be 32-bit data (endianness dependent). */
3321 contents = bfd_get_32 (abfd, address);
3322 break;
3323 case 8:
3324 contents = bfd_get_64 (abfd, address);
3325 break;
3326 default:
3327 abort ();
3328 }
3329
3330 switch (howto->complain_on_overflow)
3331 {
3332 case complain_overflow_dont:
3333 break;
3334 case complain_overflow_signed:
3335 status = aarch64_signed_overflow (addend,
3336 howto->bitsize + howto->rightshift);
3337 break;
3338 case complain_overflow_unsigned:
3339 status = aarch64_unsigned_overflow (addend,
3340 howto->bitsize + howto->rightshift);
3341 break;
3342 case complain_overflow_bitfield:
3343 default:
3344 abort ();
3345 }
3346
3347 addend >>= howto->rightshift;
3348
3349 switch (elfNN_aarch64_bfd_reloc_from_howto (howto))
3350 {
3351 case BFD_RELOC_AARCH64_JUMP26:
3352 case BFD_RELOC_AARCH64_CALL26:
3353 contents = reencode_branch_ofs_26 (contents, addend);
3354 break;
3355
3356 case BFD_RELOC_AARCH64_BRANCH19:
3357 contents = reencode_cond_branch_ofs_19 (contents, addend);
3358 break;
3359
3360 case BFD_RELOC_AARCH64_TSTBR14:
3361 contents = reencode_tst_branch_ofs_14 (contents, addend);
3362 break;
3363
3364 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
3365 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3366 if (old_addend & ((1 << howto->rightshift) - 1))
3367 return bfd_reloc_overflow;
3368 contents = reencode_ld_lit_ofs_19 (contents, addend);
3369 break;
3370
3371 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3372 break;
3373
3374 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3375 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3376 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3377 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3378 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
3379 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3380 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
3381 contents = reencode_adr_imm (contents, addend);
3382 break;
3383
3384 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3385 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3386 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3387 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3388 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3389 case BFD_RELOC_AARCH64_ADD_LO12:
3390 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3391 12 bits of the page offset following
3392 BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
3393 (pc-relative) page base. */
3394 contents = reencode_add_imm (contents, addend);
3395 break;
3396
3397 case BFD_RELOC_AARCH64_LDST8_LO12:
3398 case BFD_RELOC_AARCH64_LDST16_LO12:
3399 case BFD_RELOC_AARCH64_LDST32_LO12:
3400 case BFD_RELOC_AARCH64_LDST64_LO12:
3401 case BFD_RELOC_AARCH64_LDST128_LO12:
3402 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3403 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3404 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3405 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3406 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3407 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3408 if (old_addend & ((1 << howto->rightshift) - 1))
3409 return bfd_reloc_overflow;
3410 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3411 12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
3412 which computes the (pc-relative) page base. */
3413 contents = reencode_ldst_pos_imm (contents, addend);
3414 break;
3415
3416 /* Group relocations to create high bits of a 16, 32, 48 or 64
3417 bit signed data or abs address inline. Will change
3418 instruction to MOVN or MOVZ depending on sign of calculated
3419 value. */
3420
3421 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3422 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3423 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3424 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3425 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3426 case BFD_RELOC_AARCH64_MOVW_G0_S:
3427 case BFD_RELOC_AARCH64_MOVW_G1_S:
3428 case BFD_RELOC_AARCH64_MOVW_G2_S:
3429 /* NOTE: We can only come here with movz or movn. */
3430 if (addend < 0)
3431 {
3432 /* Force use of MOVN. */
3433 addend = ~addend;
3434 contents = reencode_movzn_to_movn (contents);
3435 }
3436 else
3437 {
3438 /* Force use of MOVZ. */
3439 contents = reencode_movzn_to_movz (contents);
3440 }
3441 /* fall through */
3442
3443 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3444 data or abs address inline. */
3445
3446 case BFD_RELOC_AARCH64_MOVW_G0:
3447 case BFD_RELOC_AARCH64_MOVW_G0_NC:
3448 case BFD_RELOC_AARCH64_MOVW_G1:
3449 case BFD_RELOC_AARCH64_MOVW_G1_NC:
3450 case BFD_RELOC_AARCH64_MOVW_G2:
3451 case BFD_RELOC_AARCH64_MOVW_G2_NC:
3452 case BFD_RELOC_AARCH64_MOVW_G3:
3453 contents = reencode_movw_imm (contents, addend);
3454 break;
3455
3456 default:
3457 /* Repack simple data */
3458 if (howto->dst_mask & (howto->dst_mask + 1))
3459 return bfd_reloc_notsupported;
3460
3461 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3462 break;
3463 }
3464
3465 switch (size)
3466 {
3467 case 2:
3468 bfd_put_16 (abfd, contents, address);
3469 break;
3470 case 4:
3471 if (howto->dst_mask != 0xffffffff)
3472 /* must be 32-bit instruction, always little-endian */
3473 bfd_putl32 (contents, address);
3474 else
3475 /* must be 32-bit data (endianness dependent) */
3476 bfd_put_32 (abfd, contents, address);
3477 break;
3478 case 8:
3479 bfd_put_64 (abfd, contents, address);
3480 break;
3481 default:
3482 abort ();
3483 }
3484
3485 return status;
3486 }
3487
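/* Return the run-time address of the GOT entry reserved for the symbol H.
   For a static link, or when H binds locally, the entry is also
   initialised with VALUE here; the low bit of h->got.offset records that
   this has been done so the store happens only once.  Otherwise the
   entry is left for a dynamic relocation and *UNRESOLVED_RELOC_P is
   cleared.  */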
3488 static bfd_vma
3489 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3490 struct elf_aarch64_link_hash_table
3491 *globals, struct bfd_link_info *info,
3492 bfd_vma value, bfd *output_bfd,
3493 bfd_boolean *unresolved_reloc_p)
3494 {
3495 bfd_vma off = (bfd_vma) - 1;
3496 asection *basegot = globals->root.sgot;
3497 bfd_boolean dyn = globals->root.dynamic_sections_created;
3498
3499 if (h != NULL)
3500 {
3501 BFD_ASSERT (basegot != NULL);
3502 off = h->got.offset;
3503 BFD_ASSERT (off != (bfd_vma) - 1);
3504 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3505 || (info->shared
3506 && SYMBOL_REFERENCES_LOCAL (info, h))
3507 || (ELF_ST_VISIBILITY (h->other)
3508 && h->root.type == bfd_link_hash_undefweak))
3509 {
3510 /* This is actually a static link, or it is a -Bsymbolic link
3511 and the symbol is defined locally. We must initialize this
3512 entry in the global offset table. Since the offset must
3513 always be a multiple of 8 (4 in the case of ILP32), we use
3514 the least significant bit to record whether we have
3515 initialized it already.
3516 When doing a dynamic link, we create a .rel(a).got relocation
3517 entry to initialize the value. This is done in the
3518 finish_dynamic_symbol routine. */
3519 if ((off & 1) != 0)
3520 off &= ~1;
3521 else
3522 {
3523 bfd_put_NN (output_bfd, value, basegot->contents + off);
3524 h->got.offset |= 1;
3525 }
3526 }
3527 else
3528 *unresolved_reloc_p = FALSE;
3529
3530 off = off + basegot->output_section->vma + basegot->output_offset;
3531 }
3532
3533 return off;
3534 }
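
/* Illustrative note (editor's sketch, not from the original
   sources): the value returned above is the run-time address of the
   symbol's GOT slot, composed as

     basegot->output_section->vma   - where the output section lands
     + basegot->output_offset       - position of .got in that section
     + off                          - the slot's offset within .got

   The least significant bit of h->got.offset is only the
   "initialized" marker described in the comment above; it is masked
   off before the offset is used.  */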
3535
3536 /* Change R_TYPE to a more efficient access model where possible,
3537 return the new reloc type. */
3538
3539 static bfd_reloc_code_real_type
3540 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3541 struct elf_link_hash_entry *h)
3542 {
3543 bfd_boolean is_local = h == NULL;
3544
3545 switch (r_type)
3546 {
3547 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3548 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3549 return (is_local
3550 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3551 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3552
3553 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3554 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3555 return (is_local
3556 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3557 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3558
3559 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3560 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3561
3562 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3563 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3564
3565 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3566 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3567 /* Instructions with these relocations will become NOPs. */
3568 return BFD_RELOC_AARCH64_NONE;
3569
3570 default:
3571 break;
3572 }
3573
3574 return r_type;
3575 }
3576
3577 static unsigned int
3578 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3579 {
3580 switch (r_type)
3581 {
3582 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3583 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3584 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3585 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3586 return GOT_NORMAL;
3587
3588 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3589 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3590 return GOT_TLS_GD;
3591
3592 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3593 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3594 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3595 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3596 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3597 return GOT_TLSDESC_GD;
3598
3599 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3600 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3601 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3602 return GOT_TLS_IE;
3603
3604 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3605 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3606 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3607 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3608 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3609 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3610 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3611 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3612 return GOT_UNKNOWN;
3613
3614 default:
3615 break;
3616 }
3617 return GOT_UNKNOWN;
3618 }
3619
3620 static bfd_boolean
3621 aarch64_can_relax_tls (bfd *input_bfd,
3622 struct bfd_link_info *info,
3623 bfd_reloc_code_real_type r_type,
3624 struct elf_link_hash_entry *h,
3625 unsigned long r_symndx)
3626 {
3627 unsigned int symbol_got_type;
3628 unsigned int reloc_got_type;
3629
3630 if (! IS_AARCH64_TLS_RELOC (r_type))
3631 return FALSE;
3632
3633 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3634 reloc_got_type = aarch64_reloc_got_type (r_type);
3635
3636 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3637 return TRUE;
3638
3639 if (info->shared)
3640 return FALSE;
3641
3642 if (h && h->root.type == bfd_link_hash_undefweak)
3643 return FALSE;
3644
3645 return TRUE;
3646 }
3647
3648 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3649 enumerator. */
3650
3651 static bfd_reloc_code_real_type
3652 aarch64_tls_transition (bfd *input_bfd,
3653 struct bfd_link_info *info,
3654 unsigned int r_type,
3655 struct elf_link_hash_entry *h,
3656 unsigned long r_symndx)
3657 {
3658 bfd_reloc_code_real_type bfd_r_type
3659 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3660
3661 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3662 return bfd_r_type;
3663
3664 return aarch64_tls_transition_without_check (bfd_r_type, h);
3665 }
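
/* Example of the transition above (added for illustration; not part
   of the original sources): when linking an executable (info->shared
   is false) and the reference is to a local symbol (h == NULL), the
   general dynamic pair

     BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
     BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC

   is rewritten to the local exec pair

     BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
     BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC

   For a preemptible global symbol in a shared link,
   aarch64_can_relax_tls returns FALSE (unless the symbol already has
   an IE-style GOT entry) and the relocation type is left
   unchanged.  */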
3666
3667 /* Return the base VMA address which should be subtracted from real addresses
3668 when resolving R_AARCH64_TLS_DTPREL relocation. */
3669
3670 static bfd_vma
3671 dtpoff_base (struct bfd_link_info *info)
3672 {
3673 /* If tls_sec is NULL, we should have signalled an error already. */
3674 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3675 return elf_hash_table (info)->tls_sec->vma;
3676 }
3677
3678 /* Return the base VMA address which should be subtracted from real addresses
3679 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3680
3681 static bfd_vma
3682 tpoff_base (struct bfd_link_info *info)
3683 {
3684 struct elf_link_hash_table *htab = elf_hash_table (info);
3685
3686 /* If tls_sec is NULL, we should have signalled an error already. */
3687 if (htab->tls_sec == NULL)
3688 return 0;
3689
3690 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3691 htab->tls_sec->alignment_power);
3692 return htab->tls_sec->vma - base;
3693 }
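
/* Worked example (editor's sketch; the numbers are illustrative
   only): for the 64-bit target TCB_SIZE works out to 16 bytes, so if
   the TLS output section starts at VMA 0x20000 and is 16-byte
   aligned, align_power (16, 4) == 16 and tpoff_base () returns
   0x20000 - 16 = 0x1fff0.  A thread-local symbol at VMA 0x20008 then
   resolves to the TP-relative offset 0x20008 - 0x1fff0 = 0x18, i.e.
   the first 16 bytes above the thread pointer are reserved for the
   TCB, as required by the AArch64 variant 1 TLS layout.  */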
3694
3695 static bfd_vma *
3696 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3697 unsigned long r_symndx)
3698 {
3699 /* Return a pointer to the recorded GOT offset for the symbol
3700 referred to by H, or by R_SYMNDX for a local symbol. */
3701 if (h != NULL)
3702 return &h->got.offset;
3703 else
3704 {
3705 /* local symbol */
3706 struct elf_aarch64_local_symbol *l;
3707
3708 l = elf_aarch64_locals (input_bfd);
3709 return &l[r_symndx].got_offset;
3710 }
3711 }
3712
3713 static void
3714 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3715 unsigned long r_symndx)
3716 {
3717 bfd_vma *p;
3718 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3719 *p |= 1;
3720 }
3721
3722 static int
3723 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3724 unsigned long r_symndx)
3725 {
3726 bfd_vma value;
3727 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3728 return value & 1;
3729 }
3730
3731 static bfd_vma
3732 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3733 unsigned long r_symndx)
3734 {
3735 bfd_vma value;
3736 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3737 value &= ~1;
3738 return value;
3739 }
3740
3741 static bfd_vma *
3742 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3743 unsigned long r_symndx)
3744 {
3745 /* Return a pointer to the recorded TLS descriptor GOT offset for
3746 the symbol referred to by H, or by R_SYMNDX for a local symbol. */
3747 if (h != NULL)
3748 {
3749 struct elf_aarch64_link_hash_entry *eh;
3750 eh = (struct elf_aarch64_link_hash_entry *) h;
3751 return &eh->tlsdesc_got_jump_table_offset;
3752 }
3753 else
3754 {
3755 /* local symbol */
3756 struct elf_aarch64_local_symbol *l;
3757
3758 l = elf_aarch64_locals (input_bfd);
3759 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3760 }
3761 }
3762
3763 static void
3764 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3765 unsigned long r_symndx)
3766 {
3767 bfd_vma *p;
3768 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3769 *p |= 1;
3770 }
3771
3772 static int
3773 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3774 struct elf_link_hash_entry *h,
3775 unsigned long r_symndx)
3776 {
3777 bfd_vma value;
3778 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3779 return value & 1;
3780 }
3781
3782 static bfd_vma
3783 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3784 unsigned long r_symndx)
3785 {
3786 bfd_vma value;
3787 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3788 value &= ~1;
3789 return value;
3790 }
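
/* Usage sketch for the accessors above (added for illustration
   only): both the ordinary GOT offset and the TLS descriptor GOT
   offset borrow bit 0 of the stored value as an "entry already
   emitted" flag, which works because the real offsets are always at
   least word aligned.  A typical sequence while relocating is:

     if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
       {
         ... emit the GOT contents and any dynamic reloc once ...
         symbol_got_offset_mark (input_bfd, h, r_symndx);
       }
     off = symbol_got_offset (input_bfd, h, r_symndx);

   where the last call returns the offset with the flag bit already
   masked off.  */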
3791
3792 /* Perform a relocation as part of a final link. */
3793 static bfd_reloc_status_type
3794 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
3795 bfd *input_bfd,
3796 bfd *output_bfd,
3797 asection *input_section,
3798 bfd_byte *contents,
3799 Elf_Internal_Rela *rel,
3800 bfd_vma value,
3801 struct bfd_link_info *info,
3802 asection *sym_sec,
3803 struct elf_link_hash_entry *h,
3804 bfd_boolean *unresolved_reloc_p,
3805 bfd_boolean save_addend,
3806 bfd_vma *saved_addend)
3807 {
3808 unsigned int r_type = howto->type;
3809 bfd_reloc_code_real_type bfd_r_type
3810 = elfNN_aarch64_bfd_reloc_from_howto (howto);
3811 bfd_reloc_code_real_type new_bfd_r_type;
3812 unsigned long r_symndx;
3813 bfd_byte *hit_data = contents + rel->r_offset;
3814 bfd_vma place;
3815 bfd_signed_vma signed_addend;
3816 struct elf_aarch64_link_hash_table *globals;
3817 bfd_boolean weak_undef_p;
3818
3819 globals = elf_aarch64_hash_table (info);
3820
3821 BFD_ASSERT (is_aarch64_elf (input_bfd));
3822
3823 r_symndx = ELFNN_R_SYM (rel->r_info);
3824
3825 /* It is possible to have linker relaxations on some TLS access
3826 models. Update our information here. */
3827 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3828 if (new_bfd_r_type != bfd_r_type)
3829 {
3830 bfd_r_type = new_bfd_r_type;
3831 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
3832 BFD_ASSERT (howto != NULL);
3833 r_type = howto->type;
3834 }
3835
3836 place = input_section->output_section->vma
3837 + input_section->output_offset + rel->r_offset;
3838
3839 /* Get addend, accumulating the addend for consecutive relocs
3840 which refer to the same offset. */
3841 signed_addend = saved_addend ? *saved_addend : 0;
3842 signed_addend += rel->r_addend;
3843
3844 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3845 : bfd_is_und_section (sym_sec));
3846
3847 switch (bfd_r_type)
3848 {
3849 case BFD_RELOC_AARCH64_NONE:
3850 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3851 *unresolved_reloc_p = FALSE;
3852 return bfd_reloc_ok;
3853
3854 case BFD_RELOC_AARCH64_NN:
3855
3856 /* When generating a shared object or relocatable executable, these
3857 relocations are copied into the output file to be resolved at
3858 run time. */
3859 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3860 && (input_section->flags & SEC_ALLOC)
3861 && (h == NULL
3862 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3863 || h->root.type != bfd_link_hash_undefweak))
3864 {
3865 Elf_Internal_Rela outrel;
3866 bfd_byte *loc;
3867 bfd_boolean skip, relocate;
3868 asection *sreloc;
3869
3870 *unresolved_reloc_p = FALSE;
3871
3872 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3873 input_section, 1);
3874 if (sreloc == NULL)
3875 return bfd_reloc_notsupported;
3876
3877 skip = FALSE;
3878 relocate = FALSE;
3879
3880 outrel.r_addend = signed_addend;
3881 outrel.r_offset =
3882 _bfd_elf_section_offset (output_bfd, info, input_section,
3883 rel->r_offset);
3884 if (outrel.r_offset == (bfd_vma) - 1)
3885 skip = TRUE;
3886 else if (outrel.r_offset == (bfd_vma) - 2)
3887 {
3888 skip = TRUE;
3889 relocate = TRUE;
3890 }
3891
3892 outrel.r_offset += (input_section->output_section->vma
3893 + input_section->output_offset);
3894
3895 if (skip)
3896 memset (&outrel, 0, sizeof outrel);
3897 else if (h != NULL
3898 && h->dynindx != -1
3899 && (!info->shared || !info->symbolic || !h->def_regular))
3900 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
3901 else
3902 {
3903 int symbol;
3904
3905 /* On SVR4-ish systems, the dynamic loader cannot
3906 relocate the text and data segments independently,
3907 so the symbol does not matter. */
3908 symbol = 0;
3909 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
3910 outrel.r_addend += value;
3911 }
3912
3913 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3914 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
3915
3916 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3917 {
3918 /* Sanity check: we should have previously allocated
3919 sufficient space in the relocation section for the
3920 number of relocations we actually want to emit. */
3921 abort ();
3922 }
3923
3924 /* If this reloc is against an external symbol, we do not want to
3925 fiddle with the addend. Otherwise, we need to include the symbol
3926 value so that it becomes an addend for the dynamic reloc. */
3927 if (!relocate)
3928 return bfd_reloc_ok;
3929
3930 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3931 contents, rel->r_offset, value,
3932 signed_addend);
3933 }
3934 else
3935 value += signed_addend;
3936 break;
3937
3938 case BFD_RELOC_AARCH64_JUMP26:
3939 case BFD_RELOC_AARCH64_CALL26:
3940 {
3941 asection *splt = globals->root.splt;
3942 bfd_boolean via_plt_p =
3943 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3944
3945 /* A call to an undefined weak symbol is converted to a jump to
3946 the next instruction unless a PLT entry will be created.
3947 The jump to the next instruction is optimized as a NOP.
3948 Do the same for local undefined symbols. */
3949 if (weak_undef_p && ! via_plt_p)
3950 {
3951 bfd_putl32 (INSN_NOP, hit_data);
3952 return bfd_reloc_ok;
3953 }
3954
3955 /* If the call goes through a PLT entry, make sure to
3956 check distance to the right destination address. */
3957 if (via_plt_p)
3958 {
3959 value = (splt->output_section->vma
3960 + splt->output_offset + h->plt.offset);
3961 *unresolved_reloc_p = FALSE;
3962 }
3963
3964 /* If the target symbol is global and marked as a function, the
3965 relocation applies to a function call or a tail call. In this
3966 situation we can veneer out-of-range branches. The veneers
3967 use IP0 and IP1, hence they cannot be used for arbitrary
3968 out-of-range branches that occur within the body of a function. */
3969 if (h && h->type == STT_FUNC)
3970 {
3971 /* Check if a stub has to be inserted because the destination
3972 is too far away. */
3973 if (! aarch64_valid_branch_p (value, place))
3974 {
3975 /* The target is out of reach, so redirect the branch to
3976 the local stub for this function. */
3977 struct elf_aarch64_stub_hash_entry *stub_entry;
3978 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
3979 sym_sec, h,
3980 rel, globals);
3981 if (stub_entry != NULL)
3982 value = (stub_entry->stub_offset
3983 + stub_entry->stub_sec->output_offset
3984 + stub_entry->stub_sec->output_section->vma);
3985 }
3986 }
3987 }
3988 value = aarch64_resolve_relocation (r_type, place, value,
3989 signed_addend, weak_undef_p);
3990 break;
3991
3992 case BFD_RELOC_AARCH64_16:
3993 #if ARCH_SIZE == 64
3994 case BFD_RELOC_AARCH64_32:
3995 #endif
3996 case BFD_RELOC_AARCH64_ADD_LO12:
3997 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
3998 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3999 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4000 case BFD_RELOC_AARCH64_BRANCH19:
4001 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4002 case BFD_RELOC_AARCH64_LDST8_LO12:
4003 case BFD_RELOC_AARCH64_LDST16_LO12:
4004 case BFD_RELOC_AARCH64_LDST32_LO12:
4005 case BFD_RELOC_AARCH64_LDST64_LO12:
4006 case BFD_RELOC_AARCH64_LDST128_LO12:
4007 case BFD_RELOC_AARCH64_MOVW_G0_S:
4008 case BFD_RELOC_AARCH64_MOVW_G1_S:
4009 case BFD_RELOC_AARCH64_MOVW_G2_S:
4010 case BFD_RELOC_AARCH64_MOVW_G0:
4011 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4012 case BFD_RELOC_AARCH64_MOVW_G1:
4013 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4014 case BFD_RELOC_AARCH64_MOVW_G2:
4015 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4016 case BFD_RELOC_AARCH64_MOVW_G3:
4017 case BFD_RELOC_AARCH64_16_PCREL:
4018 case BFD_RELOC_AARCH64_32_PCREL:
4019 case BFD_RELOC_AARCH64_64_PCREL:
4020 case BFD_RELOC_AARCH64_TSTBR14:
4021 value = aarch64_resolve_relocation (r_type, place, value,
4022 signed_addend, weak_undef_p);
4023 break;
4024
4025 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4026 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4027 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4028 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4029 if (globals->root.sgot == NULL)
4030 BFD_ASSERT (h != NULL);
4031
4032 if (h != NULL)
4033 {
4034 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4035 output_bfd,
4036 unresolved_reloc_p);
4037 value = aarch64_resolve_relocation (r_type, place, value,
4038 0, weak_undef_p);
4039 }
4040 break;
4041
4042 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4043 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4044 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4045 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4046 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4047 if (globals->root.sgot == NULL)
4048 return bfd_reloc_notsupported;
4049
4050 value = (symbol_got_offset (input_bfd, h, r_symndx)
4051 + globals->root.sgot->output_section->vma
4052 + globals->root.sgot->output_offset);
4053
4054 value = aarch64_resolve_relocation (r_type, place, value,
4055 0, weak_undef_p);
4056 *unresolved_reloc_p = FALSE;
4057 break;
4058
4059 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4060 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4061 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4062 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4063 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4064 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4065 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4066 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4067 value = aarch64_resolve_relocation (r_type, place, value,
4068 signed_addend - tpoff_base (info), weak_undef_p);
4069 *unresolved_reloc_p = FALSE;
4070 break;
4071
4072 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4073 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4074 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4075 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4076 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4077 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4078 if (globals->root.sgot == NULL)
4079 return bfd_reloc_notsupported;
4080
4081 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4082 + globals->root.sgotplt->output_section->vma
4083 + globals->root.sgotplt->output_offset
4084 + globals->sgotplt_jump_table_size);
4085
4086 value = aarch64_resolve_relocation (r_type, place, value,
4087 0, weak_undef_p);
4088 *unresolved_reloc_p = FALSE;
4089 break;
4090
4091 default:
4092 return bfd_reloc_notsupported;
4093 }
4094
4095 if (saved_addend)
4096 *saved_addend = value;
4097
4098 /* Only apply the final relocation in a sequence. */
4099 if (save_addend)
4100 return bfd_reloc_continue;
4101
4102 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4103 }
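
/* Note on SAVE_ADDEND/SAVED_ADDEND (an editor's illustration, not
   part of the original sources): when several relocations share the
   same r_offset, the caller passes save_addend = TRUE for all but
   the last one.  Each intermediate call stores its computed value in
   *saved_addend and returns bfd_reloc_continue; that value is then
   picked up as the starting addend of the next relocation at the
   same offset.  Only the final relocation in the sequence actually
   patches the section contents via bfd_elf_aarch64_put_addend.  */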
4104
4105 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4106 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4107 link.
4108
4109 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4110 is to then call final_link_relocate. Return other values in the
4111 case of error. */
4112
4113 static bfd_reloc_status_type
4114 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4115 bfd *input_bfd, bfd_byte *contents,
4116 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4117 {
4118 bfd_boolean is_local = h == NULL;
4119 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4120 unsigned long insn;
4121
4122 BFD_ASSERT (globals && input_bfd && contents && rel);
4123
4124 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4125 {
4126 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4127 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4128 if (is_local)
4129 {
4130 /* GD->LE relaxation:
4131 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4132 or
4133 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4134 */
4135 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4136 return bfd_reloc_continue;
4137 }
4138 else
4139 {
4140 /* GD->IE relaxation:
4141 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4142 or
4143 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4144 */
4145 insn = bfd_getl32 (contents + rel->r_offset);
4146 return bfd_reloc_continue;
4147 }
4148
4149 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4150 if (is_local)
4151 {
4152 /* GD->LE relaxation:
4153 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4154 */
4155 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4156 return bfd_reloc_continue;
4157 }
4158 else
4159 {
4160 /* GD->IE relaxation:
4161 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4162 */
4163 insn = bfd_getl32 (contents + rel->r_offset);
4164 insn &= 0xfffffff0;
4165 bfd_putl32 (insn, contents + rel->r_offset);
4166 return bfd_reloc_continue;
4167 }
4168
4169 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4170 if (is_local)
4171 {
4172 /* GD->LE relaxation
4173 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4174 bl __tls_get_addr => mrs x1, tpidr_el0
4175 nop => add x0, x1, x0
4176 */
4177
4178 /* First kill the tls_get_addr reloc on the bl instruction. */
4179 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4180 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4181
4182 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4183 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4184 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4185 return bfd_reloc_continue;
4186 }
4187 else
4188 {
4189 /* GD->IE relaxation
4190 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4191 BL __tls_get_addr => mrs x1, tpidr_el0
4192 R_AARCH64_CALL26
4193 NOP => add x0, x1, x0
4194 */
4195
4196 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4197
4198 /* Remove the relocation on the BL instruction. */
4199 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4200
4201 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4202
4203 /* We choose to fixup the BL and NOP instructions using the
4204 offset from the second relocation to allow flexibility in
4205 scheduling instructions between the ADD and BL. */
4206 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4207 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4208 return bfd_reloc_continue;
4209 }
4210
4211 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4212 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4213 /* GD->IE/LE relaxation:
4214 add x0, x0, #:tlsdesc_lo12:var => nop
4215 blr xd => nop
4216 */
4217 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4218 return bfd_reloc_ok;
4219
4220 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4221 /* IE->LE relaxation:
4222 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4223 */
4224 if (is_local)
4225 {
4226 insn = bfd_getl32 (contents + rel->r_offset);
4227 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4228 }
4229 return bfd_reloc_continue;
4230
4231 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4232 /* IE->LE relaxation:
4233 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4234 */
4235 if (is_local)
4236 {
4237 insn = bfd_getl32 (contents + rel->r_offset);
4238 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4239 }
4240 return bfd_reloc_continue;
4241
4242 default:
4243 return bfd_reloc_continue;
4244 }
4245
4246 return bfd_reloc_ok;
4247 }
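
/* Summary of a complete GD->LE relaxation handled above (editor's
   sketch, assembled from the individual cases for illustration):

     adrp x0, :tlsgd:var            =>  movz x0, :tprel_g1:var
     add  x0, x0, #:tlsgd_lo12:var  =>  movk x0, :tprel_g0_nc:var
     bl   __tls_get_addr            =>  mrs  x1, tpidr_el0
     nop                            =>  add  x0, x1, x0

   The MOVZ/MOVK immediates are filled in afterwards by
   elfNN_aarch64_final_link_relocate using the rewritten TPREL
   relocation types, which is why most cases above return
   bfd_reloc_continue rather than bfd_reloc_ok.  */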
4248
4249 /* Relocate an AArch64 ELF section. */
4250
4251 static bfd_boolean
4252 elfNN_aarch64_relocate_section (bfd *output_bfd,
4253 struct bfd_link_info *info,
4254 bfd *input_bfd,
4255 asection *input_section,
4256 bfd_byte *contents,
4257 Elf_Internal_Rela *relocs,
4258 Elf_Internal_Sym *local_syms,
4259 asection **local_sections)
4260 {
4261 Elf_Internal_Shdr *symtab_hdr;
4262 struct elf_link_hash_entry **sym_hashes;
4263 Elf_Internal_Rela *rel;
4264 Elf_Internal_Rela *relend;
4265 const char *name;
4266 struct elf_aarch64_link_hash_table *globals;
4267 bfd_boolean save_addend = FALSE;
4268 bfd_vma addend = 0;
4269
4270 globals = elf_aarch64_hash_table (info);
4271
4272 symtab_hdr = &elf_symtab_hdr (input_bfd);
4273 sym_hashes = elf_sym_hashes (input_bfd);
4274
4275 rel = relocs;
4276 relend = relocs + input_section->reloc_count;
4277 for (; rel < relend; rel++)
4278 {
4279 unsigned int r_type;
4280 bfd_reloc_code_real_type bfd_r_type;
4281 bfd_reloc_code_real_type relaxed_bfd_r_type;
4282 reloc_howto_type *howto;
4283 unsigned long r_symndx;
4284 Elf_Internal_Sym *sym;
4285 asection *sec;
4286 struct elf_link_hash_entry *h;
4287 bfd_vma relocation;
4288 bfd_reloc_status_type r;
4289 arelent bfd_reloc;
4290 char sym_type;
4291 bfd_boolean unresolved_reloc = FALSE;
4292 char *error_message = NULL;
4293
4294 r_symndx = ELFNN_R_SYM (rel->r_info);
4295 r_type = ELFNN_R_TYPE (rel->r_info);
4296
4297 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4298 howto = bfd_reloc.howto;
4299
4300 if (howto == NULL)
4301 {
4302 (*_bfd_error_handler)
4303 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4304 input_bfd, input_section, r_type);
4305 return FALSE;
4306 }
4307 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4308
4309 h = NULL;
4310 sym = NULL;
4311 sec = NULL;
4312
4313 if (r_symndx < symtab_hdr->sh_info)
4314 {
4315 sym = local_syms + r_symndx;
4316 sym_type = ELFNN_ST_TYPE (sym->st_info);
4317 sec = local_sections[r_symndx];
4318
4319 /* An object file might have a reference to a local
4320 undefined symbol. This is a daft object file, but we
4321 should at least do something about it. */
4322 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4323 && bfd_is_und_section (sec)
4324 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4325 {
4326 if (!info->callbacks->undefined_symbol
4327 (info, bfd_elf_string_from_elf_section
4328 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4329 input_bfd, input_section, rel->r_offset, TRUE))
4330 return FALSE;
4331 }
4332
4333 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4334 }
4335 else
4336 {
4337 bfd_boolean warned;
4338
4339 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4340 r_symndx, symtab_hdr, sym_hashes,
4341 h, sec, relocation,
4342 unresolved_reloc, warned);
4343
4344 sym_type = h->type;
4345 }
4346
4347 if (sec != NULL && discarded_section (sec))
4348 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4349 rel, 1, relend, howto, 0, contents);
4350
4351 if (info->relocatable)
4352 {
4353 /* This is a relocatable link. We don't have to change
4354 anything, unless the reloc is against a section symbol,
4355 in which case we have to adjust according to where the
4356 section symbol winds up in the output section. */
4357 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4358 rel->r_addend += sec->output_offset;
4359 continue;
4360 }
4361
4362 if (h != NULL)
4363 name = h->root.root.string;
4364 else
4365 {
4366 name = (bfd_elf_string_from_elf_section
4367 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4368 if (name == NULL || *name == '\0')
4369 name = bfd_section_name (input_bfd, sec);
4370 }
4371
4372 if (r_symndx != 0
4373 && r_type != R_AARCH64_NONE
4374 && r_type != R_AARCH64_NULL
4375 && (h == NULL
4376 || h->root.type == bfd_link_hash_defined
4377 || h->root.type == bfd_link_hash_defweak)
4378 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4379 {
4380 (*_bfd_error_handler)
4381 ((sym_type == STT_TLS
4382 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4383 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4384 input_bfd,
4385 input_section, (long) rel->r_offset, howto->name, name);
4386 }
4387
4388 /* We relax only if we can see that there can be a valid transition
4389 from one reloc type to another.
4390 We call elfNN_aarch64_final_link_relocate unless we're completely
4391 done, i.e., the relaxation produced the final output we want. */
4392
4393 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4394 h, r_symndx);
4395 if (relaxed_bfd_r_type != bfd_r_type)
4396 {
4397 bfd_r_type = relaxed_bfd_r_type;
4398 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4399 BFD_ASSERT (howto != NULL);
4400 r_type = howto->type;
4401 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4402 unresolved_reloc = 0;
4403 }
4404 else
4405 r = bfd_reloc_continue;
4406
4407 /* There may be multiple consecutive relocations for the
4408 same offset. In that case we are supposed to treat the
4409 output of each relocation as the addend for the next. */
4410 if (rel + 1 < relend
4411 && rel->r_offset == rel[1].r_offset
4412 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4413 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4414 save_addend = TRUE;
4415 else
4416 save_addend = FALSE;
4417
4418 if (r == bfd_reloc_continue)
4419 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4420 input_section, contents, rel,
4421 relocation, info, sec,
4422 h, &unresolved_reloc,
4423 save_addend, &addend);
4424
4425 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4426 {
4427 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4428 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4429 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4430 {
4431 bfd_boolean need_relocs = FALSE;
4432 bfd_byte *loc;
4433 int indx;
4434 bfd_vma off;
4435
4436 off = symbol_got_offset (input_bfd, h, r_symndx);
4437 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4438
4439 need_relocs =
4440 (info->shared || indx != 0) &&
4441 (h == NULL
4442 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4443 || h->root.type != bfd_link_hash_undefweak);
4444
4445 BFD_ASSERT (globals->root.srelgot != NULL);
4446
4447 if (need_relocs)
4448 {
4449 Elf_Internal_Rela rela;
4450 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
4451 rela.r_addend = 0;
4452 rela.r_offset = globals->root.sgot->output_section->vma +
4453 globals->root.sgot->output_offset + off;
4454
4455
4456 loc = globals->root.srelgot->contents;
4457 loc += globals->root.srelgot->reloc_count++
4458 * RELOC_SIZE (htab);
4459 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4460
4461 if (indx == 0)
4462 {
4463 bfd_put_NN (output_bfd,
4464 relocation - dtpoff_base (info),
4465 globals->root.sgot->contents + off
4466 + GOT_ENTRY_SIZE);
4467 }
4468 else
4469 {
4470 /* This TLS symbol is global. We emit a
4471 relocation to fixup the tls offset at load
4472 time. */
4473 rela.r_info =
4474 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
4475 rela.r_addend = 0;
4476 rela.r_offset =
4477 (globals->root.sgot->output_section->vma
4478 + globals->root.sgot->output_offset + off
4479 + GOT_ENTRY_SIZE);
4480
4481 loc = globals->root.srelgot->contents;
4482 loc += globals->root.srelgot->reloc_count++
4483 * RELOC_SIZE (globals);
4484 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4485 bfd_put_NN (output_bfd, (bfd_vma) 0,
4486 globals->root.sgot->contents + off
4487 + GOT_ENTRY_SIZE);
4488 }
4489 }
4490 else
4491 {
4492 bfd_put_NN (output_bfd, (bfd_vma) 1,
4493 globals->root.sgot->contents + off);
4494 bfd_put_NN (output_bfd,
4495 relocation - dtpoff_base (info),
4496 globals->root.sgot->contents + off
4497 + GOT_ENTRY_SIZE);
4498 }
4499
4500 symbol_got_offset_mark (input_bfd, h, r_symndx);
4501 }
4502 break;
4503
4504 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4505 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4506 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4507 {
4508 bfd_boolean need_relocs = FALSE;
4509 bfd_byte *loc;
4510 int indx;
4511 bfd_vma off;
4512
4513 off = symbol_got_offset (input_bfd, h, r_symndx);
4514
4515 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4516
4517 need_relocs =
4518 (info->shared || indx != 0) &&
4519 (h == NULL
4520 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4521 || h->root.type != bfd_link_hash_undefweak);
4522
4523 BFD_ASSERT (globals->root.srelgot != NULL);
4524
4525 if (need_relocs)
4526 {
4527 Elf_Internal_Rela rela;
4528
4529 if (indx == 0)
4530 rela.r_addend = relocation - dtpoff_base (info);
4531 else
4532 rela.r_addend = 0;
4533
4534 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
4535 rela.r_offset = globals->root.sgot->output_section->vma +
4536 globals->root.sgot->output_offset + off;
4537
4538 loc = globals->root.srelgot->contents;
4539 loc += globals->root.srelgot->reloc_count++
4540 * RELOC_SIZE (htab);
4541
4542 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4543
4544 bfd_put_NN (output_bfd, rela.r_addend,
4545 globals->root.sgot->contents + off);
4546 }
4547 else
4548 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
4549 globals->root.sgot->contents + off);
4550
4551 symbol_got_offset_mark (input_bfd, h, r_symndx);
4552 }
4553 break;
4554
4555 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4556 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4557 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4558 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4559 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4560 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4561 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4562 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4563 break;
4564
4565 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4566 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4567 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4568 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4569 {
4570 bfd_boolean need_relocs = FALSE;
4571 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4572 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4573
4574 need_relocs = (h == NULL
4575 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4576 || h->root.type != bfd_link_hash_undefweak);
4577
4578 BFD_ASSERT (globals->root.srelgot != NULL);
4579 BFD_ASSERT (globals->root.sgot != NULL);
4580
4581 if (need_relocs)
4582 {
4583 bfd_byte *loc;
4584 Elf_Internal_Rela rela;
4585 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
4586
4587 rela.r_addend = 0;
4588 rela.r_offset = (globals->root.sgotplt->output_section->vma
4589 + globals->root.sgotplt->output_offset
4590 + off + globals->sgotplt_jump_table_size);
4591
4592 if (indx == 0)
4593 rela.r_addend = relocation - dtpoff_base (info);
4594
4595 /* Allocate the next available slot in the PLT reloc
4596 section to hold our R_AARCH64_TLSDESC; the next
4597 available slot is determined from reloc_count,
4598 which we step. Note that reloc_count was
4599 artificially moved down while allocating slots for
4600 the real PLT relocs, so that all of the PLT relocs
4601 will fit above the initial reloc_count and the
4602 extra stuff will fit below. */
4603 loc = globals->root.srelplt->contents;
4604 loc += globals->root.srelplt->reloc_count++
4605 * RELOC_SIZE (globals);
4606
4607 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4608
4609 bfd_put_NN (output_bfd, (bfd_vma) 0,
4610 globals->root.sgotplt->contents + off +
4611 globals->sgotplt_jump_table_size);
4612 bfd_put_NN (output_bfd, (bfd_vma) 0,
4613 globals->root.sgotplt->contents + off +
4614 globals->sgotplt_jump_table_size +
4615 GOT_ENTRY_SIZE);
4616 }
4617
4618 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4619 }
4620 break;
4621 default:
4622 break;
4623 }
4624
4625 if (!save_addend)
4626 addend = 0;
4627
4628
4629 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4630 because such sections are not SEC_ALLOC and thus ld.so will
4631 not process them. */
4632 if (unresolved_reloc
4633 && !((input_section->flags & SEC_DEBUGGING) != 0
4634 && h->def_dynamic)
4635 && _bfd_elf_section_offset (output_bfd, info, input_section,
4636 +rel->r_offset) != (bfd_vma) - 1)
4637 {
4638 (*_bfd_error_handler)
4639 (_
4640 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4641 input_bfd, input_section, (long) rel->r_offset, howto->name,
4642 h->root.root.string);
4643 return FALSE;
4644 }
4645
4646 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4647 {
4648 switch (r)
4649 {
4650 case bfd_reloc_overflow:
4651 /* If the overflowing reloc was to an undefined symbol,
4652 we have already printed one error message and there
4653 is no point complaining again. */
4654 if ((!h ||
4655 h->root.type != bfd_link_hash_undefined)
4656 && (!((*info->callbacks->reloc_overflow)
4657 (info, (h ? &h->root : NULL), name, howto->name,
4658 (bfd_vma) 0, input_bfd, input_section,
4659 rel->r_offset))))
4660 return FALSE;
4661 break;
4662
4663 case bfd_reloc_undefined:
4664 if (!((*info->callbacks->undefined_symbol)
4665 (info, name, input_bfd, input_section,
4666 rel->r_offset, TRUE)))
4667 return FALSE;
4668 break;
4669
4670 case bfd_reloc_outofrange:
4671 error_message = _("out of range");
4672 goto common_error;
4673
4674 case bfd_reloc_notsupported:
4675 error_message = _("unsupported relocation");
4676 goto common_error;
4677
4678 case bfd_reloc_dangerous:
4679 /* error_message should already be set. */
4680 goto common_error;
4681
4682 default:
4683 error_message = _("unknown error");
4684 /* Fall through. */
4685
4686 common_error:
4687 BFD_ASSERT (error_message != NULL);
4688 if (!((*info->callbacks->reloc_dangerous)
4689 (info, error_message, input_bfd, input_section,
4690 rel->r_offset)))
4691 return FALSE;
4692 break;
4693 }
4694 }
4695 }
4696
4697 return TRUE;
4698 }
4699
4700 /* Set the right machine number. */
4701
4702 static bfd_boolean
4703 elfNN_aarch64_object_p (bfd *abfd)
4704 {
4705 #if ARCH_SIZE == 32
4706 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
4707 #else
4708 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4709 #endif
4710 return TRUE;
4711 }
4712
4713 /* Function to keep AArch64 specific flags in the ELF header. */
4714
4715 static bfd_boolean
4716 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
4717 {
4718 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4719 {
4720 }
4721 else
4722 {
4723 elf_elfheader (abfd)->e_flags = flags;
4724 elf_flags_init (abfd) = TRUE;
4725 }
4726
4727 return TRUE;
4728 }
4729
4730 /* Copy backend specific data from one object module to another. */
4731
4732 static bfd_boolean
4733 elfNN_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4734 {
4735 flagword in_flags;
4736
4737 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4738 return TRUE;
4739
4740 in_flags = elf_elfheader (ibfd)->e_flags;
4741
4742 elf_elfheader (obfd)->e_flags = in_flags;
4743 elf_flags_init (obfd) = TRUE;
4744
4745 /* Also copy the EI_OSABI field. */
4746 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4747 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4748
4749 /* Copy object attributes. */
4750 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4751
4752 return TRUE;
4753 }
4754
4755 /* Merge backend specific data from an object file to the output
4756 object file when linking. */
4757
4758 static bfd_boolean
4759 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4760 {
4761 flagword out_flags;
4762 flagword in_flags;
4763 bfd_boolean flags_compatible = TRUE;
4764 asection *sec;
4765
4766 /* Check if we have the same endianness. */
4767 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4768 return FALSE;
4769
4770 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4771 return TRUE;
4772
4773 /* The input BFD must have had its flags initialised. */
4774 /* The following seems bogus to me -- The flags are initialized in
4775 the assembler but I don't think an elf_flags_init field is
4776 written into the object. */
4777 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4778
4779 in_flags = elf_elfheader (ibfd)->e_flags;
4780 out_flags = elf_elfheader (obfd)->e_flags;
4781
4782 if (!elf_flags_init (obfd))
4783 {
4784 /* If the input is the default architecture and had the default
4785 flags then do not bother setting the flags for the output
4786 architecture; instead allow future merges to do this. If no
4787 future merges ever set these flags then they will retain their
4788 uninitialised values, which, surprise surprise, correspond
4789 to the default values. */
4790 if (bfd_get_arch_info (ibfd)->the_default
4791 && elf_elfheader (ibfd)->e_flags == 0)
4792 return TRUE;
4793
4794 elf_flags_init (obfd) = TRUE;
4795 elf_elfheader (obfd)->e_flags = in_flags;
4796
4797 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4798 && bfd_get_arch_info (obfd)->the_default)
4799 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4800 bfd_get_mach (ibfd));
4801
4802 return TRUE;
4803 }
4804
4805 /* Identical flags must be compatible. */
4806 if (in_flags == out_flags)
4807 return TRUE;
4808
4809 /* Check to see if the input BFD actually contains any sections. If
4810 not, its flags may not have been initialised either, but it
4811 cannot actually cause any incompatibility. Do not short-circuit
4812 dynamic objects; their section list may be emptied by
4813 elf_link_add_object_symbols.
4814
4815 Also check to see if there are no code sections in the input.
4816 In this case there is no need to check for code specific flags.
4817 XXX - do we need to worry about floating-point format compatibility
4818 in data sections? */
4819 if (!(ibfd->flags & DYNAMIC))
4820 {
4821 bfd_boolean null_input_bfd = TRUE;
4822 bfd_boolean only_data_sections = TRUE;
4823
4824 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4825 {
4826 if ((bfd_get_section_flags (ibfd, sec)
4827 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4828 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4829 only_data_sections = FALSE;
4830
4831 null_input_bfd = FALSE;
4832 break;
4833 }
4834
4835 if (null_input_bfd || only_data_sections)
4836 return TRUE;
4837 }
4838
4839 return flags_compatible;
4840 }
4841
4842 /* Display the flags field. */
4843
4844 static bfd_boolean
4845 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4846 {
4847 FILE *file = (FILE *) ptr;
4848 unsigned long flags;
4849
4850 BFD_ASSERT (abfd != NULL && ptr != NULL);
4851
4852 /* Print normal ELF private data. */
4853 _bfd_elf_print_private_bfd_data (abfd, ptr);
4854
4855 flags = elf_elfheader (abfd)->e_flags;
4856 /* Ignore init flag - it may not be set, despite the flags field
4857 containing valid data. */
4858
4859 /* xgettext:c-format */
4860 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4861
4862 if (flags)
4863 fprintf (file, _("<Unrecognised flag bits set>"));
4864
4865 fputc ('\n', file);
4866
4867 return TRUE;
4868 }
4869
4870 /* Update the got entry reference counts for the section being removed. */
4871
4872 static bfd_boolean
4873 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
4874 struct bfd_link_info *info,
4875 asection *sec,
4876 const Elf_Internal_Rela * relocs)
4877 {
4878 struct elf_aarch64_link_hash_table *htab;
4879 Elf_Internal_Shdr *symtab_hdr;
4880 struct elf_link_hash_entry **sym_hashes;
4881 struct elf_aarch64_local_symbol *locals;
4882 const Elf_Internal_Rela *rel, *relend;
4883
4884 if (info->relocatable)
4885 return TRUE;
4886
4887 htab = elf_aarch64_hash_table (info);
4888
4889 if (htab == NULL)
4890 return FALSE;
4891
4892 elf_section_data (sec)->local_dynrel = NULL;
4893
4894 symtab_hdr = &elf_symtab_hdr (abfd);
4895 sym_hashes = elf_sym_hashes (abfd);
4896
4897 locals = elf_aarch64_locals (abfd);
4898
4899 relend = relocs + sec->reloc_count;
4900 for (rel = relocs; rel < relend; rel++)
4901 {
4902 unsigned long r_symndx;
4903 unsigned int r_type;
4904 struct elf_link_hash_entry *h = NULL;
4905
4906 r_symndx = ELFNN_R_SYM (rel->r_info);
4907
4908 if (r_symndx >= symtab_hdr->sh_info)
4909 {
4910 struct elf_aarch64_link_hash_entry *eh;
4911 struct elf_dyn_relocs **pp;
4912 struct elf_dyn_relocs *p;
4913
4914 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4915 while (h->root.type == bfd_link_hash_indirect
4916 || h->root.type == bfd_link_hash_warning)
4917 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4918 eh = (struct elf_aarch64_link_hash_entry *) h;
4919
4920 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
4921 {
4922 if (p->sec == sec)
4923 {
4924 /* Everything must go for SEC. */
4925 *pp = p->next;
4926 break;
4927 }
4928 }
4929 }
4930 else
4931 {
4932 Elf_Internal_Sym *isym;
4933
4934 /* A local symbol. */
4935 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
4936 abfd, r_symndx);
4937 if (isym == NULL)
4938 return FALSE;
4939 }
4940
4941 r_type = ELFNN_R_TYPE (rel->r_info);
4942 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
4943 {
4944 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4945 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4946 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4947 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4948 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4949 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4950 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4951 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4952 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4953 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4954 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4955 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4956 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4957 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4958 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4959 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4960 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4961 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4962 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4963 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4964 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4965 if (h != NULL)
4966 {
4967 if (h->got.refcount > 0)
4968 h->got.refcount -= 1;
4969 }
4970 else if (locals != NULL)
4971 {
4972 if (locals[r_symndx].got_refcount > 0)
4973 locals[r_symndx].got_refcount -= 1;
4974 }
4975 break;
4976
4977 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4978 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4979 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4980 if (h != NULL && info->executable)
4981 {
4982 if (h->plt.refcount > 0)
4983 h->plt.refcount -= 1;
4984 }
4985 break;
4986
4987 case BFD_RELOC_AARCH64_CALL26:
4988 case BFD_RELOC_AARCH64_JUMP26:
4989 /* If this is a local symbol then we resolve it
4990 directly without creating a PLT entry. */
4991 if (h == NULL)
4992 continue;
4993
4994 if (h->plt.refcount > 0)
4995 h->plt.refcount -= 1;
4996 break;
4997
4998 case BFD_RELOC_AARCH64_NN:
4999 if (h != NULL && info->executable)
5000 {
5001 if (h->plt.refcount > 0)
5002 h->plt.refcount -= 1;
5003 }
5004 break;
5005
5006 default:
5007 break;
5008 }
5009 }
5010
5011 return TRUE;
5012 }
5013
5014 /* Adjust a symbol defined by a dynamic object and referenced by a
5015 regular object. The current definition is in some section of the
5016 dynamic object, but we're not including those sections. We have to
5017 change the definition to something the rest of the link can
5018 understand. */
5019
5020 static bfd_boolean
5021 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5022 struct elf_link_hash_entry *h)
5023 {
5024 struct elf_aarch64_link_hash_table *htab;
5025 asection *s;
5026
5027 /* If this is a function, put it in the procedure linkage table. We
5028 will fill in the contents of the procedure linkage table later,
5029 when we know the address of the .got section. */
5030 if (h->type == STT_FUNC || h->needs_plt)
5031 {
5032 if (h->plt.refcount <= 0
5033 || SYMBOL_CALLS_LOCAL (info, h)
5034 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5035 && h->root.type == bfd_link_hash_undefweak))
5036 {
5037 /* This case can occur if we saw a CALL26 reloc in
5038 an input file, but the symbol wasn't referred to
5039 by a dynamic object or all references were
5040 garbage collected. In that case we can end up
5041 resolving the calls directly, without a PLT entry. */
5042 h->plt.offset = (bfd_vma) - 1;
5043 h->needs_plt = 0;
5044 }
5045
5046 return TRUE;
5047 }
5048 else
5049 /* It's possible that we incorrectly decided a .plt reloc was
5050 needed for a PC-relative reloc to a non-function sym in
5051 check_relocs. We can't decide accurately between function and
5052 non-function syms in check_relocs; objects loaded later in
5053 the link may change h->type. So fix it now. */
5054 h->plt.offset = (bfd_vma) - 1;
5055
5056
5057 /* If this is a weak symbol, and there is a real definition, the
5058 processor independent code will have arranged for us to see the
5059 real definition first, and we can just use the same value. */
5060 if (h->u.weakdef != NULL)
5061 {
5062 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5063 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5064 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5065 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5066 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5067 h->non_got_ref = h->u.weakdef->non_got_ref;
5068 return TRUE;
5069 }
5070
5071 /* If we are creating a shared library, we must presume that the
5072 only references to the symbol are via the global offset table.
5073 For such cases we need not do anything here; the relocations will
5074 be handled correctly by relocate_section. */
5075 if (info->shared)
5076 return TRUE;
5077
5078 /* If there are no references to this symbol that do not use the
5079 GOT, we don't need to generate a copy reloc. */
5080 if (!h->non_got_ref)
5081 return TRUE;
5082
5083 /* If -z nocopyreloc was given, we won't generate them either. */
5084 if (info->nocopyreloc)
5085 {
5086 h->non_got_ref = 0;
5087 return TRUE;
5088 }
5089
5090 /* We must allocate the symbol in our .dynbss section, which will
5091 become part of the .bss section of the executable. There will be
5092 an entry for this symbol in the .dynsym section. The dynamic
5093 object will contain position independent code, so all references
5094 from the dynamic object to this symbol will go through the global
5095 offset table. The dynamic linker will use the .dynsym entry to
5096 determine the address it must put in the global offset table, so
5097 both the dynamic object and the regular object will refer to the
5098 same memory location for the variable. */
5099
5100 htab = elf_aarch64_hash_table (info);
5101
5102 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5103 to copy the initial value out of the dynamic object and into the
5104 runtime process image. */
5105 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5106 {
5107 htab->srelbss->size += RELOC_SIZE (htab);
5108 h->needs_copy = 1;
5109 }
5110
5111 s = htab->sdynbss;
5112
5113 return _bfd_elf_adjust_dynamic_copy (h, s);
5114
5115 }
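
/* Illustration (editor's note, not from the original sources): the
   copy reloc path above is taken, for example, when an executable
   refers directly to a data symbol defined in a shared library.  The
   symbol is given space in .dynbss, an R_AARCH64_COPY relocation is
   emitted against it, and at start-up the dynamic linker copies the
   initial value out of the shared object, so both the executable and
   the library end up addressing the same .dynbss location.  */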
5116
5117 static bfd_boolean
5118 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5119 {
5120 struct elf_aarch64_local_symbol *locals;
5121 locals = elf_aarch64_locals (abfd);
5122 if (locals == NULL)
5123 {
5124 locals = (struct elf_aarch64_local_symbol *)
5125 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5126 if (locals == NULL)
5127 return FALSE;
5128 elf_aarch64_locals (abfd) = locals;
5129 }
5130 return TRUE;
5131 }
5132
5133 /* Look through the relocs for a section during the first phase. */
5134
5135 static bfd_boolean
5136 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5137 asection *sec, const Elf_Internal_Rela *relocs)
5138 {
5139 Elf_Internal_Shdr *symtab_hdr;
5140 struct elf_link_hash_entry **sym_hashes;
5141 const Elf_Internal_Rela *rel;
5142 const Elf_Internal_Rela *rel_end;
5143 asection *sreloc;
5144
5145 struct elf_aarch64_link_hash_table *htab;
5146
5147 if (info->relocatable)
5148 return TRUE;
5149
5150 BFD_ASSERT (is_aarch64_elf (abfd));
5151
5152 htab = elf_aarch64_hash_table (info);
5153 sreloc = NULL;
5154
5155 symtab_hdr = &elf_symtab_hdr (abfd);
5156 sym_hashes = elf_sym_hashes (abfd);
5157
5158 rel_end = relocs + sec->reloc_count;
5159 for (rel = relocs; rel < rel_end; rel++)
5160 {
5161 struct elf_link_hash_entry *h;
5162 unsigned long r_symndx;
5163 unsigned int r_type;
5164 bfd_reloc_code_real_type bfd_r_type;
5165
5166 r_symndx = ELFNN_R_SYM (rel->r_info);
5167 r_type = ELFNN_R_TYPE (rel->r_info);
5168
5169 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5170 {
5171 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5172 r_symndx);
5173 return FALSE;
5174 }
5175
5176 if (r_symndx < symtab_hdr->sh_info)
5177 h = NULL;
5178 else
5179 {
5180 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5181 while (h->root.type == bfd_link_hash_indirect
5182 || h->root.type == bfd_link_hash_warning)
5183 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5184
5185 /* PR15323, ref flags aren't set for references in the same
5186 object. */
5187 h->root.non_ir_ref = 1;
5188 }
5189
5190 /* Could be done earlier, if h were already available. */
5191 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5192
5193 switch (bfd_r_type)
5194 {
5195 case BFD_RELOC_AARCH64_NN:
5196
5197 /* We don't need to handle relocs into sections not going into
5198 the "real" output. */
5199 if ((sec->flags & SEC_ALLOC) == 0)
5200 break;
5201
5202 if (h != NULL)
5203 {
5204 if (!info->shared)
5205 h->non_got_ref = 1;
5206
5207 h->plt.refcount += 1;
5208 h->pointer_equality_needed = 1;
5209 }
5210
5211 /* No need to do anything if we're not creating a shared
5212 object. */
5213 if (! info->shared)
5214 break;
5215
5216 {
5217 struct elf_dyn_relocs *p;
5218 struct elf_dyn_relocs **head;
5219
5220 /* We must copy these reloc types into the output file.
5221 Create a reloc section in dynobj and make room for
5222 this reloc. */
5223 if (sreloc == NULL)
5224 {
5225 if (htab->root.dynobj == NULL)
5226 htab->root.dynobj = abfd;
5227
5228 sreloc = _bfd_elf_make_dynamic_reloc_section
5229 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5230
5231 if (sreloc == NULL)
5232 return FALSE;
5233 }
5234
5235 /* If this is a global symbol, we count the number of
5236 relocations we need for this symbol. */
5237 if (h != NULL)
5238 {
5239 struct elf_aarch64_link_hash_entry *eh;
5240 eh = (struct elf_aarch64_link_hash_entry *) h;
5241 head = &eh->dyn_relocs;
5242 }
5243 else
5244 {
5245 /* Track dynamic relocs needed for local syms too.
5246 We really need local syms available to do this
5247 easily. Oh well. */
5248
5249 asection *s;
5250 void **vpp;
5251 Elf_Internal_Sym *isym;
5252
5253 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5254 abfd, r_symndx);
5255 if (isym == NULL)
5256 return FALSE;
5257
5258 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5259 if (s == NULL)
5260 s = sec;
5261
5262 /* Beware of type punned pointers vs strict aliasing
5263 rules. */
5264 vpp = &(elf_section_data (s)->local_dynrel);
5265 head = (struct elf_dyn_relocs **) vpp;
5266 }
5267
5268 p = *head;
5269 if (p == NULL || p->sec != sec)
5270 {
5271 bfd_size_type amt = sizeof *p;
5272 p = ((struct elf_dyn_relocs *)
5273 bfd_zalloc (htab->root.dynobj, amt));
5274 if (p == NULL)
5275 return FALSE;
5276 p->next = *head;
5277 *head = p;
5278 p->sec = sec;
5279 }
5280
5281 p->count += 1;
5282
5283 }
5284 break;
5285
5286 /* RR: We probably want to keep a consistency check that
5287 there are no dangling GOT_PAGE relocs. */
5288 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5289 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5290 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5291 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5292 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5293 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5294 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5295 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5296 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5297 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5298 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5299 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5300 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5301 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5302 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5303 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5304 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5305 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5306 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5307 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5308 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5309 {
5310 unsigned got_type;
5311 unsigned old_got_type;
5312
5313 got_type = aarch64_reloc_got_type (bfd_r_type);
5314
5315 if (h)
5316 {
5317 h->got.refcount += 1;
5318 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5319 }
5320 else
5321 {
5322 struct elf_aarch64_local_symbol *locals;
5323
5324 if (!elfNN_aarch64_allocate_local_symbols
5325 (abfd, symtab_hdr->sh_info))
5326 return FALSE;
5327
5328 locals = elf_aarch64_locals (abfd);
5329 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5330 locals[r_symndx].got_refcount += 1;
5331 old_got_type = locals[r_symndx].got_type;
5332 }
5333
5334 /* If a variable is accessed with both general dynamic TLS
5335 methods, two slots may be created. */
5336 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5337 got_type |= old_got_type;
5338
5339 /* We will already have issued an error message if there
5340 is a TLS/non-TLS mismatch, based on the symbol type.
5341 So just combine any TLS types needed. */
5342 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5343 && got_type != GOT_NORMAL)
5344 got_type |= old_got_type;
5345
5346 /* If the symbol is accessed by both IE and GD methods, we
5347 are able to relax.  Turn off the GD flag, without
5348 disturbing any other TLS type that may be
5349 involved.  */
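/* For example (illustration only): a symbol first referenced through a
   TLSDESC general-dynamic sequence and then through an IE sequence
   reaches this point with got_type == (GOT_TLS_IE | GOT_TLSDESC_GD);
   the check below then drops the GD bits, leaving a single IE-style
   GOT slot for it.  */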
5350 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5351 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5352
5353 if (old_got_type != got_type)
5354 {
5355 if (h != NULL)
5356 elf_aarch64_hash_entry (h)->got_type = got_type;
5357 else
5358 {
5359 struct elf_aarch64_local_symbol *locals;
5360 locals = elf_aarch64_locals (abfd);
5361 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5362 locals[r_symndx].got_type = got_type;
5363 }
5364 }
5365
5366 if (htab->root.sgot == NULL)
5367 {
5368 if (htab->root.dynobj == NULL)
5369 htab->root.dynobj = abfd;
5370 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5371 return FALSE;
5372 }
5373 break;
5374 }
5375
5376 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5377 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5378 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5379 if (h != NULL && info->executable)
5380 {
5381 /* If this reloc is in a read-only section, we might
5382 need a copy reloc. We can't check reliably at this
5383 stage whether the section is read-only, as input
5384 sections have not yet been mapped to output sections.
5385 Tentatively set the flag for now, and correct in
5386 adjust_dynamic_symbol. */
5387 h->non_got_ref = 1;
5388 h->plt.refcount += 1;
5389 h->pointer_equality_needed = 1;
5390 }
5391 /* FIXME: RR we need to handle these in shared libraries
5392 and essentially bomb out, as these are non-PIC
5393 relocations in shared libraries. */
5394 break;
5395
5396 case BFD_RELOC_AARCH64_CALL26:
5397 case BFD_RELOC_AARCH64_JUMP26:
5398 /* If this is a local symbol then we resolve it
5399 directly without creating a PLT entry. */
5400 if (h == NULL)
5401 continue;
5402
5403 h->needs_plt = 1;
5404 h->plt.refcount += 1;
5405 break;
5406
5407 default:
5408 break;
5409 }
5410 }
5411
5412 return TRUE;
5413 }
5414
5415 /* Treat mapping symbols as special target symbols. */
5416
5417 static bfd_boolean
5418 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5419 asymbol *sym)
5420 {
5421 return bfd_is_aarch64_special_symbol_name (sym->name,
5422 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5423 }
5424
5425 /* This is a copy of elf_find_function () from elf.c except that
5426 AArch64 mapping symbols are ignored when looking for function names. */
5427
5428 static bfd_boolean
5429 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5430 asection *section,
5431 asymbol **symbols,
5432 bfd_vma offset,
5433 const char **filename_ptr,
5434 const char **functionname_ptr)
5435 {
5436 const char *filename = NULL;
5437 asymbol *func = NULL;
5438 bfd_vma low_func = 0;
5439 asymbol **p;
5440
5441 for (p = symbols; *p != NULL; p++)
5442 {
5443 elf_symbol_type *q;
5444
5445 q = (elf_symbol_type *) * p;
5446
5447 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5448 {
5449 default:
5450 break;
5451 case STT_FILE:
5452 filename = bfd_asymbol_name (&q->symbol);
5453 break;
5454 case STT_FUNC:
5455 case STT_NOTYPE:
5456 /* Skip mapping symbols. */
5457 if ((q->symbol.flags & BSF_LOCAL)
5458 && (bfd_is_aarch64_special_symbol_name
5459 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5460 continue;
5461 /* Fall through. */
5462 if (bfd_get_section (&q->symbol) == section
5463 && q->symbol.value >= low_func && q->symbol.value <= offset)
5464 {
5465 func = (asymbol *) q;
5466 low_func = q->symbol.value;
5467 }
5468 break;
5469 }
5470 }
5471
5472 if (func == NULL)
5473 return FALSE;
5474
5475 if (filename_ptr)
5476 *filename_ptr = filename;
5477 if (functionname_ptr)
5478 *functionname_ptr = bfd_asymbol_name (func);
5479
5480 return TRUE;
5481 }
5482
5483
5484 /* Find the nearest line to a particular section and offset, for error
5485 reporting. This code is a duplicate of the code in elf.c, except
5486 that it uses aarch64_elf_find_function. */
5487
5488 static bfd_boolean
5489 elfNN_aarch64_find_nearest_line (bfd *abfd,
5490 asection *section,
5491 asymbol **symbols,
5492 bfd_vma offset,
5493 const char **filename_ptr,
5494 const char **functionname_ptr,
5495 unsigned int *line_ptr)
5496 {
5497 bfd_boolean found = FALSE;
5498
5499 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5500 toolchain uses it. */
5501
5502 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5503 section, symbols, offset,
5504 filename_ptr, functionname_ptr,
5505 line_ptr, NULL, 0,
5506 &elf_tdata (abfd)->dwarf2_find_line_info))
5507 {
5508 if (!*functionname_ptr)
5509 aarch64_elf_find_function (abfd, section, symbols, offset,
5510 *filename_ptr ? NULL : filename_ptr,
5511 functionname_ptr);
5512
5513 return TRUE;
5514 }
5515
5516 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5517 &found, filename_ptr,
5518 functionname_ptr, line_ptr,
5519 &elf_tdata (abfd)->line_info))
5520 return FALSE;
5521
5522 if (found && (*functionname_ptr || *line_ptr))
5523 return TRUE;
5524
5525 if (symbols == NULL)
5526 return FALSE;
5527
5528 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5529 filename_ptr, functionname_ptr))
5530 return FALSE;
5531
5532 *line_ptr = 0;
5533 return TRUE;
5534 }
5535
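/* Report the caller of an inlined function for the location returned by
   a previous find_nearest_line call, using the DWARF 2 machinery.  */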
5536 static bfd_boolean
5537 elfNN_aarch64_find_inliner_info (bfd *abfd,
5538 const char **filename_ptr,
5539 const char **functionname_ptr,
5540 unsigned int *line_ptr)
5541 {
5542 bfd_boolean found;
5543 found = _bfd_dwarf2_find_inliner_info
5544 (abfd, filename_ptr,
5545 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5546 return found;
5547 }
5548
5549
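/* Set the OSABI and ABI version fields of the ELF file header before it
   is written out.  */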
5550 static void
5551 elfNN_aarch64_post_process_headers (bfd *abfd,
5552 struct bfd_link_info *link_info
5553 ATTRIBUTE_UNUSED)
5554 {
5555 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5556
5557 i_ehdrp = elf_elfheader (abfd);
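/* EI_OSABI value 0 is ELFOSABI_NONE, i.e. no extensions / UNIX System V.  */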
5558 i_ehdrp->e_ident[EI_OSABI] = 0;
5559 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5560 }
5561
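/* Classify dynamic relocs for the generic ELF linker code, which uses the
   class to sort .rela.dyn so that RELATIVE relocs are grouped first.  */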
5562 static enum elf_reloc_type_class
5563 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5564 const asection *rel_sec ATTRIBUTE_UNUSED,
5565 const Elf_Internal_Rela *rela)
5566 {
5567 switch ((int) ELFNN_R_TYPE (rela->r_info))
5568 {
5569 case AARCH64_R (RELATIVE):
5570 return reloc_class_relative;
5571 case AARCH64_R (JUMP_SLOT):
5572 return reloc_class_plt;
5573 case AARCH64_R (COPY):
5574 return reloc_class_copy;
5575 default:
5576 return reloc_class_normal;
5577 }
5578 }
5579
5580 /* Mark note sections as link-once with identical contents, so that
   duplicate notes are emitted only once in the output.  */
5581
5582 static bfd_boolean
5583 elfNN_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5584 {
5585 if (hdr->sh_type == SHT_NOTE)
5586 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5587
5588 return TRUE;
5589 }
5590
5591 /* Handle an AArch64 specific section when reading an object file. This is
5592 called when bfd_section_from_shdr finds a section with an unknown
5593 type. */
5594
5595 static bfd_boolean
5596 elfNN_aarch64_section_from_shdr (bfd *abfd,
5597 Elf_Internal_Shdr *hdr,
5598 const char *name, int shindex)
5599 {
5600 /* There ought to be a place to keep ELF backend specific flags, but
5601 at the moment there isn't one. We just keep track of the
5602 sections by their name, instead. Fortunately, the ABI gives
5603 names for all the AArch64 specific sections, so we will probably get
5604 away with this. */
5605 switch (hdr->sh_type)
5606 {
5607 case SHT_AARCH64_ATTRIBUTES:
5608 break;
5609
5610 default:
5611 return FALSE;
5612 }
5613
5614 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5615 return FALSE;
5616
5617 return TRUE;
5618 }
5619
5620 /* A structure used to record a list of sections, independently
5621 of the next and prev fields in the asection structure. */
5622 typedef struct section_list
5623 {
5624 asection *sec;
5625 struct section_list *next;
5626 struct section_list *prev;
5627 }
5628 section_list;
5629
5630 /* Unfortunately we need to keep a list of sections for which
5631 an _aarch64_elf_section_data structure has been allocated. This
5632 is because it is possible for functions like elfNN_aarch64_write_section
5633 to be called on a section which has had an elf_data_structure
5634 allocated for it (and so the used_by_bfd field is valid) but
5635 for which the AArch64 extended version of this structure - the
5636 _aarch64_elf_section_data structure - has not been allocated. */
5637 static section_list *sections_with_aarch64_elf_section_data = NULL;
5638
5639 static void
5640 record_section_with_aarch64_elf_section_data (asection *sec)
5641 {
5642 struct section_list *entry;
5643
5644 entry = bfd_malloc (sizeof (*entry));
5645 if (entry == NULL)
5646 return;
5647 entry->sec = sec;
5648 entry->next = sections_with_aarch64_elf_section_data;
5649 entry->prev = NULL;
5650 if (entry->next != NULL)
5651 entry->next->prev = entry;
5652 sections_with_aarch64_elf_section_data = entry;
5653 }
5654
5655 static struct section_list *
5656 find_aarch64_elf_section_entry (asection *sec)
5657 {
5658 struct section_list *entry;
5659 static struct section_list *last_entry = NULL;
5660
5661 /* This is a short cut for the typical case where the sections are added
5662 to the sections_with_aarch64_elf_section_data list in forward order and
5663 then looked up here in backwards order. This makes a real difference
5664 to the ld-srec/sec64k.exp linker test. */
5665 entry = sections_with_aarch64_elf_section_data;
5666 if (last_entry != NULL)
5667 {
5668 if (last_entry->sec == sec)
5669 entry = last_entry;
5670 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5671 entry = last_entry->next;
5672 }
5673
5674 for (; entry; entry = entry->next)
5675 if (entry->sec == sec)
5676 break;
5677
5678 if (entry)
5679 /* Record the entry prior to this one - it is the entry we are
5680 most likely to want to locate next time. Also this way if we
5681 have been called from
5682 unrecord_section_with_aarch64_elf_section_data () we will not
5683 be caching a pointer that is about to be freed. */
5684 last_entry = entry->prev;
5685
5686 return entry;
5687 }
5688
5689 static void
5690 unrecord_section_with_aarch64_elf_section_data (asection *sec)
5691 {
5692 struct section_list *entry;
5693
5694 entry = find_aarch64_elf_section_entry (sec);
5695
5696 if (entry)
5697 {
5698 if (entry->prev != NULL)
5699 entry->prev->next = entry->next;
5700 if (entry->next != NULL)
5701 entry->next->prev = entry->prev;
5702 if (entry == sections_with_aarch64_elf_section_data)
5703 sections_with_aarch64_elf_section_data = entry->next;
5704 free (entry);
5705 }
5706 }
5707
5708
5709 typedef struct
5710 {
5711 void *finfo;
5712 struct bfd_link_info *info;
5713 asection *sec;
5714 int sec_shndx;
5715 int (*func) (void *, const char *, Elf_Internal_Sym *,
5716 asection *, struct elf_link_hash_entry *);
5717 } output_arch_syminfo;
5718
5719 enum map_symbol_type
5720 {
5721 AARCH64_MAP_INSN,
5722 AARCH64_MAP_DATA
5723 };
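/* Per the AArch64 ELF ABI, the mapping symbol "$x" marks the start of a
   sequence of A64 instructions and "$d" the start of a sequence of data.  */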
5724
5725
5726 /* Output a single mapping symbol. */
5727
5728 static bfd_boolean
5729 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
5730 enum map_symbol_type type, bfd_vma offset)
5731 {
5732 static const char *names[2] = { "$x", "$d" };
5733 Elf_Internal_Sym sym;
5734
5735 sym.st_value = (osi->sec->output_section->vma
5736 + osi->sec->output_offset + offset);
5737 sym.st_size = 0;
5738 sym.st_other = 0;
5739 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5740 sym.st_shndx = osi->sec_shndx;
5741 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5742 }
5743
5744
5745
5746 /* Output mapping symbols for PLT entries associated with H. */
5747
5748 static bfd_boolean
5749 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5750 {
5751 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5752 bfd_vma addr;
5753
5754 if (h->root.type == bfd_link_hash_indirect)
5755 return TRUE;
5756
5757 if (h->root.type == bfd_link_hash_warning)
5758 /* When warning symbols are created, they **replace** the "real"
5759 entry in the hash table, thus we never get to see the real
5760 symbol in a hash traversal. So look at it now. */
5761 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5762
5763 if (h->plt.offset == (bfd_vma) - 1)
5764 return TRUE;
5765
5766 addr = h->plt.offset;
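/* Only the first PLTn entry (at offset 32, right after the PLT0 header in
   the small PLT model) gets a "$x" mapping symbol marking the PLT as code.  */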
5767 if (addr == 32)
5768 {
5769 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5770 return FALSE;
5771 }
5772 return TRUE;
5773 }
5774
5775
5776 /* Output a single local symbol for a generated stub. */
5777
5778 static bfd_boolean
5779 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5780 bfd_vma offset, bfd_vma size)
5781 {
5782 Elf_Internal_Sym sym;
5783
5784 sym.st_value = (osi->sec->output_section->vma
5785 + osi->sec->output_offset + offset);
5786 sym.st_size = size;
5787 sym.st_other = 0;
5788 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5789 sym.st_shndx = osi->sec_shndx;
5790 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5791 }
5792
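/* Called via bfd_hash_traverse.  Emit the stub-defining local symbol and
   the mapping symbols for one stub, if the stub lives in the section
   currently being processed.  */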
5793 static bfd_boolean
5794 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5795 {
5796 struct elf_aarch64_stub_hash_entry *stub_entry;
5797 asection *stub_sec;
5798 bfd_vma addr;
5799 char *stub_name;
5800 output_arch_syminfo *osi;
5801
5802 /* Massage our args to the form they really have. */
5803 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5804 osi = (output_arch_syminfo *) in_arg;
5805
5806 stub_sec = stub_entry->stub_sec;
5807
5808 /* Ensure this stub is attached to the current section being
5809 processed. */
5810 if (stub_sec != osi->sec)
5811 return TRUE;
5812
5813 addr = (bfd_vma) stub_entry->stub_offset;
5814
5815 stub_name = stub_entry->output_name;
5816
5817 switch (stub_entry->stub_type)
5818 {
5819 case aarch64_stub_adrp_branch:
5820 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
5821 sizeof (aarch64_adrp_branch_stub)))
5822 return FALSE;
5823 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5824 return FALSE;
5825 break;
5826 case aarch64_stub_long_branch:
5827 if (!elfNN_aarch64_output_stub_sym
5828 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5829 return FALSE;
5830 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5831 return FALSE;
5832 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5833 return FALSE;
5834 break;
5835 default:
5836 BFD_FAIL ();
5837 }
5838
5839 return TRUE;
5840 }
5841
5842 /* Output mapping symbols for linker generated sections. */
5843
5844 static bfd_boolean
5845 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
5846 struct bfd_link_info *info,
5847 void *finfo,
5848 int (*func) (void *, const char *,
5849 Elf_Internal_Sym *,
5850 asection *,
5851 struct elf_link_hash_entry
5852 *))
5853 {
5854 output_arch_syminfo osi;
5855 struct elf_aarch64_link_hash_table *htab;
5856
5857 htab = elf_aarch64_hash_table (info);
5858
5859 osi.finfo = finfo;
5860 osi.info = info;
5861 osi.func = func;
5862
5863 /* Long call stubs. */
5864 if (htab->stub_bfd && htab->stub_bfd->sections)
5865 {
5866 asection *stub_sec;
5867
5868 for (stub_sec = htab->stub_bfd->sections;
5869 stub_sec != NULL; stub_sec = stub_sec->next)
5870 {
5871 /* Ignore non-stub sections. */
5872 if (!strstr (stub_sec->name, STUB_SUFFIX))
5873 continue;
5874
5875 osi.sec = stub_sec;
5876
5877 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5878 (output_bfd, osi.sec->output_section);
5879
5880 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5881 &osi);
5882 }
5883 }
5884
5885 /* Finally, output mapping symbols for the PLT. */
5886 if (!htab->root.splt || htab->root.splt->size == 0)
5887 return TRUE;
5888
5889 /* For now live without mapping symbols for the plt. */
5890 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5891 (output_bfd, htab->root.splt->output_section);
5892 osi.sec = htab->root.splt;
5893
5894 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
5895 (void *) &osi);
5896
5897 return TRUE;
5898
5899 }
5900
5901 /* Allocate target specific section data. */
5902
5903 static bfd_boolean
5904 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
5905 {
5906 if (!sec->used_by_bfd)
5907 {
5908 _aarch64_elf_section_data *sdata;
5909 bfd_size_type amt = sizeof (*sdata);
5910
5911 sdata = bfd_zalloc (abfd, amt);
5912 if (sdata == NULL)
5913 return FALSE;
5914 sec->used_by_bfd = sdata;
5915 }
5916
5917 record_section_with_aarch64_elf_section_data (sec);
5918
5919 return _bfd_elf_new_section_hook (abfd, sec);
5920 }
5921
5922
5923 static void
5924 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5925 asection *sec,
5926 void *ignore ATTRIBUTE_UNUSED)
5927 {
5928 unrecord_section_with_aarch64_elf_section_data (sec);
5929 }
5930
5931 static bfd_boolean
5932 elfNN_aarch64_close_and_cleanup (bfd *abfd)
5933 {
5934 if (abfd->sections)
5935 bfd_map_over_sections (abfd,
5936 unrecord_section_via_map_over_sections, NULL);
5937
5938 return _bfd_elf_close_and_cleanup (abfd);
5939 }
5940
5941 static bfd_boolean
5942 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
5943 {
5944 if (abfd->sections)
5945 bfd_map_over_sections (abfd,
5946 unrecord_section_via_map_over_sections, NULL);
5947
5948 return _bfd_free_cached_info (abfd);
5949 }
5950
5951 static bfd_boolean
5952 elfNN_aarch64_is_function_type (unsigned int type)
5953 {
5954 return type == STT_FUNC;
5955 }
5956
5957 /* Create dynamic sections. This is different from the ARM backend in that
5958 the got, plt, gotplt and their relocation sections are all created in the
5959 standard part of the bfd elf backend. */
5960
5961 static bfd_boolean
5962 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
5963 struct bfd_link_info *info)
5964 {
5965 struct elf_aarch64_link_hash_table *htab;
5966 struct elf_link_hash_entry *h;
5967
5968 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5969 return FALSE;
5970
5971 htab = elf_aarch64_hash_table (info);
5972 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5973 if (!info->shared)
5974 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5975
5976 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5977 abort ();
5978
5979 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5980 dynobj's .got section. We don't do this in the linker script
5981 because we don't want to define the symbol if we are not creating
5982 a global offset table. */
5983 h = _bfd_elf_define_linkage_sym (dynobj, info,
5984 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5985 elf_hash_table (info)->hgot = h;
5986 if (h == NULL)
5987 return FALSE;
5988
5989 return TRUE;
5990 }
5991
5992
5993 /* Allocate space in .plt, .got and associated reloc sections for
5994 dynamic relocs. */
5995
5996 static bfd_boolean
5997 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5998 {
5999 struct bfd_link_info *info;
6000 struct elf_aarch64_link_hash_table *htab;
6001 struct elf_aarch64_link_hash_entry *eh;
6002 struct elf_dyn_relocs *p;
6003
6004 /* An example of a bfd_link_hash_indirect symbol is a versioned
6005 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6006 -> __gxx_personality_v0(bfd_link_hash_defined)
6007
6008 There is no need to process bfd_link_hash_indirect symbols here
6009 because we will also be presented with the concrete instance of
6010 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6011 called to copy all relevant data from the generic to the concrete
6012 symbol instance.
6013 */
6014 if (h->root.type == bfd_link_hash_indirect)
6015 return TRUE;
6016
6017 if (h->root.type == bfd_link_hash_warning)
6018 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6019
6020 info = (struct bfd_link_info *) inf;
6021 htab = elf_aarch64_hash_table (info);
6022
6023 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6024 {
6025 /* Make sure this symbol is output as a dynamic symbol.
6026 Undefined weak syms won't yet be marked as dynamic. */
6027 if (h->dynindx == -1 && !h->forced_local)
6028 {
6029 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6030 return FALSE;
6031 }
6032
6033 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6034 {
6035 asection *s = htab->root.splt;
6036
6037 /* If this is the first .plt entry, make room for the special
6038 first entry. */
6039 if (s->size == 0)
6040 s->size += htab->plt_header_size;
6041
6042 h->plt.offset = s->size;
6043
6044 /* If this symbol is not defined in a regular file, and we are
6045 not generating a shared library, then set the symbol to this
6046 location in the .plt. This is required to make function
6047 pointers compare as equal between the normal executable and
6048 the shared library. */
6049 if (!info->shared && !h->def_regular)
6050 {
6051 h->root.u.def.section = s;
6052 h->root.u.def.value = h->plt.offset;
6053 }
6054
6055 /* Make room for this entry. For now we only create the
6056 small model PLT entries. We later need to find a way
6057 of relaxing into these from the large model PLT entries. */
6058 s->size += PLT_SMALL_ENTRY_SIZE;
6059
6060 /* We also need to make an entry in the .got.plt section, which
6061 will be placed in the .got section by the linker script. */
6062 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6063
6064 /* We also need to make an entry in the .rela.plt section. */
6065 htab->root.srelplt->size += RELOC_SIZE (htab);
6066
6067 /* We need to ensure that all GOT entries that serve the PLT
6068 are consecutive with the special GOT slots [0] [1] and
6069 [2]. Any additional relocations, such as
6070 R_AARCH64_TLSDESC, must be placed after the PLT related
6071 entries. We abuse the reloc_count such that during
6072 sizing we adjust reloc_count to indicate the number of
6073 PLT related reserved entries. In subsequent phases when
6074 filling in the contents of the reloc entries, PLT related
6075 entries are placed by computing their PLT index (0
6076 .. reloc_count), while other non-PLT relocs are placed
6077 at the slot indicated by reloc_count and reloc_count is
6078 updated. */
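/* Illustrative example: with two PLT entries and one TLSDESC relocation,
   reloc_count ends the sizing phase at 2; the two JUMP_SLOT relocs are
   later written at slots 0 and 1 (their PLT indices) and the TLSDESC
   reloc at slot reloc_count, which is then bumped to 3.  */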
6079
6080 htab->root.srelplt->reloc_count++;
6081 }
6082 else
6083 {
6084 h->plt.offset = (bfd_vma) - 1;
6085 h->needs_plt = 0;
6086 }
6087 }
6088 else
6089 {
6090 h->plt.offset = (bfd_vma) - 1;
6091 h->needs_plt = 0;
6092 }
6093
6094 eh = (struct elf_aarch64_link_hash_entry *) h;
6095 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6096
6097 if (h->got.refcount > 0)
6098 {
6099 bfd_boolean dyn;
6100 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6101
6102 h->got.offset = (bfd_vma) - 1;
6103
6104 dyn = htab->root.dynamic_sections_created;
6105
6106 /* Make sure this symbol is output as a dynamic symbol.
6107 Undefined weak syms won't yet be marked as dynamic. */
6108 if (dyn && h->dynindx == -1 && !h->forced_local)
6109 {
6110 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6111 return FALSE;
6112 }
6113
6114 if (got_type == GOT_UNKNOWN)
6115 {
6116 }
6117 else if (got_type == GOT_NORMAL)
6118 {
6119 h->got.offset = htab->root.sgot->size;
6120 htab->root.sgot->size += GOT_ENTRY_SIZE;
6121 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6122 || h->root.type != bfd_link_hash_undefweak)
6123 && (info->shared
6124 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6125 {
6126 htab->root.srelgot->size += RELOC_SIZE (htab);
6127 }
6128 }
6129 else
6130 {
6131 int indx;
6132 if (got_type & GOT_TLSDESC_GD)
6133 {
6134 eh->tlsdesc_got_jump_table_offset =
6135 (htab->root.sgotplt->size
6136 - aarch64_compute_jump_table_size (htab));
6137 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6138 h->got.offset = (bfd_vma) - 2;
6139 }
6140
6141 if (got_type & GOT_TLS_GD)
6142 {
6143 h->got.offset = htab->root.sgot->size;
6144 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6145 }
6146
6147 if (got_type & GOT_TLS_IE)
6148 {
6149 h->got.offset = htab->root.sgot->size;
6150 htab->root.sgot->size += GOT_ENTRY_SIZE;
6151 }
6152
6153 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6154 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6155 || h->root.type != bfd_link_hash_undefweak)
6156 && (info->shared
6157 || indx != 0
6158 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6159 {
6160 if (got_type & GOT_TLSDESC_GD)
6161 {
6162 htab->root.srelplt->size += RELOC_SIZE (htab);
6163 /* Note reloc_count not incremented here! We have
6164 already adjusted reloc_count for this relocation
6165 type. */
6166
6167 /* TLSDESC PLT is now needed, but not yet determined. */
6168 htab->tlsdesc_plt = (bfd_vma) - 1;
6169 }
6170
6171 if (got_type & GOT_TLS_GD)
6172 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6173
6174 if (got_type & GOT_TLS_IE)
6175 htab->root.srelgot->size += RELOC_SIZE (htab);
6176 }
6177 }
6178 }
6179 else
6180 {
6181 h->got.offset = (bfd_vma) - 1;
6182 }
6183
6184 if (eh->dyn_relocs == NULL)
6185 return TRUE;
6186
6187 /* In the shared -Bsymbolic case, discard space allocated for
6188 dynamic pc-relative relocs against symbols which turn out to be
6189 defined in regular objects. For the normal shared case, discard
6190 space for pc-relative relocs that have become local due to symbol
6191 visibility changes. */
6192
6193 if (info->shared)
6194 {
6195 /* Relocs that use pc_count are those that appear on a call
6196 insn, or certain REL relocs that can be generated via assembly.
6197 We want calls to protected symbols to resolve directly to the
6198 function rather than going via the plt. If people want
6199 function pointer comparisons to work as expected then they
6200 should avoid writing weird assembly. */
6201 if (SYMBOL_CALLS_LOCAL (info, h))
6202 {
6203 struct elf_dyn_relocs **pp;
6204
6205 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6206 {
6207 p->count -= p->pc_count;
6208 p->pc_count = 0;
6209 if (p->count == 0)
6210 *pp = p->next;
6211 else
6212 pp = &p->next;
6213 }
6214 }
6215
6216 /* Also discard relocs on undefined weak syms with non-default
6217 visibility. */
6218 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6219 {
6220 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6221 eh->dyn_relocs = NULL;
6222
6223 /* Make sure undefined weak symbols are output as a dynamic
6224 symbol in PIEs. */
6225 else if (h->dynindx == -1
6226 && !h->forced_local
6227 && !bfd_elf_link_record_dynamic_symbol (info, h))
6228 return FALSE;
6229 }
6230
6231 }
6232 else if (ELIMINATE_COPY_RELOCS)
6233 {
6234 /* For the non-shared case, discard space for relocs against
6235 symbols which turn out to need copy relocs or are not
6236 dynamic. */
6237
6238 if (!h->non_got_ref
6239 && ((h->def_dynamic
6240 && !h->def_regular)
6241 || (htab->root.dynamic_sections_created
6242 && (h->root.type == bfd_link_hash_undefweak
6243 || h->root.type == bfd_link_hash_undefined))))
6244 {
6245 /* Make sure this symbol is output as a dynamic symbol.
6246 Undefined weak syms won't yet be marked as dynamic. */
6247 if (h->dynindx == -1
6248 && !h->forced_local
6249 && !bfd_elf_link_record_dynamic_symbol (info, h))
6250 return FALSE;
6251
6252 /* If that succeeded, we know we'll be keeping all the
6253 relocs. */
6254 if (h->dynindx != -1)
6255 goto keep;
6256 }
6257
6258 eh->dyn_relocs = NULL;
6259
6260 keep:;
6261 }
6262
6263 /* Finally, allocate space. */
6264 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6265 {
6266 asection *sreloc;
6267
6268 sreloc = elf_section_data (p->sec)->sreloc;
6269
6270 BFD_ASSERT (sreloc != NULL);
6271
6272 sreloc->size += p->count * RELOC_SIZE (htab);
6273 }
6274
6275 return TRUE;
6276 }
6277
6278
6279 /* This is the most important function of all.  Innocuously named
6280 though!  */
6281 static bfd_boolean
6282 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6283 struct bfd_link_info *info)
6284 {
6285 struct elf_aarch64_link_hash_table *htab;
6286 bfd *dynobj;
6287 asection *s;
6288 bfd_boolean relocs;
6289 bfd *ibfd;
6290
6291 htab = elf_aarch64_hash_table ((info));
6292 dynobj = htab->root.dynobj;
6293
6294 BFD_ASSERT (dynobj != NULL);
6295
6296 if (htab->root.dynamic_sections_created)
6297 {
6298 if (info->executable)
6299 {
6300 s = bfd_get_linker_section (dynobj, ".interp");
6301 if (s == NULL)
6302 abort ();
6303 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6304 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6305 }
6306 }
6307
6308 /* Set up .got offsets for local syms, and space for local dynamic
6309 relocs. */
6310 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6311 {
6312 struct elf_aarch64_local_symbol *locals = NULL;
6313 Elf_Internal_Shdr *symtab_hdr;
6314 asection *srel;
6315 unsigned int i;
6316
6317 if (!is_aarch64_elf (ibfd))
6318 continue;
6319
6320 for (s = ibfd->sections; s != NULL; s = s->next)
6321 {
6322 struct elf_dyn_relocs *p;
6323
6324 for (p = (struct elf_dyn_relocs *)
6325 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6326 {
6327 if (!bfd_is_abs_section (p->sec)
6328 && bfd_is_abs_section (p->sec->output_section))
6329 {
6330 /* Input section has been discarded, either because
6331 it is a copy of a linkonce section or due to
6332 linker script /DISCARD/, so we'll be discarding
6333 the relocs too. */
6334 }
6335 else if (p->count != 0)
6336 {
6337 srel = elf_section_data (p->sec)->sreloc;
6338 srel->size += p->count * RELOC_SIZE (htab);
6339 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6340 info->flags |= DF_TEXTREL;
6341 }
6342 }
6343 }
6344
6345 locals = elf_aarch64_locals (ibfd);
6346 if (!locals)
6347 continue;
6348
6349 symtab_hdr = &elf_symtab_hdr (ibfd);
6350 srel = htab->root.srelgot;
6351 for (i = 0; i < symtab_hdr->sh_info; i++)
6352 {
6353 locals[i].got_offset = (bfd_vma) - 1;
6354 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6355 if (locals[i].got_refcount > 0)
6356 {
6357 unsigned got_type = locals[i].got_type;
6358 if (got_type & GOT_TLSDESC_GD)
6359 {
6360 locals[i].tlsdesc_got_jump_table_offset =
6361 (htab->root.sgotplt->size
6362 - aarch64_compute_jump_table_size (htab));
6363 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6364 locals[i].got_offset = (bfd_vma) - 2;
6365 }
6366
6367 if (got_type & GOT_TLS_GD)
6368 {
6369 locals[i].got_offset = htab->root.sgot->size;
6370 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6371 }
6372
6373 if (got_type & GOT_TLS_IE)
6374 {
6375 locals[i].got_offset = htab->root.sgot->size;
6376 htab->root.sgot->size += GOT_ENTRY_SIZE;
6377 }
6378
6379 if (got_type == GOT_UNKNOWN)
6380 {
6381 }
6382
6383 if (got_type == GOT_NORMAL)
6384 {
6385 }
6386
6387 if (info->shared)
6388 {
6389 if (got_type & GOT_TLSDESC_GD)
6390 {
6391 htab->root.srelplt->size += RELOC_SIZE (htab);
6392 /* Note RELOC_COUNT not incremented here! */
6393 htab->tlsdesc_plt = (bfd_vma) - 1;
6394 }
6395
6396 if (got_type & GOT_TLS_GD)
6397 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6398
6399 if (got_type & GOT_TLS_IE)
6400 htab->root.srelgot->size += RELOC_SIZE (htab);
6401 }
6402 }
6403 else
6404 {
6405 locals[i].got_refcount = (bfd_vma) - 1;
6406 }
6407 }
6408 }
6409
6410
6411 /* Allocate global sym .plt and .got entries, and space for global
6412 sym dynamic relocs. */
6413 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
6414 info);
6415
6416
6417 /* For every jump slot reserved in the sgotplt, reloc_count is
6418 incremented. However, when we reserve space for TLS descriptors,
6419 it's not incremented, so in order to compute the space reserved
6420 for them, it suffices to multiply the reloc count by the jump
6421 slot size. */
6422
6423 if (htab->root.srelplt)
6424 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6425
6426 if (htab->tlsdesc_plt)
6427 {
6428 if (htab->root.splt->size == 0)
6429 htab->root.splt->size += PLT_ENTRY_SIZE;
6430
6431 htab->tlsdesc_plt = htab->root.splt->size;
6432 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6433
6434 /* If we're not using lazy TLS relocations, don't generate the
6435 GOT entry required. */
6436 if (!(info->flags & DF_BIND_NOW))
6437 {
6438 htab->dt_tlsdesc_got = htab->root.sgot->size;
6439 htab->root.sgot->size += GOT_ENTRY_SIZE;
6440 }
6441 }
6442
6443 /* We now have determined the sizes of the various dynamic sections.
6444 Allocate memory for them. */
6445 relocs = FALSE;
6446 for (s = dynobj->sections; s != NULL; s = s->next)
6447 {
6448 if ((s->flags & SEC_LINKER_CREATED) == 0)
6449 continue;
6450
6451 if (s == htab->root.splt
6452 || s == htab->root.sgot
6453 || s == htab->root.sgotplt
6454 || s == htab->root.iplt
6455 || s == htab->root.igotplt || s == htab->sdynbss)
6456 {
6457 /* Strip this section if we don't need it; see the
6458 comment below. */
6459 }
6460 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6461 {
6462 if (s->size != 0 && s != htab->root.srelplt)
6463 relocs = TRUE;
6464
6465 /* We use the reloc_count field as a counter if we need
6466 to copy relocs into the output file. */
6467 if (s != htab->root.srelplt)
6468 s->reloc_count = 0;
6469 }
6470 else
6471 {
6472 /* It's not one of our sections, so don't allocate space. */
6473 continue;
6474 }
6475
6476 if (s->size == 0)
6477 {
6478 /* If we don't need this section, strip it from the
6479 output file. This is mostly to handle .rela.bss and
6480 .rela.plt. We must create both sections in
6481 create_dynamic_sections, because they must be created
6482 before the linker maps input sections to output
6483 sections. The linker does that before
6484 adjust_dynamic_symbol is called, and it is that
6485 function which decides whether anything needs to go
6486 into these sections. */
6487
6488 s->flags |= SEC_EXCLUDE;
6489 continue;
6490 }
6491
6492 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6493 continue;
6494
6495 /* Allocate memory for the section contents. We use bfd_zalloc
6496 here in case unused entries are not reclaimed before the
6497 section's contents are written out. This should not happen,
6498 but this way if it does, we get a R_AARCH64_NONE reloc instead
6499 of garbage. */
6500 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6501 if (s->contents == NULL)
6502 return FALSE;
6503 }
6504
6505 if (htab->root.dynamic_sections_created)
6506 {
6507 /* Add some entries to the .dynamic section. We fill in the
6508 values later, in elfNN_aarch64_finish_dynamic_sections, but we
6509 must add the entries now so that we get the correct size for
6510 the .dynamic section. The DT_DEBUG entry is filled in by the
6511 dynamic linker and used by the debugger. */
6512 #define add_dynamic_entry(TAG, VAL) \
6513 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6514
6515 if (info->executable)
6516 {
6517 if (!add_dynamic_entry (DT_DEBUG, 0))
6518 return FALSE;
6519 }
6520
6521 if (htab->root.splt->size != 0)
6522 {
6523 if (!add_dynamic_entry (DT_PLTGOT, 0)
6524 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6525 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6526 || !add_dynamic_entry (DT_JMPREL, 0))
6527 return FALSE;
6528
6529 if (htab->tlsdesc_plt
6530 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6531 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6532 return FALSE;
6533 }
6534
6535 if (relocs)
6536 {
6537 if (!add_dynamic_entry (DT_RELA, 0)
6538 || !add_dynamic_entry (DT_RELASZ, 0)
6539 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6540 return FALSE;
6541
6542 /* If any dynamic relocs apply to a read-only section,
6543 then we need a DT_TEXTREL entry. */
6544 if ((info->flags & DF_TEXTREL) != 0)
6545 {
6546 if (!add_dynamic_entry (DT_TEXTREL, 0))
6547 return FALSE;
6548 }
6549 }
6550 }
6551 #undef add_dynamic_entry
6552
6553 return TRUE;
6554 }
6555
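/* Apply relocation type R_TYPE, with value VALUE, to the PLT instruction
   words at PLT_ENTRY.  */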
6556 static inline void
6557 elf64_aarch64_update_plt_entry (bfd *output_bfd,
6558 unsigned int r_type,
6559 bfd_byte *plt_entry, bfd_vma value)
6560 {
6561 reloc_howto_type *howto;
6562 howto = elfNN_aarch64_howto_from_type (r_type);
6563 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6564 }
6565
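/* Fill in one small-model PLTn entry for symbol H: patch the entry's
   instructions, point its .got.plt slot back at PLT0 and emit the
   corresponding JUMP_SLOT reloc in .rela.plt.  */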
6566 static void
6567 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6568 struct elf_aarch64_link_hash_table
6569 *htab, bfd *output_bfd)
6570 {
6571 bfd_byte *plt_entry;
6572 bfd_vma plt_index;
6573 bfd_vma got_offset;
6574 bfd_vma gotplt_entry_address;
6575 bfd_vma plt_entry_address;
6576 Elf_Internal_Rela rela;
6577 bfd_byte *loc;
6578
6579 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6580
6581 /* Offset in the GOT is the PLT index plus the 3 reserved GOT
6582 header entries, times GOT_ENTRY_SIZE. */
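/* For instance, the first PLTn entry (plt_index 0) uses GOT slot 3,
   i.e. got_offset == 3 * GOT_ENTRY_SIZE, just past the reserved
   header slots.  */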
6583 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6584 plt_entry = htab->root.splt->contents + h->plt.offset;
6585 plt_entry_address = htab->root.splt->output_section->vma
6586 + htab->root.splt->output_section->output_offset + h->plt.offset;
6587 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6588 htab->root.sgotplt->output_offset + got_offset;
6589
6590 /* Copy in the boiler-plate for the PLTn entry. */
6591 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6592
6593 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6594 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
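/* PG() is the 4KiB page base of an address and PG_OFFSET() the offset
   within that page, matching the page arithmetic used by ADRP (these
   helpers are assumed to be defined elsewhere in this file).  */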
6595 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (ADR_PREL_PG_HI21),
6596 plt_entry,
6597 PG (gotplt_entry_address) -
6598 PG (plt_entry_address));
6599
6600 /* Fill in the lo12 bits for the load from the pltgot. */
6601 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (LDSTNN_ABS_LO12_NC),
6602 plt_entry + 4,
6603 PG_OFFSET (gotplt_entry_address));
6604
6605 /* Fill in the lo12 bits for the add from the pltgot entry. */
6606 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (ADD_ABS_LO12_NC),
6607 plt_entry + 8,
6608 PG_OFFSET (gotplt_entry_address));
6609
6610 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6611 bfd_put_NN (output_bfd,
6612 (htab->root.splt->output_section->vma
6613 + htab->root.splt->output_offset),
6614 htab->root.sgotplt->contents + got_offset);
6615
6616 /* Fill in the entry in the .rela.plt section. */
6617 rela.r_offset = gotplt_entry_address;
6618 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
6619 rela.r_addend = 0;
6620
6621 /* Compute the relocation entry to use based on the PLT index and do
6622 not adjust reloc_count. The reloc_count has already been adjusted
6623 to account for this entry. */
6624 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6625 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6626 }
6627
6628 /* Size sections even though they're not dynamic. We use it to set up
6629 _TLS_MODULE_BASE_, if needed. */
6630
6631 static bfd_boolean
6632 elfNN_aarch64_always_size_sections (bfd *output_bfd,
6633 struct bfd_link_info *info)
6634 {
6635 asection *tls_sec;
6636
6637 if (info->relocatable)
6638 return TRUE;
6639
6640 tls_sec = elf_hash_table (info)->tls_sec;
6641
6642 if (tls_sec)
6643 {
6644 struct elf_link_hash_entry *tlsbase;
6645
6646 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6647 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6648
6649 if (tlsbase)
6650 {
6651 struct bfd_link_hash_entry *h = NULL;
6652 const struct elf_backend_data *bed =
6653 get_elf_backend_data (output_bfd);
6654
6655 if (!(_bfd_generic_link_add_one_symbol
6656 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6657 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6658 return FALSE;
6659
6660 tlsbase->type = STT_TLS;
6661 tlsbase = (struct elf_link_hash_entry *) h;
6662 tlsbase->def_regular = 1;
6663 tlsbase->other = STV_HIDDEN;
6664 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6665 }
6666 }
6667
6668 return TRUE;
6669 }
6670
6671 /* Finish up dynamic symbol handling. We set the contents of various
6672 dynamic sections here. */
6673 static bfd_boolean
6674 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6675 struct bfd_link_info *info,
6676 struct elf_link_hash_entry *h,
6677 Elf_Internal_Sym *sym)
6678 {
6679 struct elf_aarch64_link_hash_table *htab;
6680 htab = elf_aarch64_hash_table (info);
6681
6682 if (h->plt.offset != (bfd_vma) - 1)
6683 {
6684 /* This symbol has an entry in the procedure linkage table. Set
6685 it up. */
6686
6687 if (h->dynindx == -1
6688 || htab->root.splt == NULL
6689 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6690 abort ();
6691
6692 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6693 if (!h->def_regular)
6694 {
6695 /* Mark the symbol as undefined, rather than as defined in
6696 the .plt section. Leave the value alone. This is a clue
6697 for the dynamic linker, to make function pointer
6698 comparisons work between an application and shared
6699 library. */
6700 sym->st_shndx = SHN_UNDEF;
6701 }
6702 }
6703
6704 if (h->got.offset != (bfd_vma) - 1
6705 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6706 {
6707 Elf_Internal_Rela rela;
6708 bfd_byte *loc;
6709
6710 /* This symbol has an entry in the global offset table. Set it
6711 up. */
6712 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6713 abort ();
6714
6715 rela.r_offset = (htab->root.sgot->output_section->vma
6716 + htab->root.sgot->output_offset
6717 + (h->got.offset & ~(bfd_vma) 1));
6718
6719 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6720 {
6721 if (!h->def_regular)
6722 return FALSE;
6723
6724 BFD_ASSERT ((h->got.offset & 1) != 0);
6725 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6726 rela.r_addend = (h->root.u.def.value
6727 + h->root.u.def.section->output_section->vma
6728 + h->root.u.def.section->output_offset);
6729 }
6730 else
6731 {
6732 BFD_ASSERT ((h->got.offset & 1) == 0);
6733 bfd_put_NN (output_bfd, (bfd_vma) 0,
6734 htab->root.sgot->contents + h->got.offset);
6735 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
6736 rela.r_addend = 0;
6737 }
6738
6739 loc = htab->root.srelgot->contents;
6740 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6741 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6742 }
6743
6744 if (h->needs_copy)
6745 {
6746 Elf_Internal_Rela rela;
6747 bfd_byte *loc;
6748
6749 /* This symbol needs a copy reloc. Set it up. */
6750
6751 if (h->dynindx == -1
6752 || (h->root.type != bfd_link_hash_defined
6753 && h->root.type != bfd_link_hash_defweak)
6754 || htab->srelbss == NULL)
6755 abort ();
6756
6757 rela.r_offset = (h->root.u.def.value
6758 + h->root.u.def.section->output_section->vma
6759 + h->root.u.def.section->output_offset);
6760 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
6761 rela.r_addend = 0;
6762 loc = htab->srelbss->contents;
6763 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6764 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6765 }
6766
6767 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6768 be NULL for local symbols. */
6769 if (sym != NULL
6770 && (h == elf_hash_table (info)->hdynamic
6771 || h == elf_hash_table (info)->hgot))
6772 sym->st_shndx = SHN_ABS;
6773
6774 return TRUE;
6775 }
6776
6777 static void
6778 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6779 struct elf_aarch64_link_hash_table
6780 *htab)
6781 {
6782 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6783 small and large PLTs and at the moment just generates
6784 the small PLT. */
6785
6786 /* PLT0 of the small PLT looks like this in ELF64 -
6787 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6788 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6789 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6790 // symbol resolver
6791 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6792 // GOTPLT entry for this.
6793 br x17
6794 PLT0 will be slightly different in ELF32 due to different got entry
6795 size.
6796 */
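/* A sketch of the run-time picture this relies on (ELF64 case):
   .got.plt[0] is set elsewhere in finish_dynamic_sections to point at
   _DYNAMIC, while .got.plt[1] and [2] are left for the dynamic linker;
   PLT0 loads .got.plt[2] (PLT_GOT + 16) to reach the lazy resolver.  */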
6797 bfd_vma plt_got_base;
6798 bfd_vma plt_base;
6799
6800
6801 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
6802 PLT_ENTRY_SIZE);
6803 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6804 PLT_ENTRY_SIZE;
6805
6806 plt_got_base = (htab->root.sgotplt->output_section->vma
6807 + htab->root.sgotplt->output_offset);
6808
6809 plt_base = htab->root.splt->output_section->vma +
6810 htab->root.splt->output_section->output_offset;
6811
6812 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6813 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6814 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (ADR_PREL_PG_HI21),
6815 htab->root.splt->contents + 4,
6816 PG (plt_got_base + 16) - PG (plt_base + 4));
6817
6818 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (LDSTNN_ABS_LO12_NC),
6819 htab->root.splt->contents + 8,
6820 PG_OFFSET (plt_got_base + 16));
6821
6822 elf64_aarch64_update_plt_entry (output_bfd, AARCH64_R (ADD_ABS_LO12_NC),
6823 htab->root.splt->contents + 12,
6824 PG_OFFSET (plt_got_base + 16));
6825 }
6826
6827 static bfd_boolean
6828 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
6829 struct bfd_link_info *info)
6830 {
6831 struct elf_aarch64_link_hash_table *htab;
6832 bfd *dynobj;
6833 asection *sdyn;
6834
6835 htab = elf_aarch64_hash_table (info);
6836 dynobj = htab->root.dynobj;
6837 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6838
6839 if (htab->root.dynamic_sections_created)
6840 {
6841 ElfNN_External_Dyn *dyncon, *dynconend;
6842
6843 if (sdyn == NULL || htab->root.sgot == NULL)
6844 abort ();
6845
6846 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
6847 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
6848 for (; dyncon < dynconend; dyncon++)
6849 {
6850 Elf_Internal_Dyn dyn;
6851 asection *s;
6852
6853 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
6854
6855 switch (dyn.d_tag)
6856 {
6857 default:
6858 continue;
6859
6860 case DT_PLTGOT:
6861 s = htab->root.sgotplt;
6862 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6863 break;
6864
6865 case DT_JMPREL:
6866 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6867 break;
6868
6869 case DT_PLTRELSZ:
6870 s = htab->root.srelplt->output_section;
6871 dyn.d_un.d_val = s->size;
6872 break;
6873
6874 case DT_RELASZ:
6875 /* The procedure linkage table relocs (DT_JMPREL) should
6876 not be included in the overall relocs (DT_RELA).
6877 Therefore, we override the DT_RELASZ entry here to
6878 make it not include the JMPREL relocs. Since the
6879 linker script arranges for .rela.plt to follow all
6880 other relocation sections, we don't have to worry
6881 about changing the DT_RELA entry. */
6882 if (htab->root.srelplt != NULL)
6883 {
6884 s = htab->root.srelplt->output_section;
6885 dyn.d_un.d_val -= s->size;
6886 }
6887 break;
6888
6889 case DT_TLSDESC_PLT:
6890 s = htab->root.splt;
6891 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6892 + htab->tlsdesc_plt;
6893 break;
6894
6895 case DT_TLSDESC_GOT:
6896 s = htab->root.sgot;
6897 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6898 + htab->dt_tlsdesc_got;
6899 break;
6900 }
6901
6902 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
6903 }
6904
6905 }
6906
6907 /* Fill in the special first entry in the procedure linkage table. */
6908 if (htab->root.splt && htab->root.splt->size > 0)
6909 {
6910 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
6911
6912 elf_section_data (htab->root.splt->output_section)->
6913 this_hdr.sh_entsize = htab->plt_entry_size;
6914
6915
6916 if (htab->tlsdesc_plt)
6917 {
6918 bfd_put_NN (output_bfd, (bfd_vma) 0,
6919 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6920
6921 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6922 elfNN_aarch64_tlsdesc_small_plt_entry,
6923 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
6924
6925 {
6926 bfd_vma adrp1_addr =
6927 htab->root.splt->output_section->vma
6928 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6929
6930 bfd_vma adrp2_addr =
6931 htab->root.splt->output_section->vma
6932 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6933
6934 bfd_vma got_addr =
6935 htab->root.sgot->output_section->vma
6936 + htab->root.sgot->output_offset;
6937
6938 bfd_vma pltgot_addr =
6939 htab->root.sgotplt->output_section->vma
6940 + htab->root.sgotplt->output_offset;
6941
6942 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6943 bfd_vma opcode;
6944
6945 /* adrp x2, DT_TLSDESC_GOT */
6946 opcode = bfd_get_32 (output_bfd,
6947 htab->root.splt->contents
6948 + htab->tlsdesc_plt + 4);
6949 opcode = reencode_adr_imm
6950 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6951 bfd_put_32 (output_bfd, opcode,
6952 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6953
6954 /* adrp x3, 0 */
6955 opcode = bfd_get_32 (output_bfd,
6956 htab->root.splt->contents
6957 + htab->tlsdesc_plt + 8);
6958 opcode = reencode_adr_imm
6959 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6960 bfd_put_32 (output_bfd, opcode,
6961 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6962
6963 /* ldr x2, [x2, #0] */
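/* The LDR unsigned-offset immediate is scaled by the access size
   (8 bytes for this 64-bit load), hence the >> 3 below.  */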
6964 opcode = bfd_get_32 (output_bfd,
6965 htab->root.splt->contents
6966 + htab->tlsdesc_plt + 12);
6967 opcode = reencode_ldst_pos_imm (opcode,
6968 PG_OFFSET (dt_tlsdesc_got) >> 3);
6969 bfd_put_32 (output_bfd, opcode,
6970 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6971
6972 /* add x3, x3, 0 */
6973 opcode = bfd_get_32 (output_bfd,
6974 htab->root.splt->contents
6975 + htab->tlsdesc_plt + 16);
6976 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6977 bfd_put_32 (output_bfd, opcode,
6978 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6979 }
6980 }
6981 }
6982
6983 if (htab->root.sgotplt)
6984 {
6985 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6986 {
6987 (*_bfd_error_handler)
6988 (_("discarded output section: `%A'"), htab->root.sgotplt);
6989 return FALSE;
6990 }
6991
6992 /* Fill in the first three entries in the global offset table. */
6993 if (htab->root.sgotplt->size > 0)
6994 {
6995 /* Set the first entry in the global offset table to the address of
6996 the dynamic section. */
6997 if (sdyn == NULL)
6998 bfd_put_NN (output_bfd, (bfd_vma) 0,
6999 htab->root.sgotplt->contents);
7000 else
7001 bfd_put_NN (output_bfd,
7002 sdyn->output_section->vma + sdyn->output_offset,
7003 htab->root.sgotplt->contents);
7004 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
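/* Zero is fine here: the dynamic linker fills these two slots in at run
   time (conventionally the link map pointer and the address of the lazy
   resolver).  */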
7005 bfd_put_NN (output_bfd,
7006 (bfd_vma) 0,
7007 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7008 bfd_put_NN (output_bfd,
7009 (bfd_vma) 0,
7010 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7011 }
7012
7013 elf_section_data (htab->root.sgotplt->output_section)->
7014 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7015 }
7016
7017 if (htab->root.sgot && htab->root.sgot->size > 0)
7018 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7019 = GOT_ENTRY_SIZE;
7020
7021 return TRUE;
7022 }
7023
7024 /* Return address for Ith PLT stub in section PLT, for relocation REL
7025 or (bfd_vma) -1 if it should not be included. */
7026
7027 static bfd_vma
7028 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7029 const arelent *rel ATTRIBUTE_UNUSED)
7030 {
7031 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7032 }
7033
7034
7035 /* We use this so we can override certain functions
7036 (though currently we don't). */
7037
7038 const struct elf_size_info elfNN_aarch64_size_info =
7039 {
7040 sizeof (ElfNN_External_Ehdr),
7041 sizeof (ElfNN_External_Phdr),
7042 sizeof (ElfNN_External_Shdr),
7043 sizeof (ElfNN_External_Rel),
7044 sizeof (ElfNN_External_Rela),
7045 sizeof (ElfNN_External_Sym),
7046 sizeof (ElfNN_External_Dyn),
7047 sizeof (Elf_External_Note),
7048 4, /* Hash table entry size. */
7049 1, /* Internal relocs per external relocs. */
7050 ARCH_SIZE, /* Arch size. */
7051 LOG_FILE_ALIGN, /* Log_file_align. */
7052 ELFCLASSNN, EV_CURRENT,
7053 bfd_elfNN_write_out_phdrs,
7054 bfd_elfNN_write_shdrs_and_ehdr,
7055 bfd_elfNN_checksum_contents,
7056 bfd_elfNN_write_relocs,
7057 bfd_elfNN_swap_symbol_in,
7058 bfd_elfNN_swap_symbol_out,
7059 bfd_elfNN_slurp_reloc_table,
7060 bfd_elfNN_slurp_symbol_table,
7061 bfd_elfNN_swap_dyn_in,
7062 bfd_elfNN_swap_dyn_out,
7063 bfd_elfNN_swap_reloc_in,
7064 bfd_elfNN_swap_reloc_out,
7065 bfd_elfNN_swap_reloca_in,
7066 bfd_elfNN_swap_reloca_out
7067 };
7068
7069 #define ELF_ARCH bfd_arch_aarch64
7070 #define ELF_MACHINE_CODE EM_AARCH64
7071 #define ELF_MAXPAGESIZE 0x10000
7072 #define ELF_MINPAGESIZE 0x1000
7073 #define ELF_COMMONPAGESIZE 0x1000
7074
7075 #define bfd_elfNN_close_and_cleanup \
7076 elfNN_aarch64_close_and_cleanup
7077
7078 #define bfd_elfNN_bfd_copy_private_bfd_data \
7079 elfNN_aarch64_copy_private_bfd_data
7080
7081 #define bfd_elfNN_bfd_free_cached_info \
7082 elfNN_aarch64_bfd_free_cached_info
7083
7084 #define bfd_elfNN_bfd_is_target_special_symbol \
7085 elfNN_aarch64_is_target_special_symbol
7086
7087 #define bfd_elfNN_bfd_link_hash_table_create \
7088 elfNN_aarch64_link_hash_table_create
7089
7090 #define bfd_elfNN_bfd_link_hash_table_free \
7091 elfNN_aarch64_hash_table_free
7092
7093 #define bfd_elfNN_bfd_merge_private_bfd_data \
7094 elfNN_aarch64_merge_private_bfd_data
7095
7096 #define bfd_elfNN_bfd_print_private_bfd_data \
7097 elfNN_aarch64_print_private_bfd_data
7098
7099 #define bfd_elfNN_bfd_reloc_type_lookup \
7100 elfNN_aarch64_reloc_type_lookup
7101
7102 #define bfd_elfNN_bfd_reloc_name_lookup \
7103 elfNN_aarch64_reloc_name_lookup
7104
7105 #define bfd_elfNN_bfd_set_private_flags \
7106 elfNN_aarch64_set_private_flags
7107
7108 #define bfd_elfNN_find_inliner_info \
7109 elfNN_aarch64_find_inliner_info
7110
7111 #define bfd_elfNN_find_nearest_line \
7112 elfNN_aarch64_find_nearest_line
7113
7114 #define bfd_elfNN_mkobject \
7115 elfNN_aarch64_mkobject
7116
7117 #define bfd_elfNN_new_section_hook \
7118 elfNN_aarch64_new_section_hook
7119
7120 #define elf_backend_adjust_dynamic_symbol \
7121 elfNN_aarch64_adjust_dynamic_symbol
7122
7123 #define elf_backend_always_size_sections \
7124 elfNN_aarch64_always_size_sections
7125
7126 #define elf_backend_check_relocs \
7127 elfNN_aarch64_check_relocs
7128
7129 #define elf_backend_copy_indirect_symbol \
7130 elfNN_aarch64_copy_indirect_symbol
7131
7132 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7133 to them in our hash. */
7134 #define elf_backend_create_dynamic_sections \
7135 elfNN_aarch64_create_dynamic_sections
7136
7137 #define elf_backend_init_index_section \
7138 _bfd_elf_init_2_index_sections
7139
7140 #define elf_backend_is_function_type \
7141 elfNN_aarch64_is_function_type
7142
7143 #define elf_backend_finish_dynamic_sections \
7144 elfNN_aarch64_finish_dynamic_sections
7145
7146 #define elf_backend_finish_dynamic_symbol \
7147 elfNN_aarch64_finish_dynamic_symbol
7148
7149 #define elf_backend_gc_sweep_hook \
7150 elfNN_aarch64_gc_sweep_hook
7151
7152 #define elf_backend_object_p \
7153 elfNN_aarch64_object_p
7154
7155 #define elf_backend_output_arch_local_syms \
7156 elfNN_aarch64_output_arch_local_syms
7157
7158 #define elf_backend_plt_sym_val \
7159 elfNN_aarch64_plt_sym_val
7160
7161 #define elf_backend_post_process_headers \
7162 elfNN_aarch64_post_process_headers
7163
7164 #define elf_backend_relocate_section \
7165 elfNN_aarch64_relocate_section
7166
7167 #define elf_backend_reloc_type_class \
7168 elfNN_aarch64_reloc_type_class
7169
7170 #define elf_backend_section_flags \
7171 elfNN_aarch64_section_flags
7172
7173 #define elf_backend_section_from_shdr \
7174 elfNN_aarch64_section_from_shdr
7175
7176 #define elf_backend_size_dynamic_sections \
7177 elfNN_aarch64_size_dynamic_sections
7178
7179 #define elf_backend_size_info \
7180 elfNN_aarch64_size_info
7181
7182 #define elf_backend_can_refcount 1
7183 #define elf_backend_can_gc_sections 1
7184 #define elf_backend_plt_readonly 1
7185 #define elf_backend_want_got_plt 1
7186 #define elf_backend_want_plt_sym 0
7187 #define elf_backend_may_use_rel_p 0
7188 #define elf_backend_may_use_rela_p 1
7189 #define elf_backend_default_use_rela_p 1
7190 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7191 #define elf_backend_default_execstack 0
7192
7193 #undef elf_backend_obj_attrs_section
7194 #define elf_backend_obj_attrs_section ".ARM.attributes"
7195
7196 #include "elfNN-target.h"