1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
  23    Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
  51    traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
  62    global and local TLS symbols.  Note that this is different from
  63    non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
  74    linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
 101    spotted.  The local symbol data structures are created once, when
 102    the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
 109    For each global with a positive reference count we allocate a double
 110    GOT slot.  For a traditional TLS symbol we allocate space for two
 111    relocation entries on the GOT; for a TLS descriptor symbol we
 112    allocate space for one relocation on the slot.  Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
 117    Iterate over all input BFDs, looking in the local symbol data structures
 118    constructed earlier for local TLS symbols, and allocate them double
 119    GOT slots along with space for a single GOT relocation.  Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128    relocations are emitted only once, the first time a TLS symbol is
 129    encountered.  The implementation uses the LSB of the GOT offset to
 130    flag that the relevant GOT relocations for a symbol have been emitted.
 131    All of the TLS code that uses the GOT offset must mask out this flag
 132    bit before using the offset (see the sketch after the #includes below).
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
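/* Illustration only, not part of the original file: a minimal sketch
   (with a hypothetical helper name) of the flag convention described in
   the notes above, where the least significant bit of a symbol's
   recorded GOT offset marks that its GOT relocations have already been
   emitted.  */

static bfd_vma ATTRIBUTE_UNUSED
sketch_got_offset_without_flag (bfd_vma got_offset)
{
  /* Mask out the bookkeeping bit before using the offset.  */
  return got_offset & ~(bfd_vma) 1;
}
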
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
189
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
203
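/* Illustration only, not part of the original file: a usage sketch
   (hypothetical helper name) of the predicates above, distinguishing
   traditional TLS relocations from TLS descriptor relocations.  */

static int ATTRIBUTE_UNUSED
sketch_is_traditional_tls_reloc (bfd_reloc_code_real_type r_type)
{
  return (IS_AARCH64_TLS_RELOC (r_type)
	  && !IS_AARCH64_TLSDESC_RELOC (r_type));
}
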
204 #define ELIMINATE_COPY_RELOCS 0
205
206 /* Return size of a relocation entry. HTAB is the bfd's
 207    elf_aarch64_link_hash_table.  */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
209
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
215
216 /* Encoding of the nop instruction */
217 #define INSN_NOP 0xd503201f
218
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
 223 /* The first entry in a procedure linkage table looks like this.
 224    If the distance between the PLTGOT and the PLT is < 4GB, use
225 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 {
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 #if ARCH_SIZE == 64
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
235 #else
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
238 #endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 };
244
 245 /* The per-function entry in a procedure linkage table looks like this.
 246    If the distance between the PLTGOT and the PLT is < 4GB, use
247 these PLT entries. */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
249 {
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
251 #if ARCH_SIZE == 64
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 #else
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
257 #endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
259 };
260
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
263 {
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
267 #if ARCH_SIZE == 64
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
270 #else
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
273 #endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 };
278
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
281
282 #define AARCH64_ELF_ABI_VERSION 0
283
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
286
 287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
289 in reloc.c. */
290
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
292 {
293 EMPTY_HOWTO (0),
294
295 /* Basic data relocations. */
296
297 #if ARCH_SIZE == 64
298 HOWTO (R_AARCH64_NULL, /* type */
299 0, /* rightshift */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
301 0, /* bitsize */
302 FALSE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
308 0, /* src_mask */
309 0, /* dst_mask */
310 FALSE), /* pcrel_offset */
311 #else
312 HOWTO (R_AARCH64_NONE, /* type */
313 0, /* rightshift */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
315 0, /* bitsize */
316 FALSE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
322 0, /* src_mask */
323 0, /* dst_mask */
324 FALSE), /* pcrel_offset */
325 #endif
326
327 /* .xword: (S+A) */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
329 0, /* rightshift */
330 4, /* size (4 = long long) */
331 64, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 /* .word: (S+A) */
343 HOWTO (AARCH64_R (ABS32), /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 /* .half: (S+A) */
358 HOWTO (AARCH64_R (ABS16), /* type */
359 0, /* rightshift */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
361 16, /* bitsize */
362 FALSE, /* pc_relative */
363 0, /* bitpos */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
371
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 TRUE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
386
387 /* .word: (S+A-P) */
388 HOWTO (AARCH64_R (PREL32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 TRUE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
401
402 /* .half: (S+A-P) */
403 HOWTO (AARCH64_R (PREL16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 TRUE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
416
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
419
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
422 0, /* rightshift */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 FALSE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
434
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
437 0, /* rightshift */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
439 16, /* bitsize */
440 FALSE, /* pc_relative */
441 0, /* bitpos */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
449
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
452 16, /* rightshift */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
454 16, /* bitsize */
455 FALSE, /* pc_relative */
456 0, /* bitpos */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
464
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
467 16, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
482 32, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
497 32, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
512 48, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
528
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
531 0, /* rightshift */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
533 16, /* bitsize */
534 FALSE, /* pc_relative */
535 0, /* bitpos */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
543
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
546 16, /* rightshift */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
548 16, /* bitsize */
549 FALSE, /* pc_relative */
550 0, /* bitpos */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
558
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
561 32, /* rightshift */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
563 16, /* bitsize */
564 FALSE, /* pc_relative */
565 0, /* bitpos */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
573
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
576
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
579 2, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 19, /* bitsize */
582 TRUE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
591
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
594 0, /* rightshift */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
596 21, /* bitsize */
597 TRUE, /* pc_relative */
598 0, /* bitpos */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
606
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
609 12, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 21, /* bitsize */
612 TRUE, /* pc_relative */
613 0, /* bitpos */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
621
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
624 12, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 21, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 12, /* bitsize */
642 FALSE, /* pc_relative */
643 10, /* bitpos */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 12, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 /* Relocations for control-flow instructions. */
668
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
671 2, /* rightshift */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
673 14, /* bitsize */
674 TRUE, /* pc_relative */
675 0, /* bitpos */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
683
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
686 2, /* rightshift */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
688 19, /* bitsize */
689 TRUE, /* pc_relative */
690 0, /* bitpos */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
698
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
701 2, /* rightshift */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
703 26, /* bitsize */
704 TRUE, /* pc_relative */
705 0, /* bitpos */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
713
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 26, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
731 1, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 12, /* bitsize */
734 FALSE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
743
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 12, /* bitsize */
749 FALSE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
758
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
761 3, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 12, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
776 4, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
792 2, /* rightshift */
793 2, /* size (0 = byte,1 = short,2 = long) */
794 19, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
808 12, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 21, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
823 3, /* rightshift */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
825 12, /* bitsize */
826 FALSE, /* pc_relative */
827 0, /* bitpos */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
835
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
838 2, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 12, /* bitsize */
841 FALSE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
850
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
854 12, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 21, /* bitsize */
857 TRUE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
866
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
868 0, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 21, /* bitsize */
871 TRUE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
880
881 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
883 0, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
897 16, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 16, /* bitsize */
900 FALSE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
909
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
925 12, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 21, /* bitsize */
928 FALSE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
937
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
939 3, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 12, /* bitsize */
942 FALSE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
951
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
953 2, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 12, /* bitsize */
956 FALSE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
965
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 19, /* bitsize */
970 FALSE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
979
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
981 32, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 16, /* bitsize */
984 FALSE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
993
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
995 16, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 16, /* bitsize */
998 FALSE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1007
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 16, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1093 2, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 19, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 21, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1125 21, /* bitsize */
1126 TRUE, /* pc_relative */
1127 0, /* bitpos */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1135
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1138 3, /* rightshift */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1140 12, /* bitsize */
1141 FALSE, /* pc_relative */
1142 0, /* bitpos */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1150
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1153 2, /* rightshift */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1155 12, /* bitsize */
1156 FALSE, /* pc_relative */
1157 0, /* bitpos */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1165
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1168 0, /* rightshift */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1170 12, /* bitsize */
1171 FALSE, /* pc_relative */
1172 0, /* bitpos */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1180
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 12, /* bitsize */
1185 FALSE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1194
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 12, /* bitsize */
1199 FALSE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1208
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1210 0, /* rightshift */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1212 12, /* bitsize */
1213 FALSE, /* pc_relative */
1214 0, /* bitpos */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1219 0x0, /* src_mask */
1220 0x0, /* dst_mask */
1221 FALSE), /* pcrel_offset */
1222
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1224 0, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 12, /* bitsize */
1227 FALSE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1233 0x0, /* src_mask */
1234 0x0, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1238 0, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 0, /* bitsize */
1241 FALSE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1247 0x0, /* src_mask */
1248 0x0, /* dst_mask */
1249 FALSE), /* pcrel_offset */
1250
1251 HOWTO (AARCH64_R (COPY), /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 64, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1266 0, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 64, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 64, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 64, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 64, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 #if ARCH_SIZE == 64
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1317 #else
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1319 #endif
1320 FALSE, /* partial_inplace */
1321 0, /* src_mask */
1322 ALL_ONES, /* dst_mask */
1323          FALSE),                /* pcrel_offset */
1324
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1326 0, /* rightshift */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1328 64, /* bitsize */
1329 FALSE, /* pc_relative */
1330 0, /* bitpos */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1333 #if ARCH_SIZE == 64
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1335 #else
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1337 #endif
1338 FALSE, /* partial_inplace */
1339 0, /* src_mask */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 64, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 #if ARCH_SIZE == 64
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1353 #else
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1355 #endif
1356 FALSE, /* partial_inplace */
1357 0, /* src_mask */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1360
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1362 0, /* rightshift */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 64, /* bitsize */
1365 FALSE, /* pc_relative */
1366 0, /* bitpos */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1371 0, /* src_mask */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1374
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1376 0, /* rightshift */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 64, /* bitsize */
1379 FALSE, /* pc_relative */
1380 0, /* bitpos */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1385 0, /* src_mask */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1388
1389 EMPTY_HOWTO (0),
1390 };
1391
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1394 0, /* rightshift */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1396 0, /* bitsize */
1397 FALSE, /* pc_relative */
1398 0, /* bitpos */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1403 0, /* src_mask */
1404 0, /* dst_mask */
1405 FALSE); /* pcrel_offset */
1406
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1408
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1411 {
1412 const int size
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1416
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1419
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1422
1423 return BFD_RELOC_AARCH64_RELOC_START;
1424 }
1425
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1427
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1430 {
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1434
1435 if (initialized_p == FALSE)
1436 {
1437 unsigned int i;
1438
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1442
1443 initialized_p = TRUE;
1444 }
1445
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1448
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1451 {
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1455 }
1456
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1458 }
1459
1460 struct elf_aarch64_reloc_map
1461 {
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1464 };
1465
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1468 {
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1470
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1479 };
1480
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1483
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1486 {
1487 unsigned int i;
1488
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1494 {
1495 code = elf_aarch64_reloc_map[i].to;
1496 break;
1497 }
1498
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1503
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1506
1507 return NULL;
1508 }
1509
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1512 {
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1515
1516 #if ARCH_SIZE == 32
1517 if (r_type > 256)
1518 {
1519 bfd_set_error (bfd_error_bad_value);
1520 return NULL;
1521 }
1522 #endif
1523
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1526
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1529
1530 if (howto != NULL)
1531 return howto;
1532
1533 bfd_set_error (bfd_error_bad_value);
1534 return NULL;
1535 }
1536
1537 static void
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1540 {
1541 unsigned int r_type;
1542
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1545 }
1546
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1550 {
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1552
1553 if (howto != NULL)
1554 return howto;
1555
1556 bfd_set_error (bfd_error_bad_value);
1557 return NULL;
1558 }
1559
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1562 const char *r_name)
1563 {
1564 unsigned int i;
1565
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1570
1571 return NULL;
1572 }
1573
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1578
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name, and its type, the stub can be found. The
1582    name can be changed.  The only requirement is that %s be present.  */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
1584
1585 /* The name of the dynamic interpreter. This is put in the .interp
1586 section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1588
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1592 (-((1 << 25) << 2))
1593
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1596
1597 static int
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1599 {
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1602 }
1603
1604 static int
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1606 {
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1610 }
1611
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1613 {
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1619 };
1620
1621 static const uint32_t aarch64_long_branch_stub[] =
1622 {
1623 #if ARCH_SIZE == 64
1624 0x58000090, /* ldr ip0, 1f */
1625 #else
1626 0x18000090, /* ldr wip0, 1f */
1627 #endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1633 */
1634 0x00000000,
1635 };
1636
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1638 {
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1641 };
1642
1643 /* Section name for stubs is the associated section name plus this
1644 string. */
1645 #define STUB_SUFFIX ".stub"
1646
1647 enum elf_aarch64_stub_type
1648 {
1649 aarch64_stub_none,
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1653 };
1654
1655 struct elf_aarch64_stub_hash_entry
1656 {
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1659
1660 /* The stub section. */
1661 asection *stub_sec;
1662
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1665
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1670
1671 enum elf_aarch64_stub_type stub_type;
1672
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1675
1676 /* Destination symbol type */
1677 unsigned char st_type;
1678
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1681 asection *id_sec;
1682
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1686 char *output_name;
1687
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1691 };
1692
1693 /* Used to build a map of a section. This is required for mixed-endian
1694 code/data. */
1695
1696 typedef struct elf_elf_section_map
1697 {
1698 bfd_vma vma;
1699 char type;
1700 }
1701 elf_aarch64_section_map;
1702
1703
1704 typedef struct _aarch64_elf_section_data
1705 {
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1710 }
1711 _aarch64_elf_section_data;
1712
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1715
1716 /* The size of the thread control block, which is defined to be two pointers. */
1717 #define TCB_SIZE ((ARCH_SIZE / 8) * 2)
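/* Illustration: with ARCH_SIZE == 64 TCB_SIZE evaluates to 16 bytes, and
with ARCH_SIZE == 32 (ILP32) to 8 bytes. On AArch64 the thread pointer
addresses this control block, with the static TLS data laid out after it. */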
1718
1719 struct elf_aarch64_local_symbol
1720 {
1721 unsigned int got_type;
1722 bfd_signed_vma got_refcount;
1723 bfd_vma got_offset;
1724
1725 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1726 offset is from the end of the jump table and reserved entries
1727 within the PLTGOT.
1728
1729 The magic value (bfd_vma) -1 indicates that an offset has not been
1730 allocated. */
1731 bfd_vma tlsdesc_got_jump_table_offset;
1732 };
1733
1734 struct elf_aarch64_obj_tdata
1735 {
1736 struct elf_obj_tdata root;
1737
1738 /* local symbol descriptors */
1739 struct elf_aarch64_local_symbol *locals;
1740
1741 /* Zero to warn when linking objects with incompatible enum sizes. */
1742 int no_enum_size_warning;
1743
1744 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1745 int no_wchar_size_warning;
1746 };
1747
1748 #define elf_aarch64_tdata(bfd) \
1749 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1750
1751 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1752
1753 #define is_aarch64_elf(bfd) \
1754 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1755 && elf_tdata (bfd) != NULL \
1756 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1757
1758 static bfd_boolean
1759 elfNN_aarch64_mkobject (bfd *abfd)
1760 {
1761 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1762 AARCH64_ELF_DATA);
1763 }
1764
1765 #define elf_aarch64_hash_entry(ent) \
1766 ((struct elf_aarch64_link_hash_entry *)(ent))
1767
1768 #define GOT_UNKNOWN 0
1769 #define GOT_NORMAL 1
1770 #define GOT_TLS_GD 2
1771 #define GOT_TLS_IE 4
1772 #define GOT_TLSDESC_GD 8
1773
1774 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
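/* These are bit values, so a single symbol may accumulate more than one of
them, e.g. (GOT_TLS_GD | GOT_TLSDESC_GD) when it is referenced through both
a traditional general-dynamic and a TLS-descriptor sequence; GOT_TLS_GD_ANY_P
checks whether either general-dynamic flavour is present. */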
1775
1776 /* AArch64 ELF linker hash entry. */
1777 struct elf_aarch64_link_hash_entry
1778 {
1779 struct elf_link_hash_entry root;
1780
1781 /* Track dynamic relocs copied for this symbol. */
1782 struct elf_dyn_relocs *dyn_relocs;
1783
1784 /* Since PLT entries have variable size, we need to record the
1785 index into .got.plt instead of recomputing it from the PLT
1786 offset. */
1787 bfd_signed_vma plt_got_offset;
1788
1789 /* Bit mask representing the type of GOT entry(s), if any, required by
1790 this symbol. */
1791 unsigned int got_type;
1792
1793 /* A pointer to the most recently used stub hash entry against this
1794 symbol. */
1795 struct elf_aarch64_stub_hash_entry *stub_cache;
1796
1797 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1798 is from the end of the jump table and reserved entries within the PLTGOT.
1799
1800 The magic value (bfd_vma) -1 indicates that an offset has not
1801 been allocated. */
1802 bfd_vma tlsdesc_got_jump_table_offset;
1803 };
1804
1805 static unsigned int
1806 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1807 bfd *abfd,
1808 unsigned long r_symndx)
1809 {
1810 if (h)
1811 return elf_aarch64_hash_entry (h)->got_type;
1812
1813 if (! elf_aarch64_locals (abfd))
1814 return GOT_UNKNOWN;
1815
1816 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1817 }
1818
1819 /* Get the AArch64 elf linker hash table from a link_info structure. */
1820 #define elf_aarch64_hash_table(info) \
1821 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1822
1823 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1824 ((struct elf_aarch64_stub_hash_entry *) \
1825 bfd_hash_lookup ((table), (string), (create), (copy)))
1826
1827 /* AArch64 ELF linker hash table. */
1828 struct elf_aarch64_link_hash_table
1829 {
1830 /* The main hash table. */
1831 struct elf_link_hash_table root;
1832
1833 /* Nonzero to force PIC branch veneers. */
1834 int pic_veneer;
1835
1836 /* Fix erratum 835769. */
1837 int fix_erratum_835769;
1838
1839 /* The number of bytes in the initial entry in the PLT. */
1840 bfd_size_type plt_header_size;
1841
1842 /* The number of bytes in the subsequent PLT entries. */
1843 bfd_size_type plt_entry_size;
1844
1845 /* Short-cuts to get to dynamic linker sections. */
1846 asection *sdynbss;
1847 asection *srelbss;
1848
1849 /* Small local sym cache. */
1850 struct sym_cache sym_cache;
1851
1852 /* For convenience in allocate_dynrelocs. */
1853 bfd *obfd;
1854
1855 /* The amount of space used by the reserved portion of the sgotplt
1856 section, plus whatever space is used by the jump slots. */
1857 bfd_vma sgotplt_jump_table_size;
1858
1859 /* The stub hash table. */
1860 struct bfd_hash_table stub_hash_table;
1861
1862 /* Linker stub bfd. */
1863 bfd *stub_bfd;
1864
1865 /* Linker call-backs. */
1866 asection *(*add_stub_section) (const char *, asection *);
1867 void (*layout_sections_again) (void);
1868
1869 /* Array to keep track of which stub sections have been created, and
1870 information on stub grouping. */
1871 struct map_stub
1872 {
1873 /* This is the section to which stubs in the group will be
1874 attached. */
1875 asection *link_sec;
1876 /* The stub section. */
1877 asection *stub_sec;
1878 } *stub_group;
1879
1880 /* Assorted information used by elfNN_aarch64_size_stubs. */
1881 unsigned int bfd_count;
1882 int top_index;
1883 asection **input_list;
1884
1885 /* The offset into splt of the PLT entry for the TLS descriptor
1886 resolver. Special values are 0, if not necessary (or not found
1887 to be necessary yet), and -1 if needed but not determined
1888 yet. */
1889 bfd_vma tlsdesc_plt;
1890
1891 /* The GOT offset for the lazy trampoline. Communicated to the
1892 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1893 indicates an offset is not allocated. */
1894 bfd_vma dt_tlsdesc_got;
1895
1896 /* Used by local STT_GNU_IFUNC symbols. */
1897 htab_t loc_hash_table;
1898 void * loc_hash_memory;
1899 };
1900
1901 /* Create an entry in an AArch64 ELF linker hash table. */
1902
1903 static struct bfd_hash_entry *
1904 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1905 struct bfd_hash_table *table,
1906 const char *string)
1907 {
1908 struct elf_aarch64_link_hash_entry *ret =
1909 (struct elf_aarch64_link_hash_entry *) entry;
1910
1911 /* Allocate the structure if it has not already been allocated by a
1912 subclass. */
1913 if (ret == NULL)
1914 ret = bfd_hash_allocate (table,
1915 sizeof (struct elf_aarch64_link_hash_entry));
1916 if (ret == NULL)
1917 return (struct bfd_hash_entry *) ret;
1918
1919 /* Call the allocation method of the superclass. */
1920 ret = ((struct elf_aarch64_link_hash_entry *)
1921 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1922 table, string));
1923 if (ret != NULL)
1924 {
1925 ret->dyn_relocs = NULL;
1926 ret->got_type = GOT_UNKNOWN;
1927 ret->plt_got_offset = (bfd_vma) - 1;
1928 ret->stub_cache = NULL;
1929 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1930 }
1931
1932 return (struct bfd_hash_entry *) ret;
1933 }
1934
1935 /* Initialize an entry in the stub hash table. */
1936
1937 static struct bfd_hash_entry *
1938 stub_hash_newfunc (struct bfd_hash_entry *entry,
1939 struct bfd_hash_table *table, const char *string)
1940 {
1941 /* Allocate the structure if it has not already been allocated by a
1942 subclass. */
1943 if (entry == NULL)
1944 {
1945 entry = bfd_hash_allocate (table,
1946 sizeof (struct
1947 elf_aarch64_stub_hash_entry));
1948 if (entry == NULL)
1949 return entry;
1950 }
1951
1952 /* Call the allocation method of the superclass. */
1953 entry = bfd_hash_newfunc (entry, table, string);
1954 if (entry != NULL)
1955 {
1956 struct elf_aarch64_stub_hash_entry *eh;
1957
1958 /* Initialize the local fields. */
1959 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1960 eh->stub_sec = NULL;
1961 eh->stub_offset = 0;
1962 eh->target_value = 0;
1963 eh->target_section = NULL;
1964 eh->stub_type = aarch64_stub_none;
1965 eh->h = NULL;
1966 eh->id_sec = NULL;
1967 }
1968
1969 return entry;
1970 }
1971
1972 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1973 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1974 as global symbols. We reuse indx and dynstr_index for the local symbol
1975 hash since they aren't used by global symbols in this backend. */
1976
1977 static hashval_t
1978 elfNN_aarch64_local_htab_hash (const void *ptr)
1979 {
1980 struct elf_link_hash_entry *h
1981 = (struct elf_link_hash_entry *) ptr;
1982 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1983 }
1984
1985 /* Compare local hash entries. */
1986
1987 static int
1988 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
1989 {
1990 struct elf_link_hash_entry *h1
1991 = (struct elf_link_hash_entry *) ptr1;
1992 struct elf_link_hash_entry *h2
1993 = (struct elf_link_hash_entry *) ptr2;
1994
1995 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
1996 }
1997
1998 /* Find and/or create a hash entry for a local symbol. */
1999
2000 static struct elf_link_hash_entry *
2001 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2002 bfd *abfd, const Elf_Internal_Rela *rel,
2003 bfd_boolean create)
2004 {
2005 struct elf_aarch64_link_hash_entry e, *ret;
2006 asection *sec = abfd->sections;
2007 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2008 ELFNN_R_SYM (rel->r_info));
2009 void **slot;
2010
2011 e.root.indx = sec->id;
2012 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2013 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2014 create ? INSERT : NO_INSERT);
2015
2016 if (!slot)
2017 return NULL;
2018
2019 if (*slot)
2020 {
2021 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2022 return &ret->root;
2023 }
2024
2025 ret = (struct elf_aarch64_link_hash_entry *)
2026 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2027 sizeof (struct elf_aarch64_link_hash_entry));
2028 if (ret)
2029 {
2030 memset (ret, 0, sizeof (*ret));
2031 ret->root.indx = sec->id;
2032 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2033 ret->root.dynindx = -1;
2034 *slot = ret;
2035 }
2036 return &ret->root;
2037 }
2038
2039 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2040
2041 static void
2042 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2043 struct elf_link_hash_entry *dir,
2044 struct elf_link_hash_entry *ind)
2045 {
2046 struct elf_aarch64_link_hash_entry *edir, *eind;
2047
2048 edir = (struct elf_aarch64_link_hash_entry *) dir;
2049 eind = (struct elf_aarch64_link_hash_entry *) ind;
2050
2051 if (eind->dyn_relocs != NULL)
2052 {
2053 if (edir->dyn_relocs != NULL)
2054 {
2055 struct elf_dyn_relocs **pp;
2056 struct elf_dyn_relocs *p;
2057
2058 /* Add reloc counts against the indirect sym to the direct sym
2059 list. Merge any entries against the same section. */
2060 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2061 {
2062 struct elf_dyn_relocs *q;
2063
2064 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2065 if (q->sec == p->sec)
2066 {
2067 q->pc_count += p->pc_count;
2068 q->count += p->count;
2069 *pp = p->next;
2070 break;
2071 }
2072 if (q == NULL)
2073 pp = &p->next;
2074 }
2075 *pp = edir->dyn_relocs;
2076 }
2077
2078 edir->dyn_relocs = eind->dyn_relocs;
2079 eind->dyn_relocs = NULL;
2080 }
2081
2082 if (ind->root.type == bfd_link_hash_indirect)
2083 {
2084 /* Copy over PLT info. */
2085 if (dir->got.refcount <= 0)
2086 {
2087 edir->got_type = eind->got_type;
2088 eind->got_type = GOT_UNKNOWN;
2089 }
2090 }
2091
2092 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2093 }
2094
2095 /* Destroy an AArch64 elf linker hash table. */
2096
2097 static void
2098 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2099 {
2100 struct elf_aarch64_link_hash_table *ret
2101 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2102
2103 if (ret->loc_hash_table)
2104 htab_delete (ret->loc_hash_table);
2105 if (ret->loc_hash_memory)
2106 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2107
2108 bfd_hash_table_free (&ret->stub_hash_table);
2109 _bfd_elf_link_hash_table_free (obfd);
2110 }
2111
2112 /* Create an AArch64 elf linker hash table. */
2113
2114 static struct bfd_link_hash_table *
2115 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2116 {
2117 struct elf_aarch64_link_hash_table *ret;
2118 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2119
2120 ret = bfd_zmalloc (amt);
2121 if (ret == NULL)
2122 return NULL;
2123
2124 if (!_bfd_elf_link_hash_table_init
2125 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2126 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2127 {
2128 free (ret);
2129 return NULL;
2130 }
2131
2132 ret->plt_header_size = PLT_ENTRY_SIZE;
2133 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2134 ret->obfd = abfd;
2135 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2136
2137 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2138 sizeof (struct elf_aarch64_stub_hash_entry)))
2139 {
2140 _bfd_elf_link_hash_table_free (abfd);
2141 return NULL;
2142 }
2143
2144 ret->loc_hash_table = htab_try_create (1024,
2145 elfNN_aarch64_local_htab_hash,
2146 elfNN_aarch64_local_htab_eq,
2147 NULL);
2148 ret->loc_hash_memory = objalloc_create ();
2149 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2150 {
2151 elfNN_aarch64_link_hash_table_free (abfd);
2152 return NULL;
2153 }
2154 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2155
2156 return &ret->root.root;
2157 }
2158
2159 static bfd_boolean
2160 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2161 bfd_vma offset, bfd_vma value)
2162 {
2163 reloc_howto_type *howto;
2164 bfd_vma place;
2165
2166 howto = elfNN_aarch64_howto_from_type (r_type);
2167 place = (input_section->output_section->vma + input_section->output_offset
2168 + offset);
2169
2170 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2171 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2172 return _bfd_aarch64_elf_put_addend (input_bfd,
2173 input_section->contents + offset, r_type,
2174 howto, value);
2175 }
2176
2177 static enum elf_aarch64_stub_type
2178 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2179 {
2180 if (aarch64_valid_for_adrp_p (value, place))
2181 return aarch64_stub_adrp_branch;
2182 return aarch64_stub_long_branch;
2183 }
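/* The choice above trades size for reach: the adrp/add/br stub is 12 bytes
but only reaches targets within the +/-4GB ADRP range, while the
long-branch stub is 24 bytes (four instructions plus an 8-byte literal)
and can reach any address. */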
2184
2185 /* Determine the type of stub needed, if any, for a call. */
2186
2187 static enum elf_aarch64_stub_type
2188 aarch64_type_of_stub (struct bfd_link_info *info,
2189 asection *input_sec,
2190 const Elf_Internal_Rela *rel,
2191 unsigned char st_type,
2192 struct elf_aarch64_link_hash_entry *hash,
2193 bfd_vma destination)
2194 {
2195 bfd_vma location;
2196 bfd_signed_vma branch_offset;
2197 unsigned int r_type;
2198 struct elf_aarch64_link_hash_table *globals;
2199 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2200 bfd_boolean via_plt_p;
2201
2202 if (st_type != STT_FUNC)
2203 return stub_type;
2204
2205 globals = elf_aarch64_hash_table (info);
2206 via_plt_p = (globals->root.splt != NULL && hash != NULL
2207 && hash->root.plt.offset != (bfd_vma) - 1);
2208
2209 if (via_plt_p)
2210 return stub_type;
2211
2212 /* Determine where the call point is. */
2213 location = (input_sec->output_offset
2214 + input_sec->output_section->vma + rel->r_offset);
2215
2216 branch_offset = (bfd_signed_vma) (destination - location);
2217
2218 r_type = ELFNN_R_TYPE (rel->r_info);
2219
2220 /* We don't want to redirect any old unconditional jump in this way,
2221 only one which is being used for a sibcall, where it is
2222 acceptable for the IP0 and IP1 registers to be clobbered. */
2223 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2224 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2225 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2226 {
2227 stub_type = aarch64_stub_long_branch;
2228 }
2229
2230 return stub_type;
2231 }
2232
2233 /* Build a name for an entry in the stub hash table. */
2234
2235 static char *
2236 elfNN_aarch64_stub_name (const asection *input_section,
2237 const asection *sym_sec,
2238 const struct elf_aarch64_link_hash_entry *hash,
2239 const Elf_Internal_Rela *rel)
2240 {
2241 char *stub_name;
2242 bfd_size_type len;
2243
2244 if (hash)
2245 {
2246 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2247 stub_name = bfd_malloc (len);
2248 if (stub_name != NULL)
2249 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2250 (unsigned int) input_section->id,
2251 hash->root.root.root.string,
2252 rel->r_addend);
2253 }
2254 else
2255 {
2256 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2257 stub_name = bfd_malloc (len);
2258 if (stub_name != NULL)
2259 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2260 (unsigned int) input_section->id,
2261 (unsigned int) sym_sec->id,
2262 (unsigned int) ELFNN_R_SYM (rel->r_info),
2263 rel->r_addend);
2264 }
2265
2266 return stub_name;
2267 }
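/* A sketch of the naming scheme: a call from the section with id 0x12 to
the global symbol printf with addend 0 produces "00000012_printf+0", while
a call to local symbol index 5 in the section with id 0x2f produces
"00000012_2f:5+0". */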
2268
2269 /* Look up an entry in the stub hash. Stub entries are cached because
2270 creating the stub name takes a bit of time. */
2271
2272 static struct elf_aarch64_stub_hash_entry *
2273 elfNN_aarch64_get_stub_entry (const asection *input_section,
2274 const asection *sym_sec,
2275 struct elf_link_hash_entry *hash,
2276 const Elf_Internal_Rela *rel,
2277 struct elf_aarch64_link_hash_table *htab)
2278 {
2279 struct elf_aarch64_stub_hash_entry *stub_entry;
2280 struct elf_aarch64_link_hash_entry *h =
2281 (struct elf_aarch64_link_hash_entry *) hash;
2282 const asection *id_sec;
2283
2284 if ((input_section->flags & SEC_CODE) == 0)
2285 return NULL;
2286
2287 /* If this input section is part of a group of sections sharing one
2288 stub section, then use the id of the first section in the group.
2289 Stub names need to include a section id, as there may well be
2290 more than one stub used to reach, say, printf, and we need to
2291 distinguish between them. */
2292 id_sec = htab->stub_group[input_section->id].link_sec;
2293
2294 if (h != NULL && h->stub_cache != NULL
2295 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2296 {
2297 stub_entry = h->stub_cache;
2298 }
2299 else
2300 {
2301 char *stub_name;
2302
2303 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2304 if (stub_name == NULL)
2305 return NULL;
2306
2307 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2308 stub_name, FALSE, FALSE);
2309 if (h != NULL)
2310 h->stub_cache = stub_entry;
2311
2312 free (stub_name);
2313 }
2314
2315 return stub_entry;
2316 }
2317
2318
2319 /* Create a stub section. */
2320
2321 static asection *
2322 _bfd_aarch64_create_stub_section (asection *section,
2323 struct elf_aarch64_link_hash_table *htab)
2324 {
2325 size_t namelen;
2326 bfd_size_type len;
2327 char *s_name;
2328
2329 namelen = strlen (section->name);
2330 len = namelen + sizeof (STUB_SUFFIX);
2331 s_name = bfd_alloc (htab->stub_bfd, len);
2332 if (s_name == NULL)
2333 return NULL;
2334
2335 memcpy (s_name, section->name, namelen);
2336 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2337 return (*htab->add_stub_section) (s_name, section);
2338 }
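/* For instance, a stub group whose leader input section is named ".text"
gets a stub section named ".text.stub" in the stub bfd. */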
2339
2340
2341 /* Find or create a stub section for a link section.
2342
2343 Find or create the stub section used to collect stubs attached to
2344 the specified link section. */
2345
2346 static asection *
2347 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2348 struct elf_aarch64_link_hash_table *htab)
2349 {
2350 if (htab->stub_group[link_section->id].stub_sec == NULL)
2351 htab->stub_group[link_section->id].stub_sec
2352 = _bfd_aarch64_create_stub_section (link_section, htab);
2353 return htab->stub_group[link_section->id].stub_sec;
2354 }
2355
2356
2357 /* Find or create a stub section in the stub group for an input
2358 section. */
2359
2360 static asection *
2361 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2362 struct elf_aarch64_link_hash_table *htab)
2363 {
2364 asection *link_sec = htab->stub_group[section->id].link_sec;
2365 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2366 }
2367
2368
2369 /* Add a new stub entry in the stub group associated with an input
2370 section to the stub hash. Not all fields of the new stub entry are
2371 initialised. */
2372
2373 static struct elf_aarch64_stub_hash_entry *
2374 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2375 asection *section,
2376 struct elf_aarch64_link_hash_table *htab)
2377 {
2378 asection *link_sec;
2379 asection *stub_sec;
2380 struct elf_aarch64_stub_hash_entry *stub_entry;
2381
2382 link_sec = htab->stub_group[section->id].link_sec;
2383 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2384
2385 /* Enter this entry into the linker stub hash table. */
2386 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2387 TRUE, FALSE);
2388 if (stub_entry == NULL)
2389 {
2390 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
2391 section->owner, stub_name);
2392 return NULL;
2393 }
2394
2395 stub_entry->stub_sec = stub_sec;
2396 stub_entry->stub_offset = 0;
2397 stub_entry->id_sec = link_sec;
2398
2399 return stub_entry;
2400 }
2401
2402 static bfd_boolean
2403 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2404 void *in_arg ATTRIBUTE_UNUSED)
2405 {
2406 struct elf_aarch64_stub_hash_entry *stub_entry;
2407 asection *stub_sec;
2408 bfd *stub_bfd;
2409 bfd_byte *loc;
2410 bfd_vma sym_value;
2411 bfd_vma veneered_insn_loc;
2412 bfd_vma veneer_entry_loc;
2413 bfd_signed_vma branch_offset = 0;
2414 unsigned int template_size;
2415 const uint32_t *template;
2416 unsigned int i;
2417
2418 /* Massage our args to the form they really have. */
2419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2420
2421 stub_sec = stub_entry->stub_sec;
2422
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry->stub_offset = stub_sec->size;
2425 loc = stub_sec->contents + stub_entry->stub_offset;
2426
2427 stub_bfd = stub_sec->owner;
2428
2429 /* This is the address of the stub destination. */
2430 sym_value = (stub_entry->target_value
2431 + stub_entry->target_section->output_offset
2432 + stub_entry->target_section->output_section->vma);
2433
2434 if (stub_entry->stub_type == aarch64_stub_long_branch)
2435 {
2436 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2437 + stub_sec->output_offset);
2438
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value, place))
2441 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2442 }
2443
2444 switch (stub_entry->stub_type)
2445 {
2446 case aarch64_stub_adrp_branch:
2447 template = aarch64_adrp_branch_stub;
2448 template_size = sizeof (aarch64_adrp_branch_stub);
2449 break;
2450 case aarch64_stub_long_branch:
2451 template = aarch64_long_branch_stub;
2452 template_size = sizeof (aarch64_long_branch_stub);
2453 break;
2454 case aarch64_stub_erratum_835769_veneer:
2455 template = aarch64_erratum_835769_stub;
2456 template_size = sizeof (aarch64_erratum_835769_stub);
2457 break;
2458 default:
2459 abort ();
2460 }
2461
2462 for (i = 0; i < (template_size / sizeof template[0]); i++)
2463 {
2464 bfd_putl32 (template[i], loc);
2465 loc += 4;
2466 }
2467
2468 template_size = (template_size + 7) & ~7;
2469 stub_sec->size += template_size;
2470
2471 switch (stub_entry->stub_type)
2472 {
2473 case aarch64_stub_adrp_branch:
2474 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2475 stub_entry->stub_offset, sym_value))
2476 /* The stub would not have been relaxed if the offset was out
2477 of range. */
2478 BFD_FAIL ();
2479
2480 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2481 stub_entry->stub_offset + 4, sym_value))
2482 BFD_FAIL ();
2483 break;
2484
2485 case aarch64_stub_long_branch:
2486 /* We want the value relative to the address 12 bytes back from the
2487 value itself. */
2488 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2489 stub_entry->stub_offset + 16, sym_value + 12))
2490 BFD_FAIL ();
2491 break;
2492
2493 case aarch64_stub_erratum_835769_veneer:
2494 veneered_insn_loc = stub_entry->target_section->output_section->vma
2495 + stub_entry->target_section->output_offset
2496 + stub_entry->target_value;
2497 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2498 + stub_entry->stub_sec->output_offset
2499 + stub_entry->stub_offset;
2500 branch_offset = veneered_insn_loc - veneer_entry_loc;
2501 branch_offset >>= 2;
2502 branch_offset &= 0x3ffffff;
2503 bfd_putl32 (stub_entry->veneered_insn,
2504 stub_sec->contents + stub_entry->stub_offset);
2505 bfd_putl32 (template[1] | branch_offset,
2506 stub_sec->contents + stub_entry->stub_offset + 4);
2507 break;
2508
2509 default:
2510 abort ();
2511 }
2512
2513 return TRUE;
2514 }
2515
2516 /* As above, but don't actually build the stub. Just bump offset so
2517 we know stub section sizes. */
2518
2519 static bfd_boolean
2520 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2521 void *in_arg ATTRIBUTE_UNUSED)
2522 {
2523 struct elf_aarch64_stub_hash_entry *stub_entry;
2524 int size;
2525
2526 /* Massage our args to the form they really have. */
2527 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2528
2529 switch (stub_entry->stub_type)
2530 {
2531 case aarch64_stub_adrp_branch:
2532 size = sizeof (aarch64_adrp_branch_stub);
2533 break;
2534 case aarch64_stub_long_branch:
2535 size = sizeof (aarch64_long_branch_stub);
2536 break;
2537 case aarch64_stub_erratum_835769_veneer:
2538 size = sizeof (aarch64_erratum_835769_stub);
2539 break;
2540 default:
2541 abort ();
2542 }
2543
2544 size = (size + 7) & ~7;
2545 stub_entry->stub_sec->size += size;
2546 return TRUE;
2547 }
2548
2549 /* External entry points for sizing and building linker stubs. */
2550
2551 /* Set up various things so that we can make a list of input sections
2552 for each output section included in the link. Returns -1 on error,
2553 0 when no stubs will be needed, and 1 on success. */
2554
2555 int
2556 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2557 struct bfd_link_info *info)
2558 {
2559 bfd *input_bfd;
2560 unsigned int bfd_count;
2561 int top_id, top_index;
2562 asection *section;
2563 asection **input_list, **list;
2564 bfd_size_type amt;
2565 struct elf_aarch64_link_hash_table *htab =
2566 elf_aarch64_hash_table (info);
2567
2568 if (!is_elf_hash_table (htab))
2569 return 0;
2570
2571 /* Count the number of input BFDs and find the top input section id. */
2572 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2573 input_bfd != NULL; input_bfd = input_bfd->link.next)
2574 {
2575 bfd_count += 1;
2576 for (section = input_bfd->sections;
2577 section != NULL; section = section->next)
2578 {
2579 if (top_id < section->id)
2580 top_id = section->id;
2581 }
2582 }
2583 htab->bfd_count = bfd_count;
2584
2585 amt = sizeof (struct map_stub) * (top_id + 1);
2586 htab->stub_group = bfd_zmalloc (amt);
2587 if (htab->stub_group == NULL)
2588 return -1;
2589
2590 /* We can't use output_bfd->section_count here to find the top output
2591 section index as some sections may have been removed, and
2592 _bfd_strip_section_from_output doesn't renumber the indices. */
2593 for (section = output_bfd->sections, top_index = 0;
2594 section != NULL; section = section->next)
2595 {
2596 if (top_index < section->index)
2597 top_index = section->index;
2598 }
2599
2600 htab->top_index = top_index;
2601 amt = sizeof (asection *) * (top_index + 1);
2602 input_list = bfd_malloc (amt);
2603 htab->input_list = input_list;
2604 if (input_list == NULL)
2605 return -1;
2606
2607 /* For sections we aren't interested in, mark their entries with a
2608 value we can check later. */
2609 list = input_list + top_index;
2610 do
2611 *list = bfd_abs_section_ptr;
2612 while (list-- != input_list);
2613
2614 for (section = output_bfd->sections;
2615 section != NULL; section = section->next)
2616 {
2617 if ((section->flags & SEC_CODE) != 0)
2618 input_list[section->index] = NULL;
2619 }
2620
2621 return 1;
2622 }
2623
2624 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2625 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2626
2627 /* The linker repeatedly calls this function for each input section,
2628 in the order that input sections are linked into output sections.
2629 Build lists of input sections to determine groupings between which
2630 we may insert linker stubs. */
2631
2632 void
2633 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2634 {
2635 struct elf_aarch64_link_hash_table *htab =
2636 elf_aarch64_hash_table (info);
2637
2638 if (isec->output_section->index <= htab->top_index)
2639 {
2640 asection **list = htab->input_list + isec->output_section->index;
2641
2642 if (*list != bfd_abs_section_ptr)
2643 {
2644 /* Steal the link_sec pointer for our list. */
2645 /* This happens to make the list in reverse order,
2646 which is what we want. */
2647 PREV_SEC (isec) = *list;
2648 *list = isec;
2649 }
2650 }
2651 }
2652
2653 /* See whether we can group stub sections together. Grouping stub
2654 sections may result in fewer stubs. More importantly, we need to
2655 put all .init* and .fini* stubs at the beginning of the .init or
2656 .fini output sections respectively, because glibc splits the
2657 _init and _fini functions into multiple parts. Putting a stub in
2658 the middle of a function is not a good idea. */
2659
2660 static void
2661 group_sections (struct elf_aarch64_link_hash_table *htab,
2662 bfd_size_type stub_group_size,
2663 bfd_boolean stubs_always_before_branch)
2664 {
2665 asection **list = htab->input_list + htab->top_index;
2666
2667 do
2668 {
2669 asection *tail = *list;
2670
2671 if (tail == bfd_abs_section_ptr)
2672 continue;
2673
2674 while (tail != NULL)
2675 {
2676 asection *curr;
2677 asection *prev;
2678 bfd_size_type total;
2679
2680 curr = tail;
2681 total = tail->size;
2682 while ((prev = PREV_SEC (curr)) != NULL
2683 && ((total += curr->output_offset - prev->output_offset)
2684 < stub_group_size))
2685 curr = prev;
2686
2687 /* OK, the size from the start of CURR to the end is less
2688 than stub_group_size and thus can be handled by one stub
2689 section. (Or the tail section is itself larger than
2690 stub_group_size, in which case we may be toast.)
2691 We should really be keeping track of the total size of
2692 stubs added here, as stubs contribute to the final output
2693 section size. */
2694 do
2695 {
2696 prev = PREV_SEC (tail);
2697 /* Set up this stub group. */
2698 htab->stub_group[tail->id].link_sec = curr;
2699 }
2700 while (tail != curr && (tail = prev) != NULL);
2701
2702 /* But wait, there's more! Input sections up to stub_group_size
2703 bytes before the stub section can be handled by it too. */
2704 if (!stubs_always_before_branch)
2705 {
2706 total = 0;
2707 while (prev != NULL
2708 && ((total += tail->output_offset - prev->output_offset)
2709 < stub_group_size))
2710 {
2711 tail = prev;
2712 prev = PREV_SEC (tail);
2713 htab->stub_group[tail->id].link_sec = curr;
2714 }
2715 }
2716 tail = prev;
2717 }
2718 }
2719 while (list-- != htab->input_list);
2720
2721 free (htab->input_list);
2722 }
2723
2724 #undef PREV_SEC
2725
2726 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2727
2728 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2729 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2730 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2731 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2732 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2733 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2734
2735 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2736 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2737 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2738 #define AARCH64_ZR 0x1f
2739
2740 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2741 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2742
2743 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2744 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2745 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2746 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2747 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2748 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2749 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2750 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2751 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2752 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2753 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2754 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2755 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2756 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2757 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2758 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2759 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2760 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
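/* Decoding example (a sketch, not used by the code): "ldr x0, [x1]"
assembles to 0xf9400020; (0xf9400020 & 0x3b000000) == 0x39000000 so
AARCH64_LDST_UIMM matches, AARCH64_LD reports a load (bit 22 set) and
AARCH64_RT extracts register 0. */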
2761
2762 /* Classify INSN if it is indeed a load/store.
2763
2764 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2765
2766 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2767 is set equal to RT.
2768
2769 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2770
2771 */
2772
2773 static bfd_boolean
2774 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2775 bfd_boolean *pair, bfd_boolean *load)
2776 {
2777 uint32_t opcode;
2778 unsigned int r;
2779 uint32_t opc = 0;
2780 uint32_t v = 0;
2781 uint32_t opc_v = 0;
2782
2783 /* Bail out quickly if INSN doesn't fall into the load-store
2784 encoding space. */
2785 if (!AARCH64_LDST (insn))
2786 return FALSE;
2787
2788 *pair = FALSE;
2789 *load = FALSE;
2790 if (AARCH64_LDST_EX (insn))
2791 {
2792 *rt = AARCH64_RT (insn);
2793 *rt2 = *rt;
2794 if (AARCH64_BIT (insn, 21) == 1)
2795 {
2796 *pair = TRUE;
2797 *rt2 = AARCH64_RT2 (insn);
2798 }
2799 *load = AARCH64_LD (insn);
2800 return TRUE;
2801 }
2802 else if (AARCH64_LDST_NAP (insn)
2803 || AARCH64_LDSTP_PI (insn)
2804 || AARCH64_LDSTP_O (insn)
2805 || AARCH64_LDSTP_PRE (insn))
2806 {
2807 *pair = TRUE;
2808 *rt = AARCH64_RT (insn);
2809 *rt2 = AARCH64_RT2 (insn);
2810 *load = AARCH64_LD (insn);
2811 return TRUE;
2812 }
2813 else if (AARCH64_LDST_PCREL (insn)
2814 || AARCH64_LDST_UI (insn)
2815 || AARCH64_LDST_PIIMM (insn)
2816 || AARCH64_LDST_U (insn)
2817 || AARCH64_LDST_PREIMM (insn)
2818 || AARCH64_LDST_RO (insn)
2819 || AARCH64_LDST_UIMM (insn))
2820 {
2821 *rt = AARCH64_RT (insn);
2822 *rt2 = *rt;
2823 if (AARCH64_LDST_PCREL (insn))
2824 *load = TRUE;
2825 opc = AARCH64_BITS (insn, 22, 2);
2826 v = AARCH64_BIT (insn, 26);
2827 opc_v = opc | (v << 2);
2828 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2829 || opc_v == 5 || opc_v == 7);
2830 return TRUE;
2831 }
2832 else if (AARCH64_LDST_SIMD_M (insn)
2833 || AARCH64_LDST_SIMD_M_PI (insn))
2834 {
2835 *rt = AARCH64_RT (insn);
2836 *load = AARCH64_BIT (insn, 22);
2837 opcode = (insn >> 12) & 0xf;
2838 switch (opcode)
2839 {
2840 case 0:
2841 case 2:
2842 *rt2 = *rt + 3;
2843 break;
2844
2845 case 4:
2846 case 6:
2847 *rt2 = *rt + 2;
2848 break;
2849
2850 case 7:
2851 *rt2 = *rt;
2852 break;
2853
2854 case 8:
2855 case 10:
2856 *rt2 = *rt + 1;
2857 break;
2858
2859 default:
2860 return FALSE;
2861 }
2862 return TRUE;
2863 }
2864 else if (AARCH64_LDST_SIMD_S (insn)
2865 || AARCH64_LDST_SIMD_S_PI (insn))
2866 {
2867 *rt = AARCH64_RT (insn);
2868 r = (insn >> 21) & 1;
2869 *load = AARCH64_BIT (insn, 22);
2870 opcode = (insn >> 13) & 0x7;
2871 switch (opcode)
2872 {
2873 case 0:
2874 case 2:
2875 case 4:
2876 *rt2 = *rt + r;
2877 break;
2878
2879 case 1:
2880 case 3:
2881 case 5:
2882 *rt2 = *rt + (r == 0 ? 2 : 3);
2883 break;
2884
2885 case 6:
2886 *rt2 = *rt + r;
2887 break;
2888
2889 case 7:
2890 *rt2 = *rt + (r == 0 ? 2 : 3);
2891 break;
2892
2893 default:
2894 return FALSE;
2895 }
2896 return TRUE;
2897 }
2898
2899 return FALSE;
2900 }
2901
2902 /* Return TRUE if INSN is a multiply-accumulate instruction. */
2903
2904 static bfd_boolean
2905 aarch64_mlxl_p (uint32_t insn)
2906 {
2907 uint32_t op31 = AARCH64_OP31 (insn);
2908
2909 if (AARCH64_MAC (insn)
2910 && (op31 == 0 || op31 == 1 || op31 == 5)
2911 /* Exclude MUL instructions, which are encoded as a multiply-accumulate
2912 with RA = XZR. */
2913 && AARCH64_RA (insn) != AARCH64_ZR)
2914 return TRUE;
2915
2916 return FALSE;
2917 }
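/* Example (illustrative encodings): "madd x0, x1, x2, x3" is 0x9b020c20;
AARCH64_MAC matches, op31 is 0 and RA is 3, so aarch64_mlxl_p returns TRUE.
"mul x0, x1, x2" is the alias "madd x0, x1, x2, xzr", i.e. 0x9b027c20;
there RA is 31 (AARCH64_ZR), so it is excluded. */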
2918
2919 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2920 it is possible for a 64-bit multiply-accumulate instruction to generate an
2921 incorrect result. The details are quite complex and hard to
2922 determine statically, since branches in the code may exist in some
2923 circumstances, but all cases end with a memory (load, store, or
2924 prefetch) instruction followed immediately by the multiply-accumulate
2925 operation. We employ a linker patching technique, by moving the potentially
2926 affected multiply-accumulate instruction into a patch region and replacing
2927 the original instruction with a branch to the patch. This function checks
2928 if INSN_1 is the memory operation followed by a multiply-accumulate
2929 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2930 if INSN_1 and INSN_2 are safe. */
2931
2932 static bfd_boolean
2933 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2934 {
2935 uint32_t rt;
2936 uint32_t rt2;
2937 uint32_t rn;
2938 uint32_t rm;
2939 uint32_t ra;
2940 bfd_boolean pair;
2941 bfd_boolean load;
2942
2943 if (aarch64_mlxl_p (insn_2)
2944 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2945 {
2946 /* Any SIMD memory op is independent of the subsequent MLA
2947 by definition of the erratum. */
2948 if (AARCH64_BIT (insn_1, 26))
2949 return TRUE;
2950
2951 /* If not SIMD, check for integer memory ops and MLA relationship. */
2952 rn = AARCH64_RN (insn_2);
2953 ra = AARCH64_RA (insn_2);
2954 rm = AARCH64_RM (insn_2);
2955
2956 /* If this is a load and there's a true (RAW) dependency, we are safe
2957 and this is not an erratum sequence. */
2958 if (load &&
2959 (rt == rn || rt == rm || rt == ra
2960 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2961 return FALSE;
2962
2963 /* We conservatively put out stubs for all other cases (including
2964 writebacks). */
2965 return TRUE;
2966 }
2967
2968 return FALSE;
2969 }
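/* Illustrative sequences (assembly only, not generated here):
"ldr x0, [x1]" followed by "madd x2, x3, x4, x5" is flagged, because the
multiply-accumulate has no register dependency on the load; "ldr x3, [x1]"
followed by "madd x2, x3, x4, x5" is not flagged, since the load writes x3
which the MADD then reads (a true dependency). */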
2970
2971 /* Used to order a list of mapping symbols by address. */
2972
2973 static int
2974 elf_aarch64_compare_mapping (const void *a, const void *b)
2975 {
2976 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2977 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2978
2979 if (amap->vma > bmap->vma)
2980 return 1;
2981 else if (amap->vma < bmap->vma)
2982 return -1;
2983 else if (amap->type > bmap->type)
2984 /* Ensure results do not depend on the host qsort for objects with
2985 multiple mapping symbols at the same address by sorting on type
2986 after vma. */
2987 return 1;
2988 else if (amap->type < bmap->type)
2989 return -1;
2990 else
2991 return 0;
2992 }
2993
2994
2995 static char *
2996 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
2997 {
2998 char *stub_name = (char *) bfd_malloc
2999 (strlen ("__erratum_835769_veneer_") + 16);
3000 if (stub_name != NULL) sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3001 return stub_name;
3002 }
3003
3004 /* Scan for cortex-a53 erratum 835769 sequence.
3005
3006 Return FALSE on abnormal termination, TRUE otherwise. */
3007
3008 static bfd_boolean
3009 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3010 struct bfd_link_info *info,
3011 unsigned int *num_fixes_p)
3012 {
3013 asection *section;
3014 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3015 unsigned int num_fixes = *num_fixes_p;
3016
3017 if (htab == NULL)
3018 return TRUE;
3019
3020 for (section = input_bfd->sections;
3021 section != NULL;
3022 section = section->next)
3023 {
3024 bfd_byte *contents = NULL;
3025 struct _aarch64_elf_section_data *sec_data;
3026 unsigned int span;
3027
3028 if (elf_section_type (section) != SHT_PROGBITS
3029 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3030 || (section->flags & SEC_EXCLUDE) != 0
3031 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3032 || (section->output_section == bfd_abs_section_ptr))
3033 continue;
3034
3035 if (elf_section_data (section)->this_hdr.contents != NULL)
3036 contents = elf_section_data (section)->this_hdr.contents;
3037 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3038 return FALSE;
3039
3040 sec_data = elf_aarch64_section_data (section);
3041
3042 qsort (sec_data->map, sec_data->mapcount,
3043 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3044
3045 for (span = 0; span < sec_data->mapcount; span++)
3046 {
3047 unsigned int span_start = sec_data->map[span].vma;
3048 unsigned int span_end = ((span == sec_data->mapcount - 1)
3049 ? sec_data->map[0].vma + section->size
3050 : sec_data->map[span + 1].vma);
3051 unsigned int i;
3052 char span_type = sec_data->map[span].type;
3053
3054 if (span_type == 'd')
3055 continue;
3056
3057 for (i = span_start; i + 4 < span_end; i += 4)
3058 {
3059 uint32_t insn_1 = bfd_getl32 (contents + i);
3060 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3061
3062 if (aarch64_erratum_sequence (insn_1, insn_2))
3063 {
3064 struct elf_aarch64_stub_hash_entry *stub_entry;
3065 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3066 if (! stub_name)
3067 return FALSE;
3068
3069 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3070 section,
3071 htab);
3072 if (! stub_entry)
3073 return FALSE;
3074
3075 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3076 stub_entry->target_section = section;
3077 stub_entry->target_value = i + 4;
3078 stub_entry->veneered_insn = insn_2;
3079 stub_entry->output_name = stub_name;
3080 num_fixes++;
3081 }
3082 }
3083 }
3084 if (elf_section_data (section)->this_hdr.contents == NULL)
3085 free (contents);
3086 }
3087
3088 *num_fixes_p = num_fixes;
3089
3090 return TRUE;
3091 }
3092
3093
3094 /* Resize all stub sections. */
3095
3096 static void
3097 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3098 {
3099 asection *section;
3100
3101 /* OK, we've added some stubs. Find out the new size of the
3102 stub sections. */
3103 for (section = htab->stub_bfd->sections;
3104 section != NULL; section = section->next)
3105 {
3106 /* Ignore non-stub sections. */
3107 if (!strstr (section->name, STUB_SUFFIX))
3108 continue;
3109 section->size = 0;
3110 }
3111
3112 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3113
3114 for (section = htab->stub_bfd->sections;
3115 section != NULL; section = section->next)
3116 {
3117 if (!strstr (section->name, STUB_SUFFIX))
3118 continue;
3119
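/* Reserve room for the branch instruction that elfNN_aarch64_build_stubs
places at the start of every non-empty stub section. */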
3120 if (section->size)
3121 section->size += 4;
3122 }
3123 }
3124
3125 /* Determine and set the size of the stub section for a final link.
3126
3127 The basic idea here is to examine all the relocations looking for
3128 PC-relative calls to a target that is unreachable with a "bl"
3129 instruction. */
3130
3131 bfd_boolean
3132 elfNN_aarch64_size_stubs (bfd *output_bfd,
3133 bfd *stub_bfd,
3134 struct bfd_link_info *info,
3135 bfd_signed_vma group_size,
3136 asection * (*add_stub_section) (const char *,
3137 asection *),
3138 void (*layout_sections_again) (void))
3139 {
3140 bfd_size_type stub_group_size;
3141 bfd_boolean stubs_always_before_branch;
3142 bfd_boolean stub_changed = FALSE;
3143 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3144 unsigned int num_erratum_835769_fixes = 0;
3145
3146 /* Propagate mach to stub bfd, because it may not have been
3147 finalized when we created stub_bfd. */
3148 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3149 bfd_get_mach (output_bfd));
3150
3151 /* Stash our params away. */
3152 htab->stub_bfd = stub_bfd;
3153 htab->add_stub_section = add_stub_section;
3154 htab->layout_sections_again = layout_sections_again;
3155 stubs_always_before_branch = group_size < 0;
3156 if (group_size < 0)
3157 stub_group_size = -group_size;
3158 else
3159 stub_group_size = group_size;
3160
3161 if (stub_group_size == 1)
3162 {
3163 /* Default values. */
3164 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3165 stub_group_size = 127 * 1024 * 1024;
3166 }
3167
3168 group_sections (htab, stub_group_size, stubs_always_before_branch);
3169
3170 if (htab->fix_erratum_835769)
3171 {
3172 bfd *input_bfd;
3173
3174 for (input_bfd = info->input_bfds;
3175 input_bfd != NULL; input_bfd = input_bfd->link.next)
3176 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3177 &num_erratum_835769_fixes))
3178 return FALSE;
3179
3180 stub_changed = TRUE;
3181 }
3182
3183 while (1)
3184 {
3185 bfd *input_bfd;
3186
3187 for (input_bfd = info->input_bfds;
3188 input_bfd != NULL; input_bfd = input_bfd->link.next)
3189 {
3190 Elf_Internal_Shdr *symtab_hdr;
3191 asection *section;
3192 Elf_Internal_Sym *local_syms = NULL;
3193
3194 /* We'll need the symbol table in a second. */
3195 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3196 if (symtab_hdr->sh_info == 0)
3197 continue;
3198
3199 /* Walk over each section attached to the input bfd. */
3200 for (section = input_bfd->sections;
3201 section != NULL; section = section->next)
3202 {
3203 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3204
3205 /* If there aren't any relocs, then there's nothing more
3206 to do. */
3207 if ((section->flags & SEC_RELOC) == 0
3208 || section->reloc_count == 0
3209 || (section->flags & SEC_CODE) == 0)
3210 continue;
3211
3212 /* If this section is a link-once section that will be
3213 discarded, then don't create any stubs. */
3214 if (section->output_section == NULL
3215 || section->output_section->owner != output_bfd)
3216 continue;
3217
3218 /* Get the relocs. */
3219 internal_relocs
3220 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3221 NULL, info->keep_memory);
3222 if (internal_relocs == NULL)
3223 goto error_ret_free_local;
3224
3225 /* Now examine each relocation. */
3226 irela = internal_relocs;
3227 irelaend = irela + section->reloc_count;
3228 for (; irela < irelaend; irela++)
3229 {
3230 unsigned int r_type, r_indx;
3231 enum elf_aarch64_stub_type stub_type;
3232 struct elf_aarch64_stub_hash_entry *stub_entry;
3233 asection *sym_sec;
3234 bfd_vma sym_value;
3235 bfd_vma destination;
3236 struct elf_aarch64_link_hash_entry *hash;
3237 const char *sym_name;
3238 char *stub_name;
3239 const asection *id_sec;
3240 unsigned char st_type;
3241 bfd_size_type len;
3242
3243 r_type = ELFNN_R_TYPE (irela->r_info);
3244 r_indx = ELFNN_R_SYM (irela->r_info);
3245
3246 if (r_type >= (unsigned int) R_AARCH64_end)
3247 {
3248 bfd_set_error (bfd_error_bad_value);
3249 error_ret_free_internal:
3250 if (elf_section_data (section)->relocs == NULL)
3251 free (internal_relocs);
3252 goto error_ret_free_local;
3253 }
3254
3255 /* Only look for stubs on unconditional branch and
3256 branch and link instructions. */
3257 if (r_type != (unsigned int) AARCH64_R (CALL26)
3258 && r_type != (unsigned int) AARCH64_R (JUMP26))
3259 continue;
3260
3261 /* Now determine the call target, its name, value,
3262 section. */
3263 sym_sec = NULL;
3264 sym_value = 0;
3265 destination = 0;
3266 hash = NULL;
3267 sym_name = NULL;
3268 if (r_indx < symtab_hdr->sh_info)
3269 {
3270 /* It's a local symbol. */
3271 Elf_Internal_Sym *sym;
3272 Elf_Internal_Shdr *hdr;
3273
3274 if (local_syms == NULL)
3275 {
3276 local_syms
3277 = (Elf_Internal_Sym *) symtab_hdr->contents;
3278 if (local_syms == NULL)
3279 local_syms
3280 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3281 symtab_hdr->sh_info, 0,
3282 NULL, NULL, NULL);
3283 if (local_syms == NULL)
3284 goto error_ret_free_internal;
3285 }
3286
3287 sym = local_syms + r_indx;
3288 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3289 sym_sec = hdr->bfd_section;
3290 if (!sym_sec)
3291 /* This is an undefined symbol. It can never
3292 be resolved. */
3293 continue;
3294
3295 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3296 sym_value = sym->st_value;
3297 destination = (sym_value + irela->r_addend
3298 + sym_sec->output_offset
3299 + sym_sec->output_section->vma);
3300 st_type = ELF_ST_TYPE (sym->st_info);
3301 sym_name
3302 = bfd_elf_string_from_elf_section (input_bfd,
3303 symtab_hdr->sh_link,
3304 sym->st_name);
3305 }
3306 else
3307 {
3308 int e_indx;
3309
3310 e_indx = r_indx - symtab_hdr->sh_info;
3311 hash = ((struct elf_aarch64_link_hash_entry *)
3312 elf_sym_hashes (input_bfd)[e_indx]);
3313
3314 while (hash->root.root.type == bfd_link_hash_indirect
3315 || hash->root.root.type == bfd_link_hash_warning)
3316 hash = ((struct elf_aarch64_link_hash_entry *)
3317 hash->root.root.u.i.link);
3318
3319 if (hash->root.root.type == bfd_link_hash_defined
3320 || hash->root.root.type == bfd_link_hash_defweak)
3321 {
3322 struct elf_aarch64_link_hash_table *globals =
3323 elf_aarch64_hash_table (info);
3324 sym_sec = hash->root.root.u.def.section;
3325 sym_value = hash->root.root.u.def.value;
3326 /* For a destination in a shared library,
3327 use the PLT stub as target address to
3328 decide whether a branch stub is
3329 needed. */
3330 if (globals->root.splt != NULL && hash != NULL
3331 && hash->root.plt.offset != (bfd_vma) - 1)
3332 {
3333 sym_sec = globals->root.splt;
3334 sym_value = hash->root.plt.offset;
3335 if (sym_sec->output_section != NULL)
3336 destination = (sym_value
3337 + sym_sec->output_offset
3338 +
3339 sym_sec->output_section->vma);
3340 }
3341 else if (sym_sec->output_section != NULL)
3342 destination = (sym_value + irela->r_addend
3343 + sym_sec->output_offset
3344 + sym_sec->output_section->vma);
3345 }
3346 else if (hash->root.root.type == bfd_link_hash_undefined
3347 || (hash->root.root.type
3348 == bfd_link_hash_undefweak))
3349 {
3350 /* For a shared library, use the PLT stub as
3351 target address to decide whether a long
3352 branch stub is needed.
3353 For absolute code, they cannot be handled. */
3354 struct elf_aarch64_link_hash_table *globals =
3355 elf_aarch64_hash_table (info);
3356
3357 if (globals->root.splt != NULL && hash != NULL
3358 && hash->root.plt.offset != (bfd_vma) - 1)
3359 {
3360 sym_sec = globals->root.splt;
3361 sym_value = hash->root.plt.offset;
3362 if (sym_sec->output_section != NULL)
3363 destination = (sym_value
3364 + sym_sec->output_offset
3365 +
3366 sym_sec->output_section->vma);
3367 }
3368 else
3369 continue;
3370 }
3371 else
3372 {
3373 bfd_set_error (bfd_error_bad_value);
3374 goto error_ret_free_internal;
3375 }
3376 st_type = ELF_ST_TYPE (hash->root.type);
3377 sym_name = hash->root.root.root.string;
3378 }
3379
3380 /* Determine what (if any) linker stub is needed. */
3381 stub_type = aarch64_type_of_stub
3382 (info, section, irela, st_type, hash, destination);
3383 if (stub_type == aarch64_stub_none)
3384 continue;
3385
3386 /* Support for grouping stub sections. */
3387 id_sec = htab->stub_group[section->id].link_sec;
3388
3389 /* Get the name of this stub. */
3390 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3391 irela);
3392 if (!stub_name)
3393 goto error_ret_free_internal;
3394
3395 stub_entry =
3396 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3397 stub_name, FALSE, FALSE);
3398 if (stub_entry != NULL)
3399 {
3400 /* The proper stub has already been created. */
3401 free (stub_name);
3402 continue;
3403 }
3404
3405 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3406 (stub_name, section, htab);
3407 if (stub_entry == NULL)
3408 {
3409 free (stub_name);
3410 goto error_ret_free_internal;
3411 }
3412
3413 stub_entry->target_value = sym_value;
3414 stub_entry->target_section = sym_sec;
3415 stub_entry->stub_type = stub_type;
3416 stub_entry->h = hash;
3417 stub_entry->st_type = st_type;
3418
3419 if (sym_name == NULL)
3420 sym_name = "unnamed";
3421 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3422 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3423 if (stub_entry->output_name == NULL)
3424 {
3425 free (stub_name);
3426 goto error_ret_free_internal;
3427 }
3428
3429 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3430 sym_name);
3431
3432 stub_changed = TRUE;
3433 }
3434
3435 /* We're done with the internal relocs, free them. */
3436 if (elf_section_data (section)->relocs == NULL)
3437 free (internal_relocs);
3438 }
3439 }
3440
3441 if (!stub_changed)
3442 break;
3443
3444 _bfd_aarch64_resize_stubs (htab);
3445
3446 /* Ask the linker to do its stuff. */
3447 (*htab->layout_sections_again) ();
3448 stub_changed = FALSE;
3449 }
3450
3451 return TRUE;
3452
3453 error_ret_free_local:
3454 return FALSE;
3455 }
3456
3457 /* Build all the stubs associated with the current output file. The
3458 stubs are kept in a hash table attached to the main linker hash
3459 table. We also set up the .plt entries for statically linked PIC
3460 functions here. This function is called via aarch64_elf_finish in the
3461 linker. */
3462
3463 bfd_boolean
3464 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3465 {
3466 asection *stub_sec;
3467 struct bfd_hash_table *table;
3468 struct elf_aarch64_link_hash_table *htab;
3469
3470 htab = elf_aarch64_hash_table (info);
3471
3472 for (stub_sec = htab->stub_bfd->sections;
3473 stub_sec != NULL; stub_sec = stub_sec->next)
3474 {
3475 bfd_size_type size;
3476
3477 /* Ignore non-stub sections. */
3478 if (!strstr (stub_sec->name, STUB_SUFFIX))
3479 continue;
3480
3481 /* Allocate memory to hold the linker stubs. */
3482 size = stub_sec->size;
3483 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3484 if (stub_sec->contents == NULL && size != 0)
3485 return FALSE;
3486 stub_sec->size = 0;
3487
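/* Start the stub section with an unconditional branch over its entire
contents, so that any execution that falls into the stub section from the
preceding code skips over the stubs. For a section of, say, 0x40 bytes
this word is 0x14000010, i.e. "b .+0x40"; the extra 4 bytes added in
_bfd_aarch64_resize_stubs cover it. */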
3488 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3489 stub_sec->size += 4;
3490 }
3491
3492 /* Build the stubs as directed by the stub hash table. */
3493 table = &htab->stub_hash_table;
3494 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3495
3496 return TRUE;
3497 }
3498
3499
3500 /* Add an entry to the code/data map for section SEC. */
3501
3502 static void
3503 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3504 {
3505 struct _aarch64_elf_section_data *sec_data =
3506 elf_aarch64_section_data (sec);
3507 unsigned int newidx;
3508
3509 if (sec_data->map == NULL)
3510 {
3511 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3512 sec_data->mapcount = 0;
3513 sec_data->mapsize = 1;
3514 }
3515
3516 newidx = sec_data->mapcount++;
3517
3518 if (sec_data->mapcount > sec_data->mapsize)
3519 {
3520 sec_data->mapsize *= 2;
3521 sec_data->map = bfd_realloc_or_free
3522 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3523 }
3524
3525 if (sec_data->map)
3526 {
3527 sec_data->map[newidx].vma = vma;
3528 sec_data->map[newidx].type = type;
3529 }
3530 }
3531
3532
3533 /* Initialise maps of insn/data for input BFDs. */
3534 void
3535 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3536 {
3537 Elf_Internal_Sym *isymbuf;
3538 Elf_Internal_Shdr *hdr;
3539 unsigned int i, localsyms;
3540
3541 /* Make sure that we are dealing with an AArch64 elf binary. */
3542 if (!is_aarch64_elf (abfd))
3543 return;
3544
3545 if ((abfd->flags & DYNAMIC) != 0)
3546 return;
3547
3548 hdr = &elf_symtab_hdr (abfd);
3549 localsyms = hdr->sh_info;
3550
3551 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3552 should contain the number of local symbols, which should come before any
3553 global symbols. Mapping symbols are always local. */
3554 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3555
3556 /* No internal symbols read? Skip this BFD. */
3557 if (isymbuf == NULL)
3558 return;
3559
3560 for (i = 0; i < localsyms; i++)
3561 {
3562 Elf_Internal_Sym *isym = &isymbuf[i];
3563 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3564 const char *name;
3565
3566 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3567 {
3568 name = bfd_elf_string_from_elf_section (abfd,
3569 hdr->sh_link,
3570 isym->st_name);
3571
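/* Mapping symbols are named "$x" (code) or "$d" (data); name[1] below
   passes that type character straight into the section map.  */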
3572 if (bfd_is_aarch64_special_symbol_name
3573 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3574 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3575 }
3576 }
3577 }
3578
3579 /* Set option values needed during linking. */
3580 void
3581 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3582 struct bfd_link_info *link_info,
3583 int no_enum_warn,
3584 int no_wchar_warn, int pic_veneer,
3585 int fix_erratum_835769)
3586 {
3587 struct elf_aarch64_link_hash_table *globals;
3588
3589 globals = elf_aarch64_hash_table (link_info);
3590 globals->pic_veneer = pic_veneer;
3591 globals->fix_erratum_835769 = fix_erratum_835769;
3592
3593 BFD_ASSERT (is_aarch64_elf (output_bfd));
3594 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3595 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3596 }
3597
3598 static bfd_vma
3599 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3600 struct elf_aarch64_link_hash_table
3601 *globals, struct bfd_link_info *info,
3602 bfd_vma value, bfd *output_bfd,
3603 bfd_boolean *unresolved_reloc_p)
3604 {
3605 bfd_vma off = (bfd_vma) - 1;
3606 asection *basegot = globals->root.sgot;
3607 bfd_boolean dyn = globals->root.dynamic_sections_created;
3608
3609 if (h != NULL)
3610 {
3611 BFD_ASSERT (basegot != NULL);
3612 off = h->got.offset;
3613 BFD_ASSERT (off != (bfd_vma) - 1);
3614 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3615 || (info->shared
3616 && SYMBOL_REFERENCES_LOCAL (info, h))
3617 || (ELF_ST_VISIBILITY (h->other)
3618 && h->root.type == bfd_link_hash_undefweak))
3619 {
3620 /* This is actually a static link, or it is a -Bsymbolic link
3621 and the symbol is defined locally. We must initialize this
3622 entry in the global offset table. Since the offset must
3623 always be a multiple of 8 (4 in the case of ILP32), we use
3624 the least significant bit to record whether we have
3625 initialized it already.
3626 When doing a dynamic link, we create a .rel(a).got relocation
3627 entry to initialize the value. This is done in the
3628 finish_dynamic_symbol routine. */
3629 if ((off & 1) != 0)
3630 off &= ~1;
3631 else
3632 {
3633 bfd_put_NN (output_bfd, value, basegot->contents + off);
3634 h->got.offset |= 1;
3635 }
3636 }
3637 else
3638 *unresolved_reloc_p = FALSE;
3639
3640 off = off + basegot->output_section->vma + basegot->output_offset;
3641 }
3642
3643 return off;
3644 }
3645
3646 /* Change R_TYPE to a more efficient access model where possible,
3647 return the new reloc type. */
3648
3649 static bfd_reloc_code_real_type
3650 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3651 struct elf_link_hash_entry *h)
3652 {
3653 bfd_boolean is_local = h == NULL;
3654
3655 switch (r_type)
3656 {
3657 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3658 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3659 return (is_local
3660 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3661 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3662
3663 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3664 return (is_local
3665 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3666 : r_type);
3667
3668 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3669 return (is_local
3670 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3671 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3672
3673 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3674 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3675 return (is_local
3676 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3677 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3678
3679 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3680 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3681
3682 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3683 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3684
3685 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3686 return r_type;
3687
3688 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3689 return (is_local
3690 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3691 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3692
3693 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3694 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3695 /* Instructions with these relocations will become NOPs. */
3696 return BFD_RELOC_AARCH64_NONE;
3697
3698 default:
3699 break;
3700 }
3701
3702 return r_type;
3703 }
3704
3705 static unsigned int
3706 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3707 {
3708 switch (r_type)
3709 {
3710 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3711 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3712 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3713 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3714 return GOT_NORMAL;
3715
3716 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3717 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3718 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3719 return GOT_TLS_GD;
3720
3721 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3722 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3723 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3724 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3725 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3726 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3727 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3728 return GOT_TLSDESC_GD;
3729
3730 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3731 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3732 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3733 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3734 return GOT_TLS_IE;
3735
3736 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3737 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3738 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3739 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3740 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3741 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3742 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3743 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3744 return GOT_UNKNOWN;
3745
3746 default:
3747 break;
3748 }
3749 return GOT_UNKNOWN;
3750 }
3751
3752 static bfd_boolean
3753 aarch64_can_relax_tls (bfd *input_bfd,
3754 struct bfd_link_info *info,
3755 bfd_reloc_code_real_type r_type,
3756 struct elf_link_hash_entry *h,
3757 unsigned long r_symndx)
3758 {
3759 unsigned int symbol_got_type;
3760 unsigned int reloc_got_type;
3761
3762 if (! IS_AARCH64_TLS_RELOC (r_type))
3763 return FALSE;
3764
3765 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3766 reloc_got_type = aarch64_reloc_got_type (r_type);
3767
3768 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3769 return TRUE;
3770
3771 if (info->shared)
3772 return FALSE;
3773
3774 if (h && h->root.type == bfd_link_hash_undefweak)
3775 return FALSE;
3776
3777 return TRUE;
3778 }
3779
3780 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3781 enumerator. */
3782
3783 static bfd_reloc_code_real_type
3784 aarch64_tls_transition (bfd *input_bfd,
3785 struct bfd_link_info *info,
3786 unsigned int r_type,
3787 struct elf_link_hash_entry *h,
3788 unsigned long r_symndx)
3789 {
3790 bfd_reloc_code_real_type bfd_r_type
3791 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3792
3793 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3794 return bfd_r_type;
3795
3796 return aarch64_tls_transition_without_check (bfd_r_type, h);
3797 }
3798
3799 /* Return the base VMA address which should be subtracted from real addresses
3800 when resolving R_AARCH64_TLS_DTPREL relocation. */
3801
3802 static bfd_vma
3803 dtpoff_base (struct bfd_link_info *info)
3804 {
3805 /* If tls_sec is NULL, we should have signalled an error already. */
3806 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3807 return elf_hash_table (info)->tls_sec->vma;
3808 }
3809
3810 /* Return the base VMA address which should be subtracted from real addresses
3811 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3812
3813 static bfd_vma
3814 tpoff_base (struct bfd_link_info *info)
3815 {
3816 struct elf_link_hash_table *htab = elf_hash_table (info);
3817
3818 /* If tls_sec is NULL, we should have signalled an error already. */
3819 BFD_ASSERT (htab->tls_sec != NULL);
3820
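/* AArch64 uses variant 1 TLS: the thread pointer points at the TCB and the
   first TLS block starts at TP + align (TCB_SIZE, block alignment).
   Subtracting the value returned below therefore yields the TP-relative
   offset: addr - tls_sec->vma + aligned TCB size.  */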
3821 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3822 htab->tls_sec->alignment_power);
3823 return htab->tls_sec->vma - base;
3824 }
3825
3826 static bfd_vma *
3827 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3828 unsigned long r_symndx)
3829 {
3830 /* Return a pointer to the stored GOT offset for the symbol
3831 referred to by H (or, for a local symbol, by R_SYMNDX). */
3832 if (h != NULL)
3833 return &h->got.offset;
3834 else
3835 {
3836 /* local symbol */
3837 struct elf_aarch64_local_symbol *l;
3838
3839 l = elf_aarch64_locals (input_bfd);
3840 return &l[r_symndx].got_offset;
3841 }
3842 }
3843
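/* GOT offset bookkeeping: bit 0 of the stored offset is used as an
   "already processed" flag by the helpers below (real offsets are always
   multiples of the GOT entry size, so the bit is free).  The same
   convention is used for the TLSDESC offsets further down.  */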
3844 static void
3845 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3846 unsigned long r_symndx)
3847 {
3848 bfd_vma *p;
3849 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3850 *p |= 1;
3851 }
3852
3853 static int
3854 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3855 unsigned long r_symndx)
3856 {
3857 bfd_vma value;
3858 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3859 return value & 1;
3860 }
3861
3862 static bfd_vma
3863 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3864 unsigned long r_symndx)
3865 {
3866 bfd_vma value;
3867 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3868 value &= ~1;
3869 return value;
3870 }
3871
3872 static bfd_vma *
3873 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3874 unsigned long r_symndx)
3875 {
3876 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
3877 referred to by H (or, for a local symbol, by R_SYMNDX). */
3878 if (h != NULL)
3879 {
3880 struct elf_aarch64_link_hash_entry *eh;
3881 eh = (struct elf_aarch64_link_hash_entry *) h;
3882 return &eh->tlsdesc_got_jump_table_offset;
3883 }
3884 else
3885 {
3886 /* local symbol */
3887 struct elf_aarch64_local_symbol *l;
3888
3889 l = elf_aarch64_locals (input_bfd);
3890 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3891 }
3892 }
3893
3894 static void
3895 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3896 unsigned long r_symndx)
3897 {
3898 bfd_vma *p;
3899 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3900 *p |= 1;
3901 }
3902
3903 static int
3904 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3905 struct elf_link_hash_entry *h,
3906 unsigned long r_symndx)
3907 {
3908 bfd_vma value;
3909 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3910 return value & 1;
3911 }
3912
3913 static bfd_vma
3914 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3915 unsigned long r_symndx)
3916 {
3917 bfd_vma value;
3918 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3919 value &= ~1;
3920 return value;
3921 }
3922
3923 /* Data for make_branch_to_erratum_835769_stub(). */
3924
3925 struct erratum_835769_branch_to_stub_data
3926 {
3927 asection *output_section;
3928 bfd_byte *contents;
3929 };
3930
3931 /* Helper to insert branches to erratum 835769 stubs in the right
3932 places for a particular section. */
3933
3934 static bfd_boolean
3935 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
3936 void *in_arg)
3937 {
3938 struct elf_aarch64_stub_hash_entry *stub_entry;
3939 struct erratum_835769_branch_to_stub_data *data;
3940 bfd_byte *contents;
3941 unsigned long branch_insn = 0;
3942 bfd_vma veneered_insn_loc, veneer_entry_loc;
3943 bfd_signed_vma branch_offset;
3944 unsigned int target;
3945 bfd *abfd;
3946
3947 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3948 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
3949
3950 if (stub_entry->target_section != data->output_section
3951 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
3952 return TRUE;
3953
3954 contents = data->contents;
3955 veneered_insn_loc = stub_entry->target_section->output_section->vma
3956 + stub_entry->target_section->output_offset
3957 + stub_entry->target_value;
3958 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3959 + stub_entry->stub_sec->output_offset
3960 + stub_entry->stub_offset;
3961 branch_offset = veneer_entry_loc - veneered_insn_loc;
3962
3963 abfd = stub_entry->target_section->owner;
3964 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
3965 (*_bfd_error_handler)
3966 (_("%B: error: Erratum 835769 stub out "
3967 "of range (input file too large)"), abfd);
3968
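/* Patch the veneered instruction's location with an unconditional branch
   (B, opcode 0x14000000) to the veneer: the byte offset is converted to a
   word offset and masked into the imm26 field.  */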
3969 target = stub_entry->target_value;
3970 branch_insn = 0x14000000;
3971 branch_offset >>= 2;
3972 branch_offset &= 0x3ffffff;
3973 branch_insn |= branch_offset;
3974 bfd_putl32 (branch_insn, &contents[target]);
3975
3976 return TRUE;
3977 }
3978
3979 static bfd_boolean
3980 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
3981 struct bfd_link_info *link_info,
3982 asection *sec,
3983 bfd_byte *contents)
3984
3985 {
3986 struct elf_aarch64_link_hash_table *globals =
3987 elf_aarch64_hash_table (link_info);
3988
3989 if (globals == NULL)
3990 return FALSE;
3991
3992 /* Fix code to point to erratum 835769 stubs. */
3993 if (globals->fix_erratum_835769)
3994 {
3995 struct erratum_835769_branch_to_stub_data data;
3996
3997 data.output_section = sec;
3998 data.contents = contents;
3999 bfd_hash_traverse (&globals->stub_hash_table,
4000 make_branch_to_erratum_835769_stub, &data);
4001 }
4002
4003 return FALSE;
4004 }
4005
4006 /* Perform a relocation as part of a final link. */
4007 static bfd_reloc_status_type
4008 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4009 bfd *input_bfd,
4010 bfd *output_bfd,
4011 asection *input_section,
4012 bfd_byte *contents,
4013 Elf_Internal_Rela *rel,
4014 bfd_vma value,
4015 struct bfd_link_info *info,
4016 asection *sym_sec,
4017 struct elf_link_hash_entry *h,
4018 bfd_boolean *unresolved_reloc_p,
4019 bfd_boolean save_addend,
4020 bfd_vma *saved_addend,
4021 Elf_Internal_Sym *sym)
4022 {
4023 Elf_Internal_Shdr *symtab_hdr;
4024 unsigned int r_type = howto->type;
4025 bfd_reloc_code_real_type bfd_r_type
4026 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4027 bfd_reloc_code_real_type new_bfd_r_type;
4028 unsigned long r_symndx;
4029 bfd_byte *hit_data = contents + rel->r_offset;
4030 bfd_vma place;
4031 bfd_signed_vma signed_addend;
4032 struct elf_aarch64_link_hash_table *globals;
4033 bfd_boolean weak_undef_p;
4034
4035 globals = elf_aarch64_hash_table (info);
4036
4037 symtab_hdr = &elf_symtab_hdr (input_bfd);
4038
4039 BFD_ASSERT (is_aarch64_elf (input_bfd));
4040
4041 r_symndx = ELFNN_R_SYM (rel->r_info);
4042
4043 /* It is possible to have linker relaxations on some TLS access
4044 models. Update our information here. */
4045 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4046 if (new_bfd_r_type != bfd_r_type)
4047 {
4048 bfd_r_type = new_bfd_r_type;
4049 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4050 BFD_ASSERT (howto != NULL);
4051 r_type = howto->type;
4052 }
4053
4054 place = input_section->output_section->vma
4055 + input_section->output_offset + rel->r_offset;
4056
4057 /* Get addend, accumulating the addend for consecutive relocs
4058 which refer to the same offset. */
4059 signed_addend = saved_addend ? *saved_addend : 0;
4060 signed_addend += rel->r_addend;
4061
4062 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4063 : bfd_is_und_section (sym_sec));
4064
4065 /* Since STT_GNU_IFUNC symbols must go through the PLT, we handle
4066 them here if they are defined in a non-shared object. */
4067 if (h != NULL
4068 && h->type == STT_GNU_IFUNC
4069 && h->def_regular)
4070 {
4071 asection *plt;
4072 const char *name;
4073 asection *base_got;
4074 bfd_vma off;
4075
4076 if ((input_section->flags & SEC_ALLOC) == 0
4077 || h->plt.offset == (bfd_vma) -1)
4078 abort ();
4079
4080 /* STT_GNU_IFUNC symbol must go through PLT. */
4081 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4082 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4083
4084 switch (bfd_r_type)
4085 {
4086 default:
4087 if (h->root.root.string)
4088 name = h->root.root.string;
4089 else
4090 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4091 NULL);
4092 (*_bfd_error_handler)
4093 (_("%B: relocation %s against STT_GNU_IFUNC "
4094 "symbol `%s' isn't handled by %s"), input_bfd,
4095 howto->name, name, __FUNCTION__);
4096 bfd_set_error (bfd_error_bad_value);
4097 return FALSE;
4098
4099 case BFD_RELOC_AARCH64_NN:
4100 if (rel->r_addend != 0)
4101 {
4102 if (h->root.root.string)
4103 name = h->root.root.string;
4104 else
4105 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4106 sym, NULL);
4107 (*_bfd_error_handler)
4108 (_("%B: relocation %s against STT_GNU_IFUNC "
4109 "symbol `%s' has non-zero addend: %d"),
4110 input_bfd, howto->name, name, rel->r_addend);
4111 bfd_set_error (bfd_error_bad_value);
4112 return FALSE;
4113 }
4114
4115 /* Generate dynamic relocation only when there is a
4116 non-GOT reference in a shared object. */
4117 if (info->shared && h->non_got_ref)
4118 {
4119 Elf_Internal_Rela outrel;
4120 asection *sreloc;
4121
4122 /* Need a dynamic relocation to get the real function
4123 address. */
4124 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4125 info,
4126 input_section,
4127 rel->r_offset);
4128 if (outrel.r_offset == (bfd_vma) -1
4129 || outrel.r_offset == (bfd_vma) -2)
4130 abort ();
4131
4132 outrel.r_offset += (input_section->output_section->vma
4133 + input_section->output_offset);
4134
4135 if (h->dynindx == -1
4136 || h->forced_local
4137 || info->executable)
4138 {
4139 /* This symbol is resolved locally. */
4140 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4141 outrel.r_addend = (h->root.u.def.value
4142 + h->root.u.def.section->output_section->vma
4143 + h->root.u.def.section->output_offset);
4144 }
4145 else
4146 {
4147 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4148 outrel.r_addend = 0;
4149 }
4150
4151 sreloc = globals->root.irelifunc;
4152 elf_append_rela (output_bfd, sreloc, &outrel);
4153
4154 /* If this reloc is against an external symbol, we
4155 do not want to fiddle with the addend. Otherwise,
4156 we need to include the symbol value so that it
4157 becomes an addend for the dynamic reloc. For an
4158 internal symbol, the addend has already been updated. */
4159 return bfd_reloc_ok;
4160 }
4161 /* FALLTHROUGH */
4162 case BFD_RELOC_AARCH64_JUMP26:
4163 case BFD_RELOC_AARCH64_CALL26:
4164 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4165 signed_addend,
4166 weak_undef_p);
4167 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4168 howto, value);
4169 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4170 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4171 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4172 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4173 base_got = globals->root.sgot;
4174 off = h->got.offset;
4175
4176 if (base_got == NULL)
4177 abort ();
4178
4179 if (off == (bfd_vma) -1)
4180 {
4181 bfd_vma plt_index;
4182
4183 /* We can't use h->got.offset here to save state, or
4184 even just remember the offset, as finish_dynamic_symbol
4185 would use that as offset into .got. */
4186
4187 if (globals->root.splt != NULL)
4188 {
4189 plt_index = ((h->plt.offset - globals->plt_header_size) /
4190 globals->plt_entry_size);
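/* Skip the reserved .got.plt header entries (three GOT_ENTRY_SIZE slots
   used by the dynamic linker) when computing this entry's slot.  */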
4191 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4192 base_got = globals->root.sgotplt;
4193 }
4194 else
4195 {
4196 plt_index = h->plt.offset / globals->plt_entry_size;
4197 off = plt_index * GOT_ENTRY_SIZE;
4198 base_got = globals->root.igotplt;
4199 }
4200
4201 if (h->dynindx == -1
4202 || h->forced_local
4203 || info->symbolic)
4204 {
4205 /* This references the local definition. We must
4206 initialize this entry in the global offset table.
4207 Since the offset must always be a multiple of 8,
4208 we use the least significant bit to record
4209 whether we have initialized it already.
4210
4211 When doing a dynamic link, we create a .rela.got
4212 relocation entry to initialize the value. This
4213 is done in the finish_dynamic_symbol routine. */
4214 if ((off & 1) != 0)
4215 off &= ~1;
4216 else
4217 {
4218 bfd_put_NN (output_bfd, value,
4219 base_got->contents + off);
4220 /* Note that this is harmless as -1 | 1 still is -1. */
4221 h->got.offset |= 1;
4222 }
4223 }
4224 value = (base_got->output_section->vma
4225 + base_got->output_offset + off);
4226 }
4227 else
4228 value = aarch64_calculate_got_entry_vma (h, globals, info,
4229 value, output_bfd,
4230 unresolved_reloc_p);
4231 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4232 0, weak_undef_p);
4233 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4234 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4235 case BFD_RELOC_AARCH64_ADD_LO12:
4236 break;
4237 }
4238 }
4239
4240 switch (bfd_r_type)
4241 {
4242 case BFD_RELOC_AARCH64_NONE:
4243 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4244 *unresolved_reloc_p = FALSE;
4245 return bfd_reloc_ok;
4246
4247 case BFD_RELOC_AARCH64_NN:
4248
4249 /* When generating a shared object or relocatable executable, these
4250 relocations are copied into the output file to be resolved at
4251 run time. */
4252 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4253 && (input_section->flags & SEC_ALLOC)
4254 && (h == NULL
4255 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4256 || h->root.type != bfd_link_hash_undefweak))
4257 {
4258 Elf_Internal_Rela outrel;
4259 bfd_byte *loc;
4260 bfd_boolean skip, relocate;
4261 asection *sreloc;
4262
4263 *unresolved_reloc_p = FALSE;
4264
4265 skip = FALSE;
4266 relocate = FALSE;
4267
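/* _bfd_elf_section_offset returns -1 when the dynamic reloc should be
   dropped entirely, and -2 when it should be dropped but the field must
   still be relocated statically; the SKIP and RELOCATE flags below
   mirror that.  */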
4268 outrel.r_addend = signed_addend;
4269 outrel.r_offset =
4270 _bfd_elf_section_offset (output_bfd, info, input_section,
4271 rel->r_offset);
4272 if (outrel.r_offset == (bfd_vma) - 1)
4273 skip = TRUE;
4274 else if (outrel.r_offset == (bfd_vma) - 2)
4275 {
4276 skip = TRUE;
4277 relocate = TRUE;
4278 }
4279
4280 outrel.r_offset += (input_section->output_section->vma
4281 + input_section->output_offset);
4282
4283 if (skip)
4284 memset (&outrel, 0, sizeof outrel);
4285 else if (h != NULL
4286 && h->dynindx != -1
4287 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4288 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4289 else
4290 {
4291 int symbol;
4292
4293 /* On SVR4-ish systems, the dynamic loader cannot
4294 relocate the text and data segments independently,
4295 so the symbol does not matter. */
4296 symbol = 0;
4297 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4298 outrel.r_addend += value;
4299 }
4300
4301 sreloc = elf_section_data (input_section)->sreloc;
4302 if (sreloc == NULL || sreloc->contents == NULL)
4303 return bfd_reloc_notsupported;
4304
4305 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4306 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4307
4308 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4309 {
4310 /* Sanity check that we have previously allocated
4311 sufficient space in the relocation section for the
4312 number of relocations we actually want to emit. */
4313 abort ();
4314 }
4315
4316 /* If this reloc is against an external symbol, we do not want to
4317 fiddle with the addend. Otherwise, we need to include the symbol
4318 value so that it becomes an addend for the dynamic reloc. */
4319 if (!relocate)
4320 return bfd_reloc_ok;
4321
4322 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4323 contents, rel->r_offset, value,
4324 signed_addend);
4325 }
4326 else
4327 value += signed_addend;
4328 break;
4329
4330 case BFD_RELOC_AARCH64_JUMP26:
4331 case BFD_RELOC_AARCH64_CALL26:
4332 {
4333 asection *splt = globals->root.splt;
4334 bfd_boolean via_plt_p =
4335 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4336
4337 /* A call to an undefined weak symbol is converted to a jump to
4338 the next instruction unless a PLT entry will be created.
4339 The jump to the next instruction is optimized as a NOP.
4340 Do the same for local undefined symbols. */
4341 if (weak_undef_p && ! via_plt_p)
4342 {
4343 bfd_putl32 (INSN_NOP, hit_data);
4344 return bfd_reloc_ok;
4345 }
4346
4347 /* If the call goes through a PLT entry, make sure to
4348 check distance to the right destination address. */
4349 if (via_plt_p)
4350 {
4351 value = (splt->output_section->vma
4352 + splt->output_offset + h->plt.offset);
4353 *unresolved_reloc_p = FALSE;
4354 }
4355
4356 /* If the target symbol is global and marked as a function, the
4357 relocation applies to a function call or a tail call. In this
4358 situation we can veneer out-of-range branches. The veneers
4359 use IP0 and IP1, hence they cannot be used for arbitrary
4360 out-of-range branches that occur within the body of a function. */
4361 if (h && h->type == STT_FUNC)
4362 {
4363 /* Check if a stub has to be inserted because the destination
4364 is too far away. */
4365 if (! aarch64_valid_branch_p (value, place))
4366 {
4367 /* The target is out of reach, so redirect the branch to
4368 the local stub for this function. */
4369 struct elf_aarch64_stub_hash_entry *stub_entry;
4370 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4371 sym_sec, h,
4372 rel, globals);
4373 if (stub_entry != NULL)
4374 value = (stub_entry->stub_offset
4375 + stub_entry->stub_sec->output_offset
4376 + stub_entry->stub_sec->output_section->vma);
4377 }
4378 }
4379 }
4380 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4381 signed_addend, weak_undef_p);
4382 break;
4383
4384 case BFD_RELOC_AARCH64_16:
4385 #if ARCH_SIZE == 64
4386 case BFD_RELOC_AARCH64_32:
4387 #endif
4388 case BFD_RELOC_AARCH64_ADD_LO12:
4389 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4390 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4391 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4392 case BFD_RELOC_AARCH64_BRANCH19:
4393 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4394 case BFD_RELOC_AARCH64_LDST8_LO12:
4395 case BFD_RELOC_AARCH64_LDST16_LO12:
4396 case BFD_RELOC_AARCH64_LDST32_LO12:
4397 case BFD_RELOC_AARCH64_LDST64_LO12:
4398 case BFD_RELOC_AARCH64_LDST128_LO12:
4399 case BFD_RELOC_AARCH64_MOVW_G0_S:
4400 case BFD_RELOC_AARCH64_MOVW_G1_S:
4401 case BFD_RELOC_AARCH64_MOVW_G2_S:
4402 case BFD_RELOC_AARCH64_MOVW_G0:
4403 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4404 case BFD_RELOC_AARCH64_MOVW_G1:
4405 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4406 case BFD_RELOC_AARCH64_MOVW_G2:
4407 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4408 case BFD_RELOC_AARCH64_MOVW_G3:
4409 case BFD_RELOC_AARCH64_16_PCREL:
4410 case BFD_RELOC_AARCH64_32_PCREL:
4411 case BFD_RELOC_AARCH64_64_PCREL:
4412 case BFD_RELOC_AARCH64_TSTBR14:
4413 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4414 signed_addend, weak_undef_p);
4415 break;
4416
4417 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4418 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4419 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4420 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4421 if (globals->root.sgot == NULL)
4422 BFD_ASSERT (h != NULL);
4423
4424 if (h != NULL)
4425 {
4426 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4427 output_bfd,
4428 unresolved_reloc_p);
4429 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4430 0, weak_undef_p);
4431 }
4432 break;
4433
4434 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4435 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4436 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4437 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4438 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4439 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4440 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4441 if (globals->root.sgot == NULL)
4442 return bfd_reloc_notsupported;
4443
4444 value = (symbol_got_offset (input_bfd, h, r_symndx)
4445 + globals->root.sgot->output_section->vma
4446 + globals->root.sgot->output_offset);
4447
4448 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4449 0, weak_undef_p);
4450 *unresolved_reloc_p = FALSE;
4451 break;
4452
4453 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4454 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4455 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4456 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4457 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4458 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4459 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4460 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4461 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4462 signed_addend - tpoff_base (info),
4463 weak_undef_p);
4464 *unresolved_reloc_p = FALSE;
4465 break;
4466
4467 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4468 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4469 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4470 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4471 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4472 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4473 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4474 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4475 if (globals->root.sgot == NULL)
4476 return bfd_reloc_notsupported;
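/* TLSDESC GOT entries live in .got.plt, after the PLT jump-table slots;
   sgotplt_jump_table_size is the size of that initial region.  */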
4477 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4478 + globals->root.sgotplt->output_section->vma
4479 + globals->root.sgotplt->output_offset
4480 + globals->sgotplt_jump_table_size);
4481
4482 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4483 0, weak_undef_p);
4484 *unresolved_reloc_p = FALSE;
4485 break;
4486
4487 default:
4488 return bfd_reloc_notsupported;
4489 }
4490
4491 if (saved_addend)
4492 *saved_addend = value;
4493
4494 /* Only apply the final relocation in a sequence. */
4495 if (save_addend)
4496 return bfd_reloc_continue;
4497
4498 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4499 howto, value);
4500 }
4501
4502 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4503 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4504 link.
4505
4506 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4507 is to then call final_link_relocate. Return other values in the
4508 case of error. */
4509
4510 static bfd_reloc_status_type
4511 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4512 bfd *input_bfd, bfd_byte *contents,
4513 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4514 {
4515 bfd_boolean is_local = h == NULL;
4516 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4517 unsigned long insn;
4518
4519 BFD_ASSERT (globals && input_bfd && contents && rel);
4520
4521 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4522 {
4523 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4524 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4525 if (is_local)
4526 {
4527 /* GD->LE relaxation:
4528 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4529 or
4530 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4531 */
4532 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4533 return bfd_reloc_continue;
4534 }
4535 else
4536 {
4537 /* GD->IE relaxation:
4538 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4539 or
4540 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4541 */
4542 return bfd_reloc_continue;
4543 }
4544
4545 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4546 BFD_ASSERT (0);
4547 break;
4548
4549 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4550 if (is_local)
4551 {
4552 /* Tiny TLSDESC->LE relaxation:
4553 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4554 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4555 .tlsdesccall var
4556 blr x1 => nop
4557 */
4558 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4559 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4560
4561 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4562 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4563 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4564
4565 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4566 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4567 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4568 return bfd_reloc_continue;
4569 }
4570 else
4571 {
4572 /* Tiny TLSDESC->IE relaxation:
4573 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4574 adr x0, :tlsdesc:var => nop
4575 .tlsdesccall var
4576 blr x1 => nop
4577 */
4578 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4579 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4580
4581 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4582 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4583
4584 bfd_putl32 (0x58000000, contents + rel->r_offset);
4585 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4586 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4587 return bfd_reloc_continue;
4588 }
4589
4590 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4591 if (is_local)
4592 {
4593 /* Tiny GD->LE relaxation:
4594 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4595 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4596 nop => add x0, x0, #:tprel_lo12_nc:x
4597 */
4598
4599 /* First kill the tls_get_addr reloc on the bl instruction. */
4600 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4601
4602 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4603 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4604 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4605
4606 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4607 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4608 rel[1].r_offset = rel->r_offset + 8;
4609
4610 /* Move the current relocation to the second instruction in
4611 the sequence. */
4612 rel->r_offset += 4;
4613 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4614 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4615 return bfd_reloc_continue;
4616 }
4617 else
4618 {
4619 /* Tiny GD->IE relaxation:
4620 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4621 bl __tls_get_addr => mrs x1, tpidr_el0
4622 nop => add x0, x0, x1
4623 */
4624
4625 /* First kill the tls_get_addr reloc on the bl instruction. */
4626 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4627 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4628
4629 bfd_putl32 (0x58000000, contents + rel->r_offset);
4630 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4631 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4632 return bfd_reloc_continue;
4633 }
4634
4635 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4636 return bfd_reloc_continue;
4637
4638 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4639 if (is_local)
4640 {
4641 /* GD->LE relaxation:
4642 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4643 */
4644 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4645 return bfd_reloc_continue;
4646 }
4647 else
4648 {
4649 /* GD->IE relaxation:
4650 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4651 */
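/* Clear the Rt field (bits [4:0]) so the destination register becomes x0;
   the load offset itself is rewritten via the relocation.  */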
4652 insn = bfd_getl32 (contents + rel->r_offset);
4653 insn &= 0xffffffe0;
4654 bfd_putl32 (insn, contents + rel->r_offset);
4655 return bfd_reloc_continue;
4656 }
4657
4658 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4659 if (is_local)
4660 {
4661 /* GD->LE relaxation
4662 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4663 bl __tls_get_addr => mrs x1, tpidr_el0
4664 nop => add x0, x1, x0
4665 */
4666
4667 /* First kill the tls_get_addr reloc on the bl instruction. */
4668 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4669 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4670
4671 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4672 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4673 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4674 return bfd_reloc_continue;
4675 }
4676 else
4677 {
4678 /* GD->IE relaxation
4679 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4680 BL __tls_get_addr => mrs x1, tpidr_el0
4681 R_AARCH64_CALL26
4682 NOP => add x0, x1, x0
4683 */
4684
4685 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4686
4687 /* Remove the relocation on the BL instruction. */
4688 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4689
4690 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4691
4692 /* We choose to fix up the BL and NOP instructions using the
4693 offset from the second relocation to allow flexibility in
4694 scheduling instructions between the ADD and BL. */
4695 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4696 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4697 return bfd_reloc_continue;
4698 }
4699
4700 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4701 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4702 /* GD->IE/LE relaxation:
4703 add x0, x0, #:tlsdesc_lo12:var => nop
4704 blr xd => nop
4705 */
4706 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4707 return bfd_reloc_ok;
4708
4709 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4710 /* IE->LE relaxation:
4711 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4712 */
4713 if (is_local)
4714 {
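/* Reuse the destination register of the original ADRP (Rd, bits [4:0])
   and overlay the MOVZ xd, #:tprel_g1: encoding (0xd2a00000) on top.  */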
4715 insn = bfd_getl32 (contents + rel->r_offset);
4716 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4717 }
4718 return bfd_reloc_continue;
4719
4720 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4721 /* IE->LE relaxation:
4722 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4723 */
4724 if (is_local)
4725 {
4726 insn = bfd_getl32 (contents + rel->r_offset);
4727 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4728 }
4729 return bfd_reloc_continue;
4730
4731 default:
4732 return bfd_reloc_continue;
4733 }
4734
4735 return bfd_reloc_ok;
4736 }
4737
4738 /* Relocate an AArch64 ELF section. */
4739
4740 static bfd_boolean
4741 elfNN_aarch64_relocate_section (bfd *output_bfd,
4742 struct bfd_link_info *info,
4743 bfd *input_bfd,
4744 asection *input_section,
4745 bfd_byte *contents,
4746 Elf_Internal_Rela *relocs,
4747 Elf_Internal_Sym *local_syms,
4748 asection **local_sections)
4749 {
4750 Elf_Internal_Shdr *symtab_hdr;
4751 struct elf_link_hash_entry **sym_hashes;
4752 Elf_Internal_Rela *rel;
4753 Elf_Internal_Rela *relend;
4754 const char *name;
4755 struct elf_aarch64_link_hash_table *globals;
4756 bfd_boolean save_addend = FALSE;
4757 bfd_vma addend = 0;
4758
4759 globals = elf_aarch64_hash_table (info);
4760
4761 symtab_hdr = &elf_symtab_hdr (input_bfd);
4762 sym_hashes = elf_sym_hashes (input_bfd);
4763
4764 rel = relocs;
4765 relend = relocs + input_section->reloc_count;
4766 for (; rel < relend; rel++)
4767 {
4768 unsigned int r_type;
4769 bfd_reloc_code_real_type bfd_r_type;
4770 bfd_reloc_code_real_type relaxed_bfd_r_type;
4771 reloc_howto_type *howto;
4772 unsigned long r_symndx;
4773 Elf_Internal_Sym *sym;
4774 asection *sec;
4775 struct elf_link_hash_entry *h;
4776 bfd_vma relocation;
4777 bfd_reloc_status_type r;
4778 arelent bfd_reloc;
4779 char sym_type;
4780 bfd_boolean unresolved_reloc = FALSE;
4781 char *error_message = NULL;
4782
4783 r_symndx = ELFNN_R_SYM (rel->r_info);
4784 r_type = ELFNN_R_TYPE (rel->r_info);
4785
4786 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4787 howto = bfd_reloc.howto;
4788
4789 if (howto == NULL)
4790 {
4791 (*_bfd_error_handler)
4792 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4793 input_bfd, input_section, r_type);
4794 return FALSE;
4795 }
4796 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4797
4798 h = NULL;
4799 sym = NULL;
4800 sec = NULL;
4801
4802 if (r_symndx < symtab_hdr->sh_info)
4803 {
4804 sym = local_syms + r_symndx;
4805 sym_type = ELFNN_ST_TYPE (sym->st_info);
4806 sec = local_sections[r_symndx];
4807
4808 /* An object file might have a reference to a local
4809 undefined symbol. This is a daft object file, but we
4810 should at least do something about it. */
4811 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4812 && bfd_is_und_section (sec)
4813 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4814 {
4815 if (!info->callbacks->undefined_symbol
4816 (info, bfd_elf_string_from_elf_section
4817 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4818 input_bfd, input_section, rel->r_offset, TRUE))
4819 return FALSE;
4820 }
4821
4822 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4823
4824 /* Relocate against local STT_GNU_IFUNC symbol. */
4825 if (!info->relocatable
4826 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4827 {
4828 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4829 rel, FALSE);
4830 if (h == NULL)
4831 abort ();
4832
4833 /* Set STT_GNU_IFUNC symbol value. */
4834 h->root.u.def.value = sym->st_value;
4835 h->root.u.def.section = sec;
4836 }
4837 }
4838 else
4839 {
4840 bfd_boolean warned, ignored;
4841
4842 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4843 r_symndx, symtab_hdr, sym_hashes,
4844 h, sec, relocation,
4845 unresolved_reloc, warned, ignored);
4846
4847 sym_type = h->type;
4848 }
4849
4850 if (sec != NULL && discarded_section (sec))
4851 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4852 rel, 1, relend, howto, 0, contents);
4853
4854 if (info->relocatable)
4855 continue;
4856
4857 if (h != NULL)
4858 name = h->root.root.string;
4859 else
4860 {
4861 name = (bfd_elf_string_from_elf_section
4862 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4863 if (name == NULL || *name == '\0')
4864 name = bfd_section_name (input_bfd, sec);
4865 }
4866
4867 if (r_symndx != 0
4868 && r_type != R_AARCH64_NONE
4869 && r_type != R_AARCH64_NULL
4870 && (h == NULL
4871 || h->root.type == bfd_link_hash_defined
4872 || h->root.type == bfd_link_hash_defweak)
4873 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4874 {
4875 (*_bfd_error_handler)
4876 ((sym_type == STT_TLS
4877 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4878 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4879 input_bfd,
4880 input_section, (long) rel->r_offset, howto->name, name);
4881 }
4882
4883 /* We relax only if we can see that there can be a valid transition
4884 from one reloc type to another.
4885 We call elfNN_aarch64_final_link_relocate unless we're completely
4886 done, i.e., the relaxation produced the final output we want. */
4887
4888 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4889 h, r_symndx);
4890 if (relaxed_bfd_r_type != bfd_r_type)
4891 {
4892 bfd_r_type = relaxed_bfd_r_type;
4893 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4894 BFD_ASSERT (howto != NULL);
4895 r_type = howto->type;
4896 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4897 unresolved_reloc = 0;
4898 }
4899 else
4900 r = bfd_reloc_continue;
4901
4902 /* There may be multiple consecutive relocations for the
4903 same offset. In that case we are supposed to treat the
4904 output of each relocation as the addend for the next. */
4905 if (rel + 1 < relend
4906 && rel->r_offset == rel[1].r_offset
4907 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4908 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4909 save_addend = TRUE;
4910 else
4911 save_addend = FALSE;
4912
4913 if (r == bfd_reloc_continue)
4914 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4915 input_section, contents, rel,
4916 relocation, info, sec,
4917 h, &unresolved_reloc,
4918 save_addend, &addend, sym);
4919
4920 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4921 {
4922 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4923 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4924 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4925 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4926 {
4927 bfd_boolean need_relocs = FALSE;
4928 bfd_byte *loc;
4929 int indx;
4930 bfd_vma off;
4931
4932 off = symbol_got_offset (input_bfd, h, r_symndx);
4933 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4934
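/* Dynamic TLS_DTPMOD/TLS_DTPREL relocations are emitted when producing a
   shared object or when the symbol is dynamic, except for non-default
   visibility undefined weak symbols; otherwise the module ID and offset
   are written into the GOT directly.  */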
4935 need_relocs =
4936 (info->shared || indx != 0) &&
4937 (h == NULL
4938 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4939 || h->root.type != bfd_link_hash_undefweak);
4940
4941 BFD_ASSERT (globals->root.srelgot != NULL);
4942
4943 if (need_relocs)
4944 {
4945 Elf_Internal_Rela rela;
4946 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
4947 rela.r_addend = 0;
4948 rela.r_offset = globals->root.sgot->output_section->vma +
4949 globals->root.sgot->output_offset + off;
4950
4951
4952 loc = globals->root.srelgot->contents;
4953 loc += globals->root.srelgot->reloc_count++
4954 * RELOC_SIZE (htab);
4955 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4956
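/* For a locally resolved symbol (indx == 0) the DTPREL offset is known now
   and is written straight into the second GOT slot; otherwise a
   TLS_DTPREL relocation is emitted below so the loader can fill it in.  */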
4957 if (indx == 0)
4958 {
4959 bfd_put_NN (output_bfd,
4960 relocation - dtpoff_base (info),
4961 globals->root.sgot->contents + off
4962 + GOT_ENTRY_SIZE);
4963 }
4964 else
4965 {
4966 /* This TLS symbol is global. We emit a
4967 relocation to fix up the TLS offset at load
4968 time. */
4969 rela.r_info =
4970 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
4971 rela.r_addend = 0;
4972 rela.r_offset =
4973 (globals->root.sgot->output_section->vma
4974 + globals->root.sgot->output_offset + off
4975 + GOT_ENTRY_SIZE);
4976
4977 loc = globals->root.srelgot->contents;
4978 loc += globals->root.srelgot->reloc_count++
4979 * RELOC_SIZE (globals);
4980 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4981 bfd_put_NN (output_bfd, (bfd_vma) 0,
4982 globals->root.sgot->contents + off
4983 + GOT_ENTRY_SIZE);
4984 }
4985 }
4986 else
4987 {
4988 bfd_put_NN (output_bfd, (bfd_vma) 1,
4989 globals->root.sgot->contents + off);
4990 bfd_put_NN (output_bfd,
4991 relocation - dtpoff_base (info),
4992 globals->root.sgot->contents + off
4993 + GOT_ENTRY_SIZE);
4994 }
4995
4996 symbol_got_offset_mark (input_bfd, h, r_symndx);
4997 }
4998 break;
4999
5000 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5001 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5002 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5003 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5004 {
5005 bfd_boolean need_relocs = FALSE;
5006 bfd_byte *loc;
5007 int indx;
5008 bfd_vma off;
5009
5010 off = symbol_got_offset (input_bfd, h, r_symndx);
5011
5012 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5013
5014 need_relocs =
5015 (info->shared || indx != 0) &&
5016 (h == NULL
5017 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5018 || h->root.type != bfd_link_hash_undefweak);
5019
5020 BFD_ASSERT (globals->root.srelgot != NULL);
5021
5022 if (need_relocs)
5023 {
5024 Elf_Internal_Rela rela;
5025
5026 if (indx == 0)
5027 rela.r_addend = relocation - dtpoff_base (info);
5028 else
5029 rela.r_addend = 0;
5030
5031 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5032 rela.r_offset = globals->root.sgot->output_section->vma +
5033 globals->root.sgot->output_offset + off;
5034
5035 loc = globals->root.srelgot->contents;
5036 loc += globals->root.srelgot->reloc_count++
5037 * RELOC_SIZE (htab);
5038
5039 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5040
5041 bfd_put_NN (output_bfd, rela.r_addend,
5042 globals->root.sgot->contents + off);
5043 }
5044 else
5045 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5046 globals->root.sgot->contents + off);
5047
5048 symbol_got_offset_mark (input_bfd, h, r_symndx);
5049 }
5050 break;
5051
5052 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5053 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5054 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5055 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5056 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5057 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5058 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5059 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5060 break;
5061
5062 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5063 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5064 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5065 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5066 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5067 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5068 {
5069 bfd_boolean need_relocs = FALSE;
5070 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5071 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5072
5073 need_relocs = (h == NULL
5074 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5075 || h->root.type != bfd_link_hash_undefweak);
5076
5077 BFD_ASSERT (globals->root.srelgot != NULL);
5078 BFD_ASSERT (globals->root.sgot != NULL);
5079
5080 if (need_relocs)
5081 {
5082 bfd_byte *loc;
5083 Elf_Internal_Rela rela;
5084 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5085
5086 rela.r_addend = 0;
5087 rela.r_offset = (globals->root.sgotplt->output_section->vma
5088 + globals->root.sgotplt->output_offset
5089 + off + globals->sgotplt_jump_table_size);
5090
5091 if (indx == 0)
5092 rela.r_addend = relocation - dtpoff_base (info);
5093
5094 /* Allocate the next available slot in the PLT reloc
5095 section to hold our R_AARCH64_TLSDESC; the next
5096 available slot is determined from reloc_count,
5097 which we then step. Note that reloc_count was
5098 artificially moved down while allocating slots for
5099 real PLT relocs, so that all of the PLT relocs
5100 will fit above the initial reloc_count and the
5101 extra TLSDESC relocs will fit below. */
5102 loc = globals->root.srelplt->contents;
5103 loc += globals->root.srelplt->reloc_count++
5104 * RELOC_SIZE (globals);
5105
5106 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5107
5108 bfd_put_NN (output_bfd, (bfd_vma) 0,
5109 globals->root.sgotplt->contents + off +
5110 globals->sgotplt_jump_table_size);
5111 bfd_put_NN (output_bfd, (bfd_vma) 0,
5112 globals->root.sgotplt->contents + off +
5113 globals->sgotplt_jump_table_size +
5114 GOT_ENTRY_SIZE);
5115 }
5116
5117 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5118 }
5119 break;
5120 default:
5121 break;
5122 }
5123
5124 if (!save_addend)
5125 addend = 0;
5126
5127
5128 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5129 because such sections are not SEC_ALLOC and thus ld.so will
5130 not process them. */
5131 if (unresolved_reloc
5132 && !((input_section->flags & SEC_DEBUGGING) != 0
5133 && h->def_dynamic)
5134 && _bfd_elf_section_offset (output_bfd, info, input_section,
5135 +rel->r_offset) != (bfd_vma) - 1)
5136 {
5137 (*_bfd_error_handler)
5138 (_
5139 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5140 input_bfd, input_section, (long) rel->r_offset, howto->name,
5141 h->root.root.string);
5142 return FALSE;
5143 }
5144
5145 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5146 {
5147 switch (r)
5148 {
5149 case bfd_reloc_overflow:
5150 /* If the overflowing reloc was to an undefined symbol,
5151 we have already printed one error message and there
5152 is no point complaining again. */
5153 if ((!h ||
5154 h->root.type != bfd_link_hash_undefined)
5155 && (!((*info->callbacks->reloc_overflow)
5156 (info, (h ? &h->root : NULL), name, howto->name,
5157 (bfd_vma) 0, input_bfd, input_section,
5158 rel->r_offset))))
5159 return FALSE;
5160 break;
5161
5162 case bfd_reloc_undefined:
5163 if (!((*info->callbacks->undefined_symbol)
5164 (info, name, input_bfd, input_section,
5165 rel->r_offset, TRUE)))
5166 return FALSE;
5167 break;
5168
5169 case bfd_reloc_outofrange:
5170 error_message = _("out of range");
5171 goto common_error;
5172
5173 case bfd_reloc_notsupported:
5174 error_message = _("unsupported relocation");
5175 goto common_error;
5176
5177 case bfd_reloc_dangerous:
5178 /* error_message should already be set. */
5179 goto common_error;
5180
5181 default:
5182 error_message = _("unknown error");
5183 /* Fall through. */
5184
5185 common_error:
5186 BFD_ASSERT (error_message != NULL);
5187 if (!((*info->callbacks->reloc_dangerous)
5188 (info, error_message, input_bfd, input_section,
5189 rel->r_offset)))
5190 return FALSE;
5191 break;
5192 }
5193 }
5194 }
5195
5196 return TRUE;
5197 }
5198
5199 /* Set the right machine number. */
5200
5201 static bfd_boolean
5202 elfNN_aarch64_object_p (bfd *abfd)
5203 {
5204 #if ARCH_SIZE == 32
5205 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5206 #else
5207 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5208 #endif
5209 return TRUE;
5210 }
5211
5212 /* Function to keep AArch64 specific flags in the ELF header. */
5213
5214 static bfd_boolean
5215 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5216 {
5217 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5218 {
5219 }
5220 else
5221 {
5222 elf_elfheader (abfd)->e_flags = flags;
5223 elf_flags_init (abfd) = TRUE;
5224 }
5225
5226 return TRUE;
5227 }
5228
5229 /* Merge backend specific data from an object file to the output
5230 object file when linking. */
5231
5232 static bfd_boolean
5233 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5234 {
5235 flagword out_flags;
5236 flagword in_flags;
5237 bfd_boolean flags_compatible = TRUE;
5238 asection *sec;
5239
5240 /* Check if we have the same endianness. */
5241 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5242 return FALSE;
5243
5244 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5245 return TRUE;
5246
5247 /* The input BFD must have had its flags initialised. */
5248 /* The following seems bogus to me -- The flags are initialized in
5249 the assembler but I don't think an elf_flags_init field is
5250 written into the object. */
5251 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5252
5253 in_flags = elf_elfheader (ibfd)->e_flags;
5254 out_flags = elf_elfheader (obfd)->e_flags;
5255
5256 if (!elf_flags_init (obfd))
5257 {
5258 /* If the input is the default architecture and had the default
5259 flags then do not bother setting the flags for the output
5260 architecture, instead allow future merges to do this. If no
5261 future merges ever set these flags then they will retain their
5262 uninitialised values, which, surprise surprise, correspond
5263 to the default values. */
5264 if (bfd_get_arch_info (ibfd)->the_default
5265 && elf_elfheader (ibfd)->e_flags == 0)
5266 return TRUE;
5267
5268 elf_flags_init (obfd) = TRUE;
5269 elf_elfheader (obfd)->e_flags = in_flags;
5270
5271 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5272 && bfd_get_arch_info (obfd)->the_default)
5273 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5274 bfd_get_mach (ibfd));
5275
5276 return TRUE;
5277 }
5278
5279 /* Identical flags must be compatible. */
5280 if (in_flags == out_flags)
5281 return TRUE;
5282
5283 /* Check to see if the input BFD actually contains any sections. If
5284 not, its flags may not have been initialised either, but it
5285 cannot actually cause any incompatibility. Do not short-circuit
5286 dynamic objects; their section list may be emptied by
5287 elf_link_add_object_symbols.
5288
5289 Also check to see if there are no code sections in the input.
5290 In this case there is no need to check for code-specific flags.
5291 XXX - do we need to worry about floating-point format compatibility
5292 in data sections ? */
5293 if (!(ibfd->flags & DYNAMIC))
5294 {
5295 bfd_boolean null_input_bfd = TRUE;
5296 bfd_boolean only_data_sections = TRUE;
5297
5298 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5299 {
5300 if ((bfd_get_section_flags (ibfd, sec)
5301 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5302 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5303 only_data_sections = FALSE;
5304
5305 null_input_bfd = FALSE;
5306 break;
5307 }
5308
5309 if (null_input_bfd || only_data_sections)
5310 return TRUE;
5311 }
5312
5313 return flags_compatible;
5314 }
5315
5316 /* Display the flags field. */
5317
5318 static bfd_boolean
5319 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5320 {
5321 FILE *file = (FILE *) ptr;
5322 unsigned long flags;
5323
5324 BFD_ASSERT (abfd != NULL && ptr != NULL);
5325
5326 /* Print normal ELF private data. */
5327 _bfd_elf_print_private_bfd_data (abfd, ptr);
5328
5329 flags = elf_elfheader (abfd)->e_flags;
5330 /* Ignore init flag - it may not be set, despite the flags field
5331 containing valid data. */
5332
5333 /* xgettext:c-format */
5334 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5335
5336 if (flags)
5337 fprintf (file, _("<Unrecognised flag bits set>"));
5338
5339 fputc ('\n', file);
5340
5341 return TRUE;
5342 }
5343
5344 /* Update the got entry reference counts for the section being removed. */
5345
5346 static bfd_boolean
5347 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5348 struct bfd_link_info *info,
5349 asection *sec,
5350 const Elf_Internal_Rela * relocs)
5351 {
5352 struct elf_aarch64_link_hash_table *htab;
5353 Elf_Internal_Shdr *symtab_hdr;
5354 struct elf_link_hash_entry **sym_hashes;
5355 struct elf_aarch64_local_symbol *locals;
5356 const Elf_Internal_Rela *rel, *relend;
5357
5358 if (info->relocatable)
5359 return TRUE;
5360
5361 htab = elf_aarch64_hash_table (info);
5362
5363 if (htab == NULL)
5364 return FALSE;
5365
5366 elf_section_data (sec)->local_dynrel = NULL;
5367
5368 symtab_hdr = &elf_symtab_hdr (abfd);
5369 sym_hashes = elf_sym_hashes (abfd);
5370
5371 locals = elf_aarch64_locals (abfd);
5372
5373 relend = relocs + sec->reloc_count;
5374 for (rel = relocs; rel < relend; rel++)
5375 {
5376 unsigned long r_symndx;
5377 unsigned int r_type;
5378 struct elf_link_hash_entry *h = NULL;
5379
5380 r_symndx = ELFNN_R_SYM (rel->r_info);
5381
5382 if (r_symndx >= symtab_hdr->sh_info)
5383 {
5384
5385 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5386 while (h->root.type == bfd_link_hash_indirect
5387 || h->root.type == bfd_link_hash_warning)
5388 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5389 }
5390 else
5391 {
5392 Elf_Internal_Sym *isym;
5393
5394 /* A local symbol. */
5395 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5396 abfd, r_symndx);
5397
5398 /* Check relocation against local STT_GNU_IFUNC symbol. */
5399 if (isym != NULL
5400 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5401 {
5402 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5403 if (h == NULL)
5404 abort ();
5405 }
5406 }
5407
5408 if (h)
5409 {
5410 struct elf_aarch64_link_hash_entry *eh;
5411 struct elf_dyn_relocs **pp;
5412 struct elf_dyn_relocs *p;
5413
5414 eh = (struct elf_aarch64_link_hash_entry *) h;
5415
5416 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5417 if (p->sec == sec)
5418 {
5419 /* Everything must go for SEC. */
5420 *pp = p->next;
5421 break;
5422 }
5423 }
5424
5425 r_type = ELFNN_R_TYPE (rel->r_info);
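/* Use the same transitioned relocation type that check_relocs used, so
   that the reference counts decremented below match the ones incremented
   there.  */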
5426 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5427 {
5428 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5429 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5430 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5431 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5432 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5433 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5434 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5435 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5436 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5437 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5438 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5439 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5440 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5441 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5442 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5443 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5444 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5445 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5446 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5447 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5448 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5449 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5450 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5451 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5452 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5453 if (h != NULL)
5454 {
5455 if (h->got.refcount > 0)
5456 h->got.refcount -= 1;
5457
5458 if (h->type == STT_GNU_IFUNC)
5459 {
5460 if (h->plt.refcount > 0)
5461 h->plt.refcount -= 1;
5462 }
5463 }
5464 else if (locals != NULL)
5465 {
5466 if (locals[r_symndx].got_refcount > 0)
5467 locals[r_symndx].got_refcount -= 1;
5468 }
5469 break;
5470
5471 case BFD_RELOC_AARCH64_CALL26:
5472 case BFD_RELOC_AARCH64_JUMP26:
5473 /* If this is a local symbol then we resolve it
5474 directly without creating a PLT entry. */
5475 if (h == NULL)
5476 continue;
5477
5478 if (h->plt.refcount > 0)
5479 h->plt.refcount -= 1;
5480 break;
5481
5482 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5483 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5484 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5485 case BFD_RELOC_AARCH64_MOVW_G3:
5486 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5487 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5488 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5489 case BFD_RELOC_AARCH64_NN:
5490 if (h != NULL && info->executable)
5491 {
5492 if (h->plt.refcount > 0)
5493 h->plt.refcount -= 1;
5494 }
5495 break;
5496
5497 default:
5498 break;
5499 }
5500 }
5501
5502 return TRUE;
5503 }
5504
5505 /* Adjust a symbol defined by a dynamic object and referenced by a
5506 regular object. The current definition is in some section of the
5507 dynamic object, but we're not including those sections. We have to
5508 change the definition to something the rest of the link can
5509 understand. */
5510
5511 static bfd_boolean
5512 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5513 struct elf_link_hash_entry *h)
5514 {
5515 struct elf_aarch64_link_hash_table *htab;
5516 asection *s;
5517
5518 /* If this is a function, put it in the procedure linkage table. We
5519 will fill in the contents of the procedure linkage table later,
5520 when we know the address of the .got section. */
5521 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5522 {
5523 if (h->plt.refcount <= 0
5524 || (h->type != STT_GNU_IFUNC
5525 && (SYMBOL_CALLS_LOCAL (info, h)
5526 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5527 && h->root.type == bfd_link_hash_undefweak))))
5528 {
5529 /* This case can occur if we saw a CALL26 reloc in
5530 an input file, but the symbol wasn't referred to
5531 by a dynamic object or all references were
5532 garbage collected. In such a case we can resolve
5533 the branch directly and no PLT entry is needed. */
5534 h->plt.offset = (bfd_vma) - 1;
5535 h->needs_plt = 0;
5536 }
5537
5538 return TRUE;
5539 }
5540 else
5541 /* It's possible that we incorrectly decided a .plt reloc was
5542 needed for a PC-relative reloc to a non-function sym in
5543 check_relocs. We can't decide accurately between function and
5544 non-function syms in check_relocs; objects loaded later in
5545 the link may change h->type. So fix it now. */
5546 h->plt.offset = (bfd_vma) - 1;
5547
5548
5549 /* If this is a weak symbol, and there is a real definition, the
5550 processor independent code will have arranged for us to see the
5551 real definition first, and we can just use the same value. */
5552 if (h->u.weakdef != NULL)
5553 {
5554 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5555 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5556 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5557 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5558 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5559 h->non_got_ref = h->u.weakdef->non_got_ref;
5560 return TRUE;
5561 }
5562
5563 /* If we are creating a shared library, we must presume that the
5564 only references to the symbol are via the global offset table.
5565 For such cases we need not do anything here; the relocations will
5566 be handled correctly by relocate_section. */
5567 if (info->shared)
5568 return TRUE;
5569
5570 /* If there are no references to this symbol that do not use the
5571 GOT, we don't need to generate a copy reloc. */
5572 if (!h->non_got_ref)
5573 return TRUE;
5574
5575 /* If -z nocopyreloc was given, we won't generate them either. */
5576 if (info->nocopyreloc)
5577 {
5578 h->non_got_ref = 0;
5579 return TRUE;
5580 }
5581
5582 /* We must allocate the symbol in our .dynbss section, which will
5583 become part of the .bss section of the executable. There will be
5584 an entry for this symbol in the .dynsym section. The dynamic
5585 object will contain position independent code, so all references
5586 from the dynamic object to this symbol will go through the global
5587 offset table. The dynamic linker will use the .dynsym entry to
5588 determine the address it must put in the global offset table, so
5589 both the dynamic object and the regular object will refer to the
5590 same memory location for the variable. */
5591
5592 htab = elf_aarch64_hash_table (info);
5593
5594 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5595 to copy the initial value out of the dynamic object and into the
5596 runtime process image. */
5597 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5598 {
5599 htab->srelbss->size += RELOC_SIZE (htab);
5600 h->needs_copy = 1;
5601 }
5602
5603 s = htab->sdynbss;
5604
5605 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5606
5607 }
5608
5609 static bfd_boolean
5610 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5611 {
5612 struct elf_aarch64_local_symbol *locals;
5613 locals = elf_aarch64_locals (abfd);
5614 if (locals == NULL)
5615 {
5616 locals = (struct elf_aarch64_local_symbol *)
5617 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5618 if (locals == NULL)
5619 return FALSE;
5620 elf_aarch64_locals (abfd) = locals;
5621 }
5622 return TRUE;
5623 }
5624
5625 /* Create the .got section to hold the global offset table. */
5626
5627 static bfd_boolean
5628 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5629 {
5630 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5631 flagword flags;
5632 asection *s;
5633 struct elf_link_hash_entry *h;
5634 struct elf_link_hash_table *htab = elf_hash_table (info);
5635
5636 /* This function may be called more than once. */
5637 s = bfd_get_linker_section (abfd, ".got");
5638 if (s != NULL)
5639 return TRUE;
5640
5641 flags = bed->dynamic_sec_flags;
5642
5643 s = bfd_make_section_anyway_with_flags (abfd,
5644 (bed->rela_plts_and_copies_p
5645 ? ".rela.got" : ".rel.got"),
5646 (bed->dynamic_sec_flags
5647 | SEC_READONLY));
5648 if (s == NULL
5649 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5650 return FALSE;
5651 htab->srelgot = s;
5652
5653 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5654 if (s == NULL
5655 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5656 return FALSE;
5657 htab->sgot = s;
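/* Reserve the first .got entry; conventionally this slot is later filled
   with the address of the dynamic section.  */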
5658 htab->sgot->size += GOT_ENTRY_SIZE;
5659
5660 if (bed->want_got_sym)
5661 {
5662 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5663 (or .got.plt) section. We don't do this in the linker script
5664 because we don't want to define the symbol if we are not creating
5665 a global offset table. */
5666 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5667 "_GLOBAL_OFFSET_TABLE_");
5668 elf_hash_table (info)->hgot = h;
5669 if (h == NULL)
5670 return FALSE;
5671 }
5672
5673 if (bed->want_got_plt)
5674 {
5675 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5676 if (s == NULL
5677 || !bfd_set_section_alignment (abfd, s,
5678 bed->s->log_file_align))
5679 return FALSE;
5680 htab->sgotplt = s;
5681 }
5682
5683 /* The first bit of the global offset table is the header. */
5684 s->size += bed->got_header_size;
5685
5686 return TRUE;
5687 }
5688
5689 /* Look through the relocs for a section during the first phase. */
5690
5691 static bfd_boolean
5692 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5693 asection *sec, const Elf_Internal_Rela *relocs)
5694 {
5695 Elf_Internal_Shdr *symtab_hdr;
5696 struct elf_link_hash_entry **sym_hashes;
5697 const Elf_Internal_Rela *rel;
5698 const Elf_Internal_Rela *rel_end;
5699 asection *sreloc;
5700
5701 struct elf_aarch64_link_hash_table *htab;
5702
5703 if (info->relocatable)
5704 return TRUE;
5705
5706 BFD_ASSERT (is_aarch64_elf (abfd));
5707
5708 htab = elf_aarch64_hash_table (info);
5709 sreloc = NULL;
5710
5711 symtab_hdr = &elf_symtab_hdr (abfd);
5712 sym_hashes = elf_sym_hashes (abfd);
5713
5714 rel_end = relocs + sec->reloc_count;
5715 for (rel = relocs; rel < rel_end; rel++)
5716 {
5717 struct elf_link_hash_entry *h;
5718 unsigned long r_symndx;
5719 unsigned int r_type;
5720 bfd_reloc_code_real_type bfd_r_type;
5721 Elf_Internal_Sym *isym;
5722
5723 r_symndx = ELFNN_R_SYM (rel->r_info);
5724 r_type = ELFNN_R_TYPE (rel->r_info);
5725
5726 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5727 {
5728 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5729 r_symndx);
5730 return FALSE;
5731 }
5732
5733 if (r_symndx < symtab_hdr->sh_info)
5734 {
5735 /* A local symbol. */
5736 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5737 abfd, r_symndx);
5738 if (isym == NULL)
5739 return FALSE;
5740
5741 /* Check relocation against local STT_GNU_IFUNC symbol. */
5742 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5743 {
5744 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5745 TRUE);
5746 if (h == NULL)
5747 return FALSE;
5748
5749 /* Fake a STT_GNU_IFUNC symbol. */
5750 h->type = STT_GNU_IFUNC;
5751 h->def_regular = 1;
5752 h->ref_regular = 1;
5753 h->forced_local = 1;
5754 h->root.type = bfd_link_hash_defined;
5755 }
5756 else
5757 h = NULL;
5758 }
5759 else
5760 {
5761 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5762 while (h->root.type == bfd_link_hash_indirect
5763 || h->root.type == bfd_link_hash_warning)
5764 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5765
5766 /* PR15323, ref flags aren't set for references in the same
5767 object. */
5768 h->root.non_ir_ref = 1;
5769 }
5770
5771 /* Could be done earlier, if h were already available. */
5772 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5773
5774 if (h != NULL)
5775 {
5776 /* Create the ifunc sections for static executables. If we
5777 never see an indirect function symbol nor we are building
5778 a static executable, those sections will be empty and
5779 won't appear in output. */
5780 switch (bfd_r_type)
5781 {
5782 default:
5783 break;
5784
5785 case BFD_RELOC_AARCH64_NN:
5786 case BFD_RELOC_AARCH64_CALL26:
5787 case BFD_RELOC_AARCH64_JUMP26:
5788 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5789 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5790 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5791 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5792 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5793 case BFD_RELOC_AARCH64_ADD_LO12:
5794 if (htab->root.dynobj == NULL)
5795 htab->root.dynobj = abfd;
5796 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5797 return FALSE;
5798 break;
5799 }
5800
5801 /* It is referenced by a non-shared object. */
5802 h->ref_regular = 1;
5803 h->root.non_ir_ref = 1;
5804 }
5805
5806 switch (bfd_r_type)
5807 {
5808 case BFD_RELOC_AARCH64_NN:
5809
5810 /* We don't need to handle relocs into sections not going into
5811 the "real" output. */
5812 if ((sec->flags & SEC_ALLOC) == 0)
5813 break;
5814
5815 if (h != NULL)
5816 {
5817 if (!info->shared)
5818 h->non_got_ref = 1;
5819
5820 h->plt.refcount += 1;
5821 h->pointer_equality_needed = 1;
5822 }
5823
5824 /* No need to do anything if we're not creating a shared
5825 object. */
5826 if (! info->shared)
5827 break;
5828
5829 {
5830 struct elf_dyn_relocs *p;
5831 struct elf_dyn_relocs **head;
5832
5833 /* We must copy these reloc types into the output file.
5834 Create a reloc section in dynobj and make room for
5835 this reloc. */
5836 if (sreloc == NULL)
5837 {
5838 if (htab->root.dynobj == NULL)
5839 htab->root.dynobj = abfd;
5840
5841 sreloc = _bfd_elf_make_dynamic_reloc_section
5842 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5843
5844 if (sreloc == NULL)
5845 return FALSE;
5846 }
5847
5848 /* If this is a global symbol, we count the number of
5849 relocations we need for this symbol. */
5850 if (h != NULL)
5851 {
5852 struct elf_aarch64_link_hash_entry *eh;
5853 eh = (struct elf_aarch64_link_hash_entry *) h;
5854 head = &eh->dyn_relocs;
5855 }
5856 else
5857 {
5858 /* Track dynamic relocs needed for local syms too.
5859 We really need local syms available to do this
5860 easily. Oh well. */
5861
5862 asection *s;
5863 void **vpp;
5864
5865 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5866 abfd, r_symndx);
5867 if (isym == NULL)
5868 return FALSE;
5869
5870 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5871 if (s == NULL)
5872 s = sec;
5873
5874 /* Beware of type punned pointers vs strict aliasing
5875 rules. */
5876 vpp = &(elf_section_data (s)->local_dynrel);
5877 head = (struct elf_dyn_relocs **) vpp;
5878 }
5879
5880 p = *head;
5881 if (p == NULL || p->sec != sec)
5882 {
5883 bfd_size_type amt = sizeof *p;
5884 p = ((struct elf_dyn_relocs *)
5885 bfd_zalloc (htab->root.dynobj, amt));
5886 if (p == NULL)
5887 return FALSE;
5888 p->next = *head;
5889 *head = p;
5890 p->sec = sec;
5891 }
5892
5893 p->count += 1;
5894
5895 }
5896 break;
5897
5898 /* RR: We probably want to keep a consistency check that
5899 there are no dangling GOT_PAGE relocs. */
5900 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5901 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5902 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5903 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5904 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5905 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5906 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5907 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5908 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5909 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5910 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5911 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5912 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5913 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5914 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5915 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5916 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5917 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5918 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5919 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5920 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5921 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5922 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5923 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5924 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5925 {
5926 unsigned got_type;
5927 unsigned old_got_type;
5928
5929 got_type = aarch64_reloc_got_type (bfd_r_type);
5930
5931 if (h)
5932 {
5933 h->got.refcount += 1;
5934 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5935 }
5936 else
5937 {
5938 struct elf_aarch64_local_symbol *locals;
5939
5940 if (!elfNN_aarch64_allocate_local_symbols
5941 (abfd, symtab_hdr->sh_info))
5942 return FALSE;
5943
5944 locals = elf_aarch64_locals (abfd);
5945 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5946 locals[r_symndx].got_refcount += 1;
5947 old_got_type = locals[r_symndx].got_type;
5948 }
5949
5950 /* If a variable is accessed with both general dynamic TLS
5951 methods, two slots may be created. */
5952 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5953 got_type |= old_got_type;
5954
5955 /* We will already have issued an error message if there
5956 is a TLS/non-TLS mismatch, based on the symbol type.
5957 So just combine any TLS types needed. */
5958 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5959 && got_type != GOT_NORMAL)
5960 got_type |= old_got_type;
5961
5962 /* If the symbol is accessed by both IE and GD methods, we
5963 are able to relax. Turn off the GD flag, without
5964 messing up with any other kind of TLS types that may be
5965 involved. */
5966 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5967 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5968
5969 if (old_got_type != got_type)
5970 {
5971 if (h != NULL)
5972 elf_aarch64_hash_entry (h)->got_type = got_type;
5973 else
5974 {
5975 struct elf_aarch64_local_symbol *locals;
5976 locals = elf_aarch64_locals (abfd);
5977 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5978 locals[r_symndx].got_type = got_type;
5979 }
5980 }
5981
5982 if (htab->root.dynobj == NULL)
5983 htab->root.dynobj = abfd;
5984 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
5985 return FALSE;
5986 break;
5987 }
5988
5989 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5990 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5991 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5992 case BFD_RELOC_AARCH64_MOVW_G3:
5993 if (info->shared)
5994 {
5995 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5996 (*_bfd_error_handler)
5997 (_("%B: relocation %s against `%s' can not be used when making "
5998 "a shared object; recompile with -fPIC"),
5999 abfd, elfNN_aarch64_howto_table[howto_index].name,
6000 (h) ? h->root.root.string : "a local symbol");
6001 bfd_set_error (bfd_error_bad_value);
6002 return FALSE;
6003 }
6004
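/* Fall through.  */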
6005 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6006 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6007 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6008 if (h != NULL && info->executable)
6009 {
6010 /* If this reloc is in a read-only section, we might
6011 need a copy reloc. We can't check reliably at this
6012 stage whether the section is read-only, as input
6013 sections have not yet been mapped to output sections.
6014 Tentatively set the flag for now, and correct in
6015 adjust_dynamic_symbol. */
6016 h->non_got_ref = 1;
6017 h->plt.refcount += 1;
6018 h->pointer_equality_needed = 1;
6019 }
6020 /* FIXME: RR need to handle these in shared libraries
6021 and essentially bomb out, as these are non-PIC
6022 relocations in shared libraries. */
6023 break;
6024
6025 case BFD_RELOC_AARCH64_CALL26:
6026 case BFD_RELOC_AARCH64_JUMP26:
6027 /* If this is a local symbol then we resolve it
6028 directly without creating a PLT entry. */
6029 if (h == NULL)
6030 continue;
6031
6032 h->needs_plt = 1;
6033 if (h->plt.refcount <= 0)
6034 h->plt.refcount = 1;
6035 else
6036 h->plt.refcount += 1;
6037 break;
6038
6039 default:
6040 break;
6041 }
6042 }
6043
6044 return TRUE;
6045 }
6046
6047 /* Treat mapping symbols as special target symbols. */
6048
6049 static bfd_boolean
6050 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6051 asymbol *sym)
6052 {
6053 return bfd_is_aarch64_special_symbol_name (sym->name,
6054 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6055 }
6056
6057 /* This is a copy of elf_find_function () from elf.c except that
6058 AArch64 mapping symbols are ignored when looking for function names. */
6059
6060 static bfd_boolean
6061 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6062 asymbol **symbols,
6063 asection *section,
6064 bfd_vma offset,
6065 const char **filename_ptr,
6066 const char **functionname_ptr)
6067 {
6068 const char *filename = NULL;
6069 asymbol *func = NULL;
6070 bfd_vma low_func = 0;
6071 asymbol **p;
6072
6073 for (p = symbols; *p != NULL; p++)
6074 {
6075 elf_symbol_type *q;
6076
6077 q = (elf_symbol_type *) * p;
6078
6079 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6080 {
6081 default:
6082 break;
6083 case STT_FILE:
6084 filename = bfd_asymbol_name (&q->symbol);
6085 break;
6086 case STT_FUNC:
6087 case STT_NOTYPE:
6088 /* Skip mapping symbols. */
6089 if ((q->symbol.flags & BSF_LOCAL)
6090 && (bfd_is_aarch64_special_symbol_name
6091 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6092 continue;
6093 /* Fall through. */
6094 if (bfd_get_section (&q->symbol) == section
6095 && q->symbol.value >= low_func && q->symbol.value <= offset)
6096 {
6097 func = (asymbol *) q;
6098 low_func = q->symbol.value;
6099 }
6100 break;
6101 }
6102 }
6103
6104 if (func == NULL)
6105 return FALSE;
6106
6107 if (filename_ptr)
6108 *filename_ptr = filename;
6109 if (functionname_ptr)
6110 *functionname_ptr = bfd_asymbol_name (func);
6111
6112 return TRUE;
6113 }
6114
6115
6116 /* Find the nearest line to a particular section and offset, for error
6117 reporting. This code is a duplicate of the code in elf.c, except
6118 that it uses aarch64_elf_find_function. */
6119
6120 static bfd_boolean
6121 elfNN_aarch64_find_nearest_line (bfd *abfd,
6122 asymbol **symbols,
6123 asection *section,
6124 bfd_vma offset,
6125 const char **filename_ptr,
6126 const char **functionname_ptr,
6127 unsigned int *line_ptr,
6128 unsigned int *discriminator_ptr)
6129 {
6130 bfd_boolean found = FALSE;
6131
6132 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6133 filename_ptr, functionname_ptr,
6134 line_ptr, discriminator_ptr,
6135 dwarf_debug_sections, 0,
6136 &elf_tdata (abfd)->dwarf2_find_line_info))
6137 {
6138 if (!*functionname_ptr)
6139 aarch64_elf_find_function (abfd, symbols, section, offset,
6140 *filename_ptr ? NULL : filename_ptr,
6141 functionname_ptr);
6142
6143 return TRUE;
6144 }
6145
6146 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6147 toolchain uses DWARF1. */
6148
6149 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6150 &found, filename_ptr,
6151 functionname_ptr, line_ptr,
6152 &elf_tdata (abfd)->line_info))
6153 return FALSE;
6154
6155 if (found && (*functionname_ptr || *line_ptr))
6156 return TRUE;
6157
6158 if (symbols == NULL)
6159 return FALSE;
6160
6161 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6162 filename_ptr, functionname_ptr))
6163 return FALSE;
6164
6165 *line_ptr = 0;
6166 return TRUE;
6167 }
6168
6169 static bfd_boolean
6170 elfNN_aarch64_find_inliner_info (bfd *abfd,
6171 const char **filename_ptr,
6172 const char **functionname_ptr,
6173 unsigned int *line_ptr)
6174 {
6175 bfd_boolean found;
6176 found = _bfd_dwarf2_find_inliner_info
6177 (abfd, filename_ptr,
6178 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6179 return found;
6180 }
6181
6182
6183 static void
6184 elfNN_aarch64_post_process_headers (bfd *abfd,
6185 struct bfd_link_info *link_info)
6186 {
6187 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6188
6189 i_ehdrp = elf_elfheader (abfd);
6190 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6191
6192 _bfd_elf_post_process_headers (abfd, link_info);
6193 }
6194
6195 static enum elf_reloc_type_class
6196 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6197 const asection *rel_sec ATTRIBUTE_UNUSED,
6198 const Elf_Internal_Rela *rela)
6199 {
6200 switch ((int) ELFNN_R_TYPE (rela->r_info))
6201 {
6202 case AARCH64_R (RELATIVE):
6203 return reloc_class_relative;
6204 case AARCH64_R (JUMP_SLOT):
6205 return reloc_class_plt;
6206 case AARCH64_R (COPY):
6207 return reloc_class_copy;
6208 default:
6209 return reloc_class_normal;
6210 }
6211 }
6212
6213 /* Handle an AArch64 specific section when reading an object file. This is
6214 called when bfd_section_from_shdr finds a section with an unknown
6215 type. */
6216
6217 static bfd_boolean
6218 elfNN_aarch64_section_from_shdr (bfd *abfd,
6219 Elf_Internal_Shdr *hdr,
6220 const char *name, int shindex)
6221 {
6222 /* There ought to be a place to keep ELF backend specific flags, but
6223 at the moment there isn't one. We just keep track of the
6224 sections by their name, instead. Fortunately, the ABI gives
6225 names for all the AArch64 specific sections, so we will probably get
6226 away with this. */
6227 switch (hdr->sh_type)
6228 {
6229 case SHT_AARCH64_ATTRIBUTES:
6230 break;
6231
6232 default:
6233 return FALSE;
6234 }
6235
6236 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6237 return FALSE;
6238
6239 return TRUE;
6240 }
6241
6242 /* A structure used to record a list of sections, independently
6243 of the next and prev fields in the asection structure. */
6244 typedef struct section_list
6245 {
6246 asection *sec;
6247 struct section_list *next;
6248 struct section_list *prev;
6249 }
6250 section_list;
6251
6252 /* Unfortunately we need to keep a list of sections for which
6253 an _aarch64_elf_section_data structure has been allocated. This
6254 is because it is possible for functions like elfNN_aarch64_write_section
6255 to be called on a section which has had an elf_data_structure
6256 allocated for it (and so the used_by_bfd field is valid) but
6257 for which the AArch64 extended version of this structure - the
6258 _aarch64_elf_section_data structure - has not been allocated. */
6259 static section_list *sections_with_aarch64_elf_section_data = NULL;
6260
6261 static void
6262 record_section_with_aarch64_elf_section_data (asection *sec)
6263 {
6264 struct section_list *entry;
6265
6266 entry = bfd_malloc (sizeof (*entry));
6267 if (entry == NULL)
6268 return;
6269 entry->sec = sec;
6270 entry->next = sections_with_aarch64_elf_section_data;
6271 entry->prev = NULL;
6272 if (entry->next != NULL)
6273 entry->next->prev = entry;
6274 sections_with_aarch64_elf_section_data = entry;
6275 }
6276
6277 static struct section_list *
6278 find_aarch64_elf_section_entry (asection *sec)
6279 {
6280 struct section_list *entry;
6281 static struct section_list *last_entry = NULL;
6282
6283 /* This is a short cut for the typical case where the sections are added
6284 to the sections_with_aarch64_elf_section_data list in forward order and
6285 then looked up here in backwards order. This makes a real difference
6286 to the ld-srec/sec64k.exp linker test. */
6287 entry = sections_with_aarch64_elf_section_data;
6288 if (last_entry != NULL)
6289 {
6290 if (last_entry->sec == sec)
6291 entry = last_entry;
6292 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6293 entry = last_entry->next;
6294 }
6295
6296 for (; entry; entry = entry->next)
6297 if (entry->sec == sec)
6298 break;
6299
6300 if (entry)
6301 /* Record the entry prior to this one - it is the entry we are
6302 most likely to want to locate next time. Also this way if we
6303 have been called from
6304 unrecord_section_with_aarch64_elf_section_data () we will not
6305 be caching a pointer that is about to be freed. */
6306 last_entry = entry->prev;
6307
6308 return entry;
6309 }
6310
6311 static void
6312 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6313 {
6314 struct section_list *entry;
6315
6316 entry = find_aarch64_elf_section_entry (sec);
6317
6318 if (entry)
6319 {
6320 if (entry->prev != NULL)
6321 entry->prev->next = entry->next;
6322 if (entry->next != NULL)
6323 entry->next->prev = entry->prev;
6324 if (entry == sections_with_aarch64_elf_section_data)
6325 sections_with_aarch64_elf_section_data = entry->next;
6326 free (entry);
6327 }
6328 }
6329
6330
6331 typedef struct
6332 {
6333 void *finfo;
6334 struct bfd_link_info *info;
6335 asection *sec;
6336 int sec_shndx;
6337 int (*func) (void *, const char *, Elf_Internal_Sym *,
6338 asection *, struct elf_link_hash_entry *);
6339 } output_arch_syminfo;
6340
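/* Kinds of mapping symbol, as defined by the AArch64 ELF ABI: "$x" marks
   the start of a sequence of A64 instructions and "$d" the start of a
   sequence of data items.  */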
6341 enum map_symbol_type
6342 {
6343 AARCH64_MAP_INSN,
6344 AARCH64_MAP_DATA
6345 };
6346
6347
6348 /* Output a single mapping symbol. */
6349
6350 static bfd_boolean
6351 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6352 enum map_symbol_type type, bfd_vma offset)
6353 {
6354 static const char *names[2] = { "$x", "$d" };
6355 Elf_Internal_Sym sym;
6356
6357 sym.st_value = (osi->sec->output_section->vma
6358 + osi->sec->output_offset + offset);
6359 sym.st_size = 0;
6360 sym.st_other = 0;
6361 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6362 sym.st_shndx = osi->sec_shndx;
6363 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6364 }
6365
6366
6367
6368 /* Output mapping symbols for PLT entries associated with H. */
6369
6370 static bfd_boolean
6371 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6372 {
6373 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6374 bfd_vma addr;
6375
6376 if (h->root.type == bfd_link_hash_indirect)
6377 return TRUE;
6378
6379 if (h->root.type == bfd_link_hash_warning)
6380 /* When warning symbols are created, they **replace** the "real"
6381 entry in the hash table, thus we never get to see the real
6382 symbol in a hash traversal. So look at it now. */
6383 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6384
6385 if (h->plt.offset == (bfd_vma) - 1)
6386 return TRUE;
6387
6388 addr = h->plt.offset;
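/* Only the first PLT entry gets a mapping symbol; it presumably sits
   immediately after the 32 byte PLT header, hence the check against
   offset 32 below.  */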
6389 if (addr == 32)
6390 {
6391 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6392 return FALSE;
6393 }
6394 return TRUE;
6395 }
6396
6397
6398 /* Output a single local symbol for a generated stub. */
6399
6400 static bfd_boolean
6401 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6402 bfd_vma offset, bfd_vma size)
6403 {
6404 Elf_Internal_Sym sym;
6405
6406 sym.st_value = (osi->sec->output_section->vma
6407 + osi->sec->output_offset + offset);
6408 sym.st_size = size;
6409 sym.st_other = 0;
6410 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6411 sym.st_shndx = osi->sec_shndx;
6412 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6413 }
6414
6415 static bfd_boolean
6416 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6417 {
6418 struct elf_aarch64_stub_hash_entry *stub_entry;
6419 asection *stub_sec;
6420 bfd_vma addr;
6421 char *stub_name;
6422 output_arch_syminfo *osi;
6423
6424 /* Massage our args to the form they really have. */
6425 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6426 osi = (output_arch_syminfo *) in_arg;
6427
6428 stub_sec = stub_entry->stub_sec;
6429
6430 /* Ensure this stub is attached to the current section being
6431 processed. */
6432 if (stub_sec != osi->sec)
6433 return TRUE;
6434
6435 addr = (bfd_vma) stub_entry->stub_offset;
6436
6437 stub_name = stub_entry->output_name;
6438
6439 switch (stub_entry->stub_type)
6440 {
6441 case aarch64_stub_adrp_branch:
6442 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6443 sizeof (aarch64_adrp_branch_stub)))
6444 return FALSE;
6445 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6446 return FALSE;
6447 break;
6448 case aarch64_stub_long_branch:
6449 if (!elfNN_aarch64_output_stub_sym
6450 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6451 return FALSE;
6452 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6453 return FALSE;
6454 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6455 return FALSE;
6456 break;
6457 case aarch64_stub_erratum_835769_veneer:
6458 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6459 sizeof (aarch64_erratum_835769_stub)))
6460 return FALSE;
6461 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6462 return FALSE;
6463 break;
6464 default:
6465 abort ();
6466 }
6467
6468 return TRUE;
6469 }
6470
6471 /* Output mapping symbols for linker generated sections. */
6472
6473 static bfd_boolean
6474 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6475 struct bfd_link_info *info,
6476 void *finfo,
6477 int (*func) (void *, const char *,
6478 Elf_Internal_Sym *,
6479 asection *,
6480 struct elf_link_hash_entry
6481 *))
6482 {
6483 output_arch_syminfo osi;
6484 struct elf_aarch64_link_hash_table *htab;
6485
6486 htab = elf_aarch64_hash_table (info);
6487
6488 osi.finfo = finfo;
6489 osi.info = info;
6490 osi.func = func;
6491
6492 /* Long call stubs. */
6493 if (htab->stub_bfd && htab->stub_bfd->sections)
6494 {
6495 asection *stub_sec;
6496
6497 for (stub_sec = htab->stub_bfd->sections;
6498 stub_sec != NULL; stub_sec = stub_sec->next)
6499 {
6500 /* Ignore non-stub sections. */
6501 if (!strstr (stub_sec->name, STUB_SUFFIX))
6502 continue;
6503
6504 osi.sec = stub_sec;
6505
6506 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6507 (output_bfd, osi.sec->output_section);
6508
6509 /* The first instruction in a stub is always a branch. */
6510 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
6511 return FALSE;
6512
6513 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6514 &osi);
6515 }
6516 }
6517
6518 /* Finally, output mapping symbols for the PLT. */
6519 if (!htab->root.splt || htab->root.splt->size == 0)
6520 return TRUE;
6521
6522 /* For now we only emit a minimal set of mapping symbols for the PLT
(see elfNN_aarch64_output_plt_map). */
6523 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6524 (output_bfd, htab->root.splt->output_section);
6525 osi.sec = htab->root.splt;
6526
6527 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6528 (void *) &osi);
6529
6530 return TRUE;
6531
6532 }
6533
6534 /* Allocate target specific section data. */
6535
6536 static bfd_boolean
6537 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6538 {
6539 if (!sec->used_by_bfd)
6540 {
6541 _aarch64_elf_section_data *sdata;
6542 bfd_size_type amt = sizeof (*sdata);
6543
6544 sdata = bfd_zalloc (abfd, amt);
6545 if (sdata == NULL)
6546 return FALSE;
6547 sec->used_by_bfd = sdata;
6548 }
6549
6550 record_section_with_aarch64_elf_section_data (sec);
6551
6552 return _bfd_elf_new_section_hook (abfd, sec);
6553 }
6554
6555
6556 static void
6557 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6558 asection *sec,
6559 void *ignore ATTRIBUTE_UNUSED)
6560 {
6561 unrecord_section_with_aarch64_elf_section_data (sec);
6562 }
6563
6564 static bfd_boolean
6565 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6566 {
6567 if (abfd->sections)
6568 bfd_map_over_sections (abfd,
6569 unrecord_section_via_map_over_sections, NULL);
6570
6571 return _bfd_elf_close_and_cleanup (abfd);
6572 }
6573
6574 static bfd_boolean
6575 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6576 {
6577 if (abfd->sections)
6578 bfd_map_over_sections (abfd,
6579 unrecord_section_via_map_over_sections, NULL);
6580
6581 return _bfd_free_cached_info (abfd);
6582 }
6583
6584 /* Create dynamic sections. This is different from the ARM backend in that
6585 the got, plt, gotplt and their relocation sections are all created in the
6586 standard part of the bfd elf backend. */
6587
6588 static bfd_boolean
6589 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6590 struct bfd_link_info *info)
6591 {
6592 struct elf_aarch64_link_hash_table *htab;
6593
6594 /* We need to create .got section. */
6595 if (!aarch64_elf_create_got_section (dynobj, info))
6596 return FALSE;
6597
6598 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6599 return FALSE;
6600
6601 htab = elf_aarch64_hash_table (info);
6602 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6603 if (!info->shared)
6604 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6605
6606 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6607 abort ();
6608
6609 return TRUE;
6610 }
6611
6612
6613 /* Allocate space in .plt, .got and associated reloc sections for
6614 dynamic relocs. */
6615
6616 static bfd_boolean
6617 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6618 {
6619 struct bfd_link_info *info;
6620 struct elf_aarch64_link_hash_table *htab;
6621 struct elf_aarch64_link_hash_entry *eh;
6622 struct elf_dyn_relocs *p;
6623
6624 /* An example of a bfd_link_hash_indirect symbol is a versioned
6625 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6626 -> __gxx_personality_v0(bfd_link_hash_defined)
6627
6628 There is no need to process bfd_link_hash_indirect symbols here
6629 because we will also be presented with the concrete instance of
6630 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6631 called to copy all relevant data from the generic to the concrete
6632 symbol instance.
6633 */
6634 if (h->root.type == bfd_link_hash_indirect)
6635 return TRUE;
6636
6637 if (h->root.type == bfd_link_hash_warning)
6638 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6639
6640 info = (struct bfd_link_info *) inf;
6641 htab = elf_aarch64_hash_table (info);
6642
6643 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6644 here if it is defined and referenced in a non-shared object. */
6645 if (h->type == STT_GNU_IFUNC
6646 && h->def_regular)
6647 return TRUE;
6648 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6649 {
6650 /* Make sure this symbol is output as a dynamic symbol.
6651 Undefined weak syms won't yet be marked as dynamic. */
6652 if (h->dynindx == -1 && !h->forced_local)
6653 {
6654 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6655 return FALSE;
6656 }
6657
6658 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6659 {
6660 asection *s = htab->root.splt;
6661
6662 /* If this is the first .plt entry, make room for the special
6663 first entry. */
6664 if (s->size == 0)
6665 s->size += htab->plt_header_size;
6666
6667 h->plt.offset = s->size;
6668
6669 /* If this symbol is not defined in a regular file, and we are
6670 not generating a shared library, then set the symbol to this
6671 location in the .plt. This is required to make function
6672 pointers compare as equal between the normal executable and
6673 the shared library. */
6674 if (!info->shared && !h->def_regular)
6675 {
6676 h->root.u.def.section = s;
6677 h->root.u.def.value = h->plt.offset;
6678 }
6679
6680 /* Make room for this entry. For now we only create the
6681 small model PLT entries. We later need to find a way
6682 of relaxing into these from the large model PLT entries. */
6683 s->size += PLT_SMALL_ENTRY_SIZE;
6684
6685 /* We also need to make an entry in the .got.plt section, which
6686 will be placed in the .got section by the linker script. */
6687 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6688
6689 /* We also need to make an entry in the .rela.plt section. */
6690 htab->root.srelplt->size += RELOC_SIZE (htab);
6691
6692 /* We need to ensure that all GOT entries that serve the PLT
6693 are consecutive with the special GOT slots [0] [1] and
6694 [2]. Any additional relocations, such as
6695 R_AARCH64_TLSDESC, must be placed after the PLT related
6696 entries. We abuse the reloc_count such that during
6697 sizing we adjust reloc_count to indicate the number of
6698 PLT related reserved entries. In subsequent phases when
6699 filling in the contents of the reloc entries, PLT related
6700 entries are placed by computing their PLT index (0
6701 .. reloc_count), while other non-PLT relocs are placed
6702 at the slot indicated by reloc_count, and reloc_count is
6703 updated. */
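/* Rough sketch of the layout this produces (illustrative only):

   .got.plt:  [0] [1] [2] reserved | PLT-related GOT slots | TLSDESC entries
   .rela.plt: PLT relocs at indices 0 .. reloc_count | other relocs after  */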
6704
6705 htab->root.srelplt->reloc_count++;
6706 }
6707 else
6708 {
6709 h->plt.offset = (bfd_vma) - 1;
6710 h->needs_plt = 0;
6711 }
6712 }
6713 else
6714 {
6715 h->plt.offset = (bfd_vma) - 1;
6716 h->needs_plt = 0;
6717 }
6718
6719 eh = (struct elf_aarch64_link_hash_entry *) h;
6720 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6721
6722 if (h->got.refcount > 0)
6723 {
6724 bfd_boolean dyn;
6725 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6726
6727 h->got.offset = (bfd_vma) - 1;
6728
6729 dyn = htab->root.dynamic_sections_created;
6730
6731 /* Make sure this symbol is output as a dynamic symbol.
6732 Undefined weak syms won't yet be marked as dynamic. */
6733 if (dyn && h->dynindx == -1 && !h->forced_local)
6734 {
6735 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6736 return FALSE;
6737 }
6738
6739 if (got_type == GOT_UNKNOWN)
6740 {
6741 }
6742 else if (got_type == GOT_NORMAL)
6743 {
6744 h->got.offset = htab->root.sgot->size;
6745 htab->root.sgot->size += GOT_ENTRY_SIZE;
6746 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6747 || h->root.type != bfd_link_hash_undefweak)
6748 && (info->shared
6749 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6750 {
6751 htab->root.srelgot->size += RELOC_SIZE (htab);
6752 }
6753 }
6754 else
6755 {
6756 int indx;
6757 if (got_type & GOT_TLSDESC_GD)
6758 {
6759 eh->tlsdesc_got_jump_table_offset =
6760 (htab->root.sgotplt->size
6761 - aarch64_compute_jump_table_size (htab));
6762 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6763 h->got.offset = (bfd_vma) - 2;
6764 }
6765
6766 if (got_type & GOT_TLS_GD)
6767 {
6768 h->got.offset = htab->root.sgot->size;
6769 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6770 }
6771
6772 if (got_type & GOT_TLS_IE)
6773 {
6774 h->got.offset = htab->root.sgot->size;
6775 htab->root.sgot->size += GOT_ENTRY_SIZE;
6776 }
6777
6778 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6779 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6780 || h->root.type != bfd_link_hash_undefweak)
6781 && (info->shared
6782 || indx != 0
6783 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6784 {
6785 if (got_type & GOT_TLSDESC_GD)
6786 {
6787 htab->root.srelplt->size += RELOC_SIZE (htab);
6788 /* Note reloc_count not incremented here! We have
6789 already adjusted reloc_count for this relocation
6790 type. */
6791
6792 /* TLSDESC PLT is now needed, but not yet determined. */
6793 htab->tlsdesc_plt = (bfd_vma) - 1;
6794 }
6795
6796 if (got_type & GOT_TLS_GD)
6797 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6798
6799 if (got_type & GOT_TLS_IE)
6800 htab->root.srelgot->size += RELOC_SIZE (htab);
6801 }
6802 }
6803 }
6804 else
6805 {
6806 h->got.offset = (bfd_vma) - 1;
6807 }
6808
6809 if (eh->dyn_relocs == NULL)
6810 return TRUE;
6811
6812 /* In the shared -Bsymbolic case, discard space allocated for
6813 dynamic pc-relative relocs against symbols which turn out to be
6814 defined in regular objects. For the normal shared case, discard
6815 space for pc-relative relocs that have become local due to symbol
6816 visibility changes. */
6817
6818 if (info->shared)
6819 {
6820 /* Relocs that use pc_count are those that appear on a call
6821 insn, or certain REL relocs that can be generated via assembly.
6822 We want calls to protected symbols to resolve directly to the
6823 function rather than going via the plt. If people want
6824 function pointer comparisons to work as expected then they
6825 should avoid writing weird assembly. */
6826 if (SYMBOL_CALLS_LOCAL (info, h))
6827 {
6828 struct elf_dyn_relocs **pp;
6829
6830 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6831 {
6832 p->count -= p->pc_count;
6833 p->pc_count = 0;
6834 if (p->count == 0)
6835 *pp = p->next;
6836 else
6837 pp = &p->next;
6838 }
6839 }
6840
6841 /* Also discard relocs on undefined weak syms with non-default
6842 visibility. */
6843 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6844 {
6845 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6846 eh->dyn_relocs = NULL;
6847
6848 /* Make sure undefined weak symbols are output as a dynamic
6849 symbol in PIEs. */
6850 else if (h->dynindx == -1
6851 && !h->forced_local
6852 && !bfd_elf_link_record_dynamic_symbol (info, h))
6853 return FALSE;
6854 }
6855
6856 }
6857 else if (ELIMINATE_COPY_RELOCS)
6858 {
6859 /* For the non-shared case, discard space for relocs against
6860 symbols which turn out to need copy relocs or are not
6861 dynamic. */
6862
6863 if (!h->non_got_ref
6864 && ((h->def_dynamic
6865 && !h->def_regular)
6866 || (htab->root.dynamic_sections_created
6867 && (h->root.type == bfd_link_hash_undefweak
6868 || h->root.type == bfd_link_hash_undefined))))
6869 {
6870 /* Make sure this symbol is output as a dynamic symbol.
6871 Undefined weak syms won't yet be marked as dynamic. */
6872 if (h->dynindx == -1
6873 && !h->forced_local
6874 && !bfd_elf_link_record_dynamic_symbol (info, h))
6875 return FALSE;
6876
6877 /* If that succeeded, we know we'll be keeping all the
6878 relocs. */
6879 if (h->dynindx != -1)
6880 goto keep;
6881 }
6882
6883 eh->dyn_relocs = NULL;
6884
6885 keep:;
6886 }
6887
6888 /* Finally, allocate space. */
6889 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6890 {
6891 asection *sreloc;
6892
6893 sreloc = elf_section_data (p->sec)->sreloc;
6894
6895 BFD_ASSERT (sreloc != NULL);
6896
6897 sreloc->size += p->count * RELOC_SIZE (htab);
6898 }
6899
6900 return TRUE;
6901 }
6902
6903 /* Allocate space in .plt, .got and associated reloc sections for
6904 ifunc dynamic relocs. */
6905
6906 static bfd_boolean
6907 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6908 void *inf)
6909 {
6910 struct bfd_link_info *info;
6911 struct elf_aarch64_link_hash_table *htab;
6912 struct elf_aarch64_link_hash_entry *eh;
6913
6914 /* An example of a bfd_link_hash_indirect symbol is a versioned
6915 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6916 -> __gxx_personality_v0(bfd_link_hash_defined)
6917
6918 There is no need to process bfd_link_hash_indirect symbols here
6919 because we will also be presented with the concrete instance of
6920 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6921 called to copy all relevant data from the generic to the concrete
6922 symbol instance.
6923 */
6924 if (h->root.type == bfd_link_hash_indirect)
6925 return TRUE;
6926
6927 if (h->root.type == bfd_link_hash_warning)
6928 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6929
6930 info = (struct bfd_link_info *) inf;
6931 htab = elf_aarch64_hash_table (info);
6932
6933 eh = (struct elf_aarch64_link_hash_entry *) h;
6934
6935 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6936 here if it is defined and referenced in a non-shared object. */
6937 if (h->type == STT_GNU_IFUNC
6938 && h->def_regular)
6939 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
6940 &eh->dyn_relocs,
6941 htab->plt_entry_size,
6942 htab->plt_header_size,
6943 GOT_ENTRY_SIZE);
6944 return TRUE;
6945 }
6946
6947 /* Allocate space in .plt, .got and associated reloc sections for
6948 local dynamic relocs. */
6949
6950 static bfd_boolean
6951 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
6952 {
6953 struct elf_link_hash_entry *h
6954 = (struct elf_link_hash_entry *) *slot;
6955
6956 if (h->type != STT_GNU_IFUNC
6957 || !h->def_regular
6958 || !h->ref_regular
6959 || !h->forced_local
6960 || h->root.type != bfd_link_hash_defined)
6961 abort ();
6962
6963 return elfNN_aarch64_allocate_dynrelocs (h, inf);
6964 }
6965
6966 /* Allocate space in .plt, .got and associated reloc sections for
6967 local ifunc dynamic relocs. */
6968
6969 static bfd_boolean
6970 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
6971 {
6972 struct elf_link_hash_entry *h
6973 = (struct elf_link_hash_entry *) *slot;
6974
6975 if (h->type != STT_GNU_IFUNC
6976 || !h->def_regular
6977 || !h->ref_regular
6978 || !h->forced_local
6979 || h->root.type != bfd_link_hash_defined)
6980 abort ();
6981
6982 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
6983 }
6984
6985 /* This is the most important function of all. Innocuously named
6986 though! */
6987 static bfd_boolean
6988 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6989 struct bfd_link_info *info)
6990 {
6991 struct elf_aarch64_link_hash_table *htab;
6992 bfd *dynobj;
6993 asection *s;
6994 bfd_boolean relocs;
6995 bfd *ibfd;
6996
6997 htab = elf_aarch64_hash_table (info);
6998 dynobj = htab->root.dynobj;
6999
7000 BFD_ASSERT (dynobj != NULL);
7001
7002 if (htab->root.dynamic_sections_created)
7003 {
7004 if (info->executable)
7005 {
7006 s = bfd_get_linker_section (dynobj, ".interp");
7007 if (s == NULL)
7008 abort ();
7009 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7010 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7011 }
7012 }
7013
7014 /* Set up .got offsets for local syms, and space for local dynamic
7015 relocs. */
7016 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7017 {
7018 struct elf_aarch64_local_symbol *locals = NULL;
7019 Elf_Internal_Shdr *symtab_hdr;
7020 asection *srel;
7021 unsigned int i;
7022
7023 if (!is_aarch64_elf (ibfd))
7024 continue;
7025
7026 for (s = ibfd->sections; s != NULL; s = s->next)
7027 {
7028 struct elf_dyn_relocs *p;
7029
7030 for (p = (struct elf_dyn_relocs *)
7031 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7032 {
7033 if (!bfd_is_abs_section (p->sec)
7034 && bfd_is_abs_section (p->sec->output_section))
7035 {
7036 /* Input section has been discarded, either because
7037 it is a copy of a linkonce section or due to
7038 linker script /DISCARD/, so we'll be discarding
7039 the relocs too. */
7040 }
7041 else if (p->count != 0)
7042 {
7043 srel = elf_section_data (p->sec)->sreloc;
7044 srel->size += p->count * RELOC_SIZE (htab);
7045 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7046 info->flags |= DF_TEXTREL;
7047 }
7048 }
7049 }
7050
7051 locals = elf_aarch64_locals (ibfd);
7052 if (!locals)
7053 continue;
7054
7055 symtab_hdr = &elf_symtab_hdr (ibfd);
7056 srel = htab->root.srelgot;
7057 for (i = 0; i < symtab_hdr->sh_info; i++)
7058 {
7059 locals[i].got_offset = (bfd_vma) - 1;
7060 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7061 if (locals[i].got_refcount > 0)
7062 {
7063 unsigned got_type = locals[i].got_type;
7064 if (got_type & GOT_TLSDESC_GD)
7065 {
7066 locals[i].tlsdesc_got_jump_table_offset =
7067 (htab->root.sgotplt->size
7068 - aarch64_compute_jump_table_size (htab));
7069 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7070 locals[i].got_offset = (bfd_vma) - 2;
7071 }
7072
7073 if (got_type & GOT_TLS_GD)
7074 {
7075 locals[i].got_offset = htab->root.sgot->size;
7076 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7077 }
7078
7079 if (got_type & GOT_TLS_IE)
7080 {
7081 locals[i].got_offset = htab->root.sgot->size;
7082 htab->root.sgot->size += GOT_ENTRY_SIZE;
7083 }
7084
7085 if (got_type == GOT_UNKNOWN)
7086 {
7087 }
7088
7089 if (got_type == GOT_NORMAL)
7090 {
7091 }
7092
7093 if (info->shared)
7094 {
7095 if (got_type & GOT_TLSDESC_GD)
7096 {
7097 htab->root.srelplt->size += RELOC_SIZE (htab);
7098 /* Note RELOC_COUNT not incremented here! */
7099 htab->tlsdesc_plt = (bfd_vma) - 1;
7100 }
7101
7102 if (got_type & GOT_TLS_GD)
7103 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7104
7105 if (got_type & GOT_TLS_IE)
7106 htab->root.srelgot->size += RELOC_SIZE (htab);
7107 }
7108 }
7109 else
7110 {
7111 locals[i].got_refcount = (bfd_vma) - 1;
7112 }
7113 }
7114 }
7115
7116
7117 /* Allocate global sym .plt and .got entries, and space for global
7118 sym dynamic relocs. */
7119 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7120 info);
7121
7122 /* Allocate global ifunc sym .plt and .got entries, and space for global
7123 ifunc sym dynamic relocs. */
7124 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7125 info);
7126
7127 /* Allocate .plt and .got entries, and space for local symbols. */
7128 htab_traverse (htab->loc_hash_table,
7129 elfNN_aarch64_allocate_local_dynrelocs,
7130 info);
7131
7132 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7133 htab_traverse (htab->loc_hash_table,
7134 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7135 info);
7136
7137 /* For every jump slot reserved in the sgotplt, reloc_count is
7138 incremented. However, when we reserve space for TLS descriptors,
7139 it's not incremented, so in order to compute the space reserved
7140 for them, it suffices to multiply the reloc count by the jump
7141 slot size. */
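/* Equivalently (a paraphrase, assuming aarch64_compute_jump_table_size
   multiplies the srelplt reloc count by the GOT entry size):
   sgotplt_jump_table_size = srelplt->reloc_count * GOT_ENTRY_SIZE.  */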
7142
7143 if (htab->root.srelplt)
7144 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7145
7146 if (htab->tlsdesc_plt)
7147 {
7148 if (htab->root.splt->size == 0)
7149 htab->root.splt->size += PLT_ENTRY_SIZE;
7150
7151 htab->tlsdesc_plt = htab->root.splt->size;
7152 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7153
7154 /* If we're not using lazy TLS relocations, don't generate the
7155 GOT entry required. */
7156 if (!(info->flags & DF_BIND_NOW))
7157 {
7158 htab->dt_tlsdesc_got = htab->root.sgot->size;
7159 htab->root.sgot->size += GOT_ENTRY_SIZE;
7160 }
7161 }
7162
7163 /* Initialise mapping symbol information for later use to distinguish
7164 between code and data while scanning for erratum 835769. */
7165 if (htab->fix_erratum_835769)
7166 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7167 {
7168 if (!is_aarch64_elf (ibfd))
7169 continue;
7170 bfd_elfNN_aarch64_init_maps (ibfd);
7171 }
7172
7173 /* We now have determined the sizes of the various dynamic sections.
7174 Allocate memory for them. */
7175 relocs = FALSE;
7176 for (s = dynobj->sections; s != NULL; s = s->next)
7177 {
7178 if ((s->flags & SEC_LINKER_CREATED) == 0)
7179 continue;
7180
7181 if (s == htab->root.splt
7182 || s == htab->root.sgot
7183 || s == htab->root.sgotplt
7184 || s == htab->root.iplt
7185 || s == htab->root.igotplt || s == htab->sdynbss)
7186 {
7187 /* Strip this section if we don't need it; see the
7188 comment below. */
7189 }
7190 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7191 {
7192 if (s->size != 0 && s != htab->root.srelplt)
7193 relocs = TRUE;
7194
7195 /* We use the reloc_count field as a counter if we need
7196 to copy relocs into the output file. */
7197 if (s != htab->root.srelplt)
7198 s->reloc_count = 0;
7199 }
7200 else
7201 {
7202 /* It's not one of our sections, so don't allocate space. */
7203 continue;
7204 }
7205
7206 if (s->size == 0)
7207 {
7208 /* If we don't need this section, strip it from the
7209 output file. This is mostly to handle .rela.bss and
7210 .rela.plt. We must create both sections in
7211 create_dynamic_sections, because they must be created
7212 before the linker maps input sections to output
7213 sections. The linker does that before
7214 adjust_dynamic_symbol is called, and it is that
7215 function which decides whether anything needs to go
7216 into these sections. */
7217
7218 s->flags |= SEC_EXCLUDE;
7219 continue;
7220 }
7221
7222 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7223 continue;
7224
7225 /* Allocate memory for the section contents. We use bfd_zalloc
7226 here in case unused entries are not reclaimed before the
7227 section's contents are written out. This should not happen,
7228 but this way if it does, we get an R_AARCH64_NONE reloc instead
7229 of garbage. */
7230 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7231 if (s->contents == NULL)
7232 return FALSE;
7233 }
7234
7235 if (htab->root.dynamic_sections_created)
7236 {
7237 /* Add some entries to the .dynamic section. We fill in the
7238 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7239 must add the entries now so that we get the correct size for
7240 the .dynamic section. The DT_DEBUG entry is filled in by the
7241 dynamic linker and used by the debugger. */
7242 #define add_dynamic_entry(TAG, VAL) \
7243 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7244
7245 if (info->executable)
7246 {
7247 if (!add_dynamic_entry (DT_DEBUG, 0))
7248 return FALSE;
7249 }
7250
7251 if (htab->root.splt->size != 0)
7252 {
7253 if (!add_dynamic_entry (DT_PLTGOT, 0)
7254 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7255 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7256 || !add_dynamic_entry (DT_JMPREL, 0))
7257 return FALSE;
7258
7259 if (htab->tlsdesc_plt
7260 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7261 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7262 return FALSE;
7263 }
7264
7265 if (relocs)
7266 {
7267 if (!add_dynamic_entry (DT_RELA, 0)
7268 || !add_dynamic_entry (DT_RELASZ, 0)
7269 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7270 return FALSE;
7271
7272 /* If any dynamic relocs apply to a read-only section,
7273 then we need a DT_TEXTREL entry. */
7274 if ((info->flags & DF_TEXTREL) != 0)
7275 {
7276 if (!add_dynamic_entry (DT_TEXTREL, 0))
7277 return FALSE;
7278 }
7279 }
7280 }
7281 #undef add_dynamic_entry
7282
7283 return TRUE;
7284 }
7285
7286 static inline void
7287 elf_aarch64_update_plt_entry (bfd *output_bfd,
7288 bfd_reloc_code_real_type r_type,
7289 bfd_byte *plt_entry, bfd_vma value)
7290 {
7291 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7292
7293 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7294 }
7295
7296 static void
7297 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7298 struct elf_aarch64_link_hash_table
7299 *htab, bfd *output_bfd,
7300 struct bfd_link_info *info)
7301 {
7302 bfd_byte *plt_entry;
7303 bfd_vma plt_index;
7304 bfd_vma got_offset;
7305 bfd_vma gotplt_entry_address;
7306 bfd_vma plt_entry_address;
7307 Elf_Internal_Rela rela;
7308 bfd_byte *loc;
7309 asection *plt, *gotplt, *relplt;
7310
7311 /* When building a static executable, use .iplt, .igot.plt and
7312 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7313 if (htab->root.splt != NULL)
7314 {
7315 plt = htab->root.splt;
7316 gotplt = htab->root.sgotplt;
7317 relplt = htab->root.srelplt;
7318 }
7319 else
7320 {
7321 plt = htab->root.iplt;
7322 gotplt = htab->root.igotplt;
7323 relplt = htab->root.irelplt;
7324 }
7325
7326 /* Get the index in the procedure linkage table which
7327 corresponds to this symbol. This is the index of this symbol
7328 in all the symbols for which we are making plt entries. The
7329 first entry in the procedure linkage table is reserved.
7330
7331 Get the offset into the .got table of the entry that
7332 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7333 bytes. The first three are reserved for the dynamic linker.
7334
7335 For static executables, we don't reserve anything. */
7336
7337 if (plt == htab->root.splt)
7338 {
7339 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7340 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7341 }
7342 else
7343 {
7344 plt_index = h->plt.offset / htab->plt_entry_size;
7345 got_offset = plt_index * GOT_ENTRY_SIZE;
7346 }
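/* Worked example for the usual dynamic layout: for the first non-reserved
   PLT stub, h->plt.offset equals plt_header_size, so plt_index is 0 and
   got_offset is 3 * GOT_ENTRY_SIZE -- the GOT.PLT slot straight after the
   three entries reserved for the dynamic linker.  */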
7347
7348 plt_entry = plt->contents + h->plt.offset;
7349 plt_entry_address = plt->output_section->vma
7350 + plt->output_offset + h->plt.offset;
7351 gotplt_entry_address = gotplt->output_section->vma +
7352 gotplt->output_offset + got_offset;
7353
7354 /* Copy in the boiler-plate for the PLTn entry. */
7355 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7356
7357 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7358 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7359 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7360 plt_entry,
7361 PG (gotplt_entry_address) -
7362 PG (plt_entry_address));
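/* PG and PG_OFFSET presumably split an address into its 4K page base and
   its low 12 bits respectively, so the value encoded into the ADRP above
   is the page delta between the GOT.PLT slot and this PLT entry, and the
   values below are the within-page offsets used by the LDR and ADD.  */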
7363
7364 /* Fill in the lo12 bits for the load from the pltgot. */
7365 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7366 plt_entry + 4,
7367 PG_OFFSET (gotplt_entry_address));
7368
7369 /* Fill in the lo12 bits for the add from the pltgot entry. */
7370 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7371 plt_entry + 8,
7372 PG_OFFSET (gotplt_entry_address));
7373
7374 /* All the GOTPLT entries are initially set to point at PLT0.  */
7375 bfd_put_NN (output_bfd,
7376 plt->output_section->vma + plt->output_offset,
7377 gotplt->contents + got_offset);
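/* In other words, until the dynamic linker resolves the symbol, a branch
   through this GOTPLT slot lands back on PLT0, which is expected to hand
   control to the lazy resolver.  */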
7378
7379 rela.r_offset = gotplt_entry_address;
7380
7381 if (h->dynindx == -1
7382 || ((info->executable
7383 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7384 && h->def_regular
7385 && h->type == STT_GNU_IFUNC))
7386 {
7387 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7388 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7389 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7390 rela.r_addend = (h->root.u.def.value
7391 + h->root.u.def.section->output_section->vma
7392 + h->root.u.def.section->output_offset);
7393 }
7394 else
7395 {
7396 /* Fill in the entry in the .rela.plt section. */
7397 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7398 rela.r_addend = 0;
7399 }
7400
7401 /* Compute the location of the relocation entry to use based on the
7402 PLT index, and do not adjust reloc_count; it has already been
7403 adjusted to account for this entry.  */
7404 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7405 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7406 }
7407
7408 /* Size sections even though they're not dynamic.  We use this hook to
7409 set up _TLS_MODULE_BASE_, if needed.  */
7410
7411 static bfd_boolean
7412 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7413 struct bfd_link_info *info)
7414 {
7415 asection *tls_sec;
7416
7417 if (info->relocatable)
7418 return TRUE;
7419
7420 tls_sec = elf_hash_table (info)->tls_sec;
7421
7422 if (tls_sec)
7423 {
7424 struct elf_link_hash_entry *tlsbase;
7425
7426 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7427 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7428
7429 if (tlsbase)
7430 {
7431 struct bfd_link_hash_entry *h = NULL;
7432 const struct elf_backend_data *bed =
7433 get_elf_backend_data (output_bfd);
7434
7435 if (!(_bfd_generic_link_add_one_symbol
7436 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7437 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7438 return FALSE;
7439
7440 tlsbase->type = STT_TLS;
7441 tlsbase = (struct elf_link_hash_entry *) h;
7442 tlsbase->def_regular = 1;
7443 tlsbase->other = STV_HIDDEN;
7444 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7445 }
7446 }
7447
7448 return TRUE;
7449 }
7450
7451 /* Finish up dynamic symbol handling. We set the contents of various
7452 dynamic sections here. */
7453 static bfd_boolean
7454 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7455 struct bfd_link_info *info,
7456 struct elf_link_hash_entry *h,
7457 Elf_Internal_Sym *sym)
7458 {
7459 struct elf_aarch64_link_hash_table *htab;
7460 htab = elf_aarch64_hash_table (info);
7461
7462 if (h->plt.offset != (bfd_vma) - 1)
7463 {
7464 asection *plt, *gotplt, *relplt;
7465
7466 /* This symbol has an entry in the procedure linkage table. Set
7467 it up. */
7468
7469 /* When building a static executable, use .iplt, .igot.plt and
7470 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7471 if (htab->root.splt != NULL)
7472 {
7473 plt = htab->root.splt;
7474 gotplt = htab->root.sgotplt;
7475 relplt = htab->root.srelplt;
7476 }
7477 else
7478 {
7479 plt = htab->root.iplt;
7480 gotplt = htab->root.igotplt;
7481 relplt = htab->root.irelplt;
7482 }
7483
7484 /* This symbol has an entry in the procedure linkage table. Set
7485 it up. */
7486 if ((h->dynindx == -1
7487 && !((h->forced_local || info->executable)
7488 && h->def_regular
7489 && h->type == STT_GNU_IFUNC))
7490 || plt == NULL
7491 || gotplt == NULL
7492 || relplt == NULL)
7493 abort ();
7494
7495 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7496 if (!h->def_regular)
7497 {
7498 /* Mark the symbol as undefined, rather than as defined in
7499 the .plt section. */
7500 sym->st_shndx = SHN_UNDEF;
7501 /* If the symbol is weak we need to clear the value.
7502 Otherwise, the PLT entry would provide a definition for
7503 the symbol even if the symbol wasn't defined anywhere,
7504 and so its address would never compare equal to NULL.
7505 Leave the value if there were any relocations where
7506 pointer equality matters (this is a clue for the dynamic
7507 linker, to make function pointer comparisons work between
7508 an application and a shared library).  */
7509 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7510 sym->st_value = 0;
7511 }
7512 }
7513
7514 if (h->got.offset != (bfd_vma) - 1
7515 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7516 {
7517 Elf_Internal_Rela rela;
7518 bfd_byte *loc;
7519
7520 /* This symbol has an entry in the global offset table. Set it
7521 up. */
7522 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7523 abort ();
7524
7525 rela.r_offset = (htab->root.sgot->output_section->vma
7526 + htab->root.sgot->output_offset
7527 + (h->got.offset & ~(bfd_vma) 1));
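/* The low bit of h->got.offset is evidently used as a flag (see the
   BFD_ASSERTs below), so it is masked off here to recover the real GOT
   offset.  */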
7528
7529 if (h->def_regular
7530 && h->type == STT_GNU_IFUNC)
7531 {
7532 if (info->shared)
7533 {
7534 /* Generate R_AARCH64_GLOB_DAT. */
7535 goto do_glob_dat;
7536 }
7537 else
7538 {
7539 asection *plt;
7540
7541 if (!h->pointer_equality_needed)
7542 abort ();
7543
7544 /* For a non-shared object we can't use .got.plt, which holds
7545 the real function address, when pointer equality is needed.
7546 Instead we load the GOT entry with the address of the PLT entry.  */
7547 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7548 bfd_put_NN (output_bfd, (plt->output_section->vma
7549 + plt->output_offset
7550 + h->plt.offset),
7551 htab->root.sgot->contents
7552 + (h->got.offset & ~(bfd_vma) 1));
7553 return TRUE;
7554 }
7555 }
7556 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7557 {
7558 if (!h->def_regular)
7559 return FALSE;
7560
7561 BFD_ASSERT ((h->got.offset & 1) != 0);
7562 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7563 rela.r_addend = (h->root.u.def.value
7564 + h->root.u.def.section->output_section->vma
7565 + h->root.u.def.section->output_offset);
7566 }
7567 else
7568 {
7569 do_glob_dat:
7570 BFD_ASSERT ((h->got.offset & 1) == 0);
7571 bfd_put_NN (output_bfd, (bfd_vma) 0,
7572 htab->root.sgot->contents + h->got.offset);
7573 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7574 rela.r_addend = 0;
7575 }
7576
7577 loc = htab->root.srelgot->contents;
7578 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7579 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7580 }
7581
7582 if (h->needs_copy)
7583 {
7584 Elf_Internal_Rela rela;
7585 bfd_byte *loc;
7586
7587 /* This symbol needs a copy reloc. Set it up. */
7588
7589 if (h->dynindx == -1
7590 || (h->root.type != bfd_link_hash_defined
7591 && h->root.type != bfd_link_hash_defweak)
7592 || htab->srelbss == NULL)
7593 abort ();
7594
7595 rela.r_offset = (h->root.u.def.value
7596 + h->root.u.def.section->output_section->vma
7597 + h->root.u.def.section->output_offset);
7598 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7599 rela.r_addend = 0;
7600 loc = htab->srelbss->contents;
7601 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7602 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7603 }
7604
7605 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7606 be NULL for local symbols. */
7607 if (sym != NULL
7608 && (h == elf_hash_table (info)->hdynamic
7609 || h == elf_hash_table (info)->hgot))
7610 sym->st_shndx = SHN_ABS;
7611
7612 return TRUE;
7613 }
7614
7615 /* Finish up local dynamic symbol handling. We set the contents of
7616 various dynamic sections here. */
7617
7618 static bfd_boolean
7619 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7620 {
7621 struct elf_link_hash_entry *h
7622 = (struct elf_link_hash_entry *) *slot;
7623 struct bfd_link_info *info
7624 = (struct bfd_link_info *) inf;
7625
7626 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7627 info, h, NULL);
7628 }
7629
7630 static void
7631 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7632 struct elf_aarch64_link_hash_table
7633 *htab)
7634 {
7635 /* Fill in PLT0.  FIXME (RR): this doesn't distinguish between
7636 small and large PLTs and at the moment just generates
7637 the small PLT.  */
7638
7639 /* PLT0 of the small PLT looks like this in ELF64 -
7640 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7641 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7642 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7643 // symbol resolver
7644 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7645 // GOTPLT entry for this.
7646 br x17
7647 PLT0 will be slightly different in ELF32 due to the different GOT
7648 entry size.
7649 */
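/* Note that only the ADRP, LDR and ADD at offsets 4, 8 and 12 of PLT0 are
   patched below, all against the address of GOT[2]; the leading stp and
   the final br take no fixup.  */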
7650 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7651 bfd_vma plt_base;
7652
7653
7654 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7655 PLT_ENTRY_SIZE);
7656 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7657 PLT_ENTRY_SIZE;
7658
7659 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7660 + htab->root.sgotplt->output_offset
7661 + GOT_ENTRY_SIZE * 2);
7662
7663 plt_base = htab->root.splt->output_section->vma +
7664 htab->root.splt->output_offset;
7665
7666 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7667 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7668 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7669 htab->root.splt->contents + 4,
7670 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7671
7672 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7673 htab->root.splt->contents + 8,
7674 PG_OFFSET (plt_got_2nd_ent));
7675
7676 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7677 htab->root.splt->contents + 12,
7678 PG_OFFSET (plt_got_2nd_ent));
7679 }
7680
7681 static bfd_boolean
7682 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7683 struct bfd_link_info *info)
7684 {
7685 struct elf_aarch64_link_hash_table *htab;
7686 bfd *dynobj;
7687 asection *sdyn;
7688
7689 htab = elf_aarch64_hash_table (info);
7690 dynobj = htab->root.dynobj;
7691 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7692
7693 if (htab->root.dynamic_sections_created)
7694 {
7695 ElfNN_External_Dyn *dyncon, *dynconend;
7696
7697 if (sdyn == NULL || htab->root.sgot == NULL)
7698 abort ();
7699
7700 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7701 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7702 for (; dyncon < dynconend; dyncon++)
7703 {
7704 Elf_Internal_Dyn dyn;
7705 asection *s;
7706
7707 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7708
7709 switch (dyn.d_tag)
7710 {
7711 default:
7712 continue;
7713
7714 case DT_PLTGOT:
7715 s = htab->root.sgotplt;
7716 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7717 break;
7718
7719 case DT_JMPREL:
7720 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7721 break;
7722
7723 case DT_PLTRELSZ:
7724 s = htab->root.srelplt;
7725 dyn.d_un.d_val = s->size;
7726 break;
7727
7728 case DT_RELASZ:
7729 /* The procedure linkage table relocs (DT_JMPREL) should
7730 not be included in the overall relocs (DT_RELA).
7731 Therefore, we override the DT_RELASZ entry here to
7732 make it not include the JMPREL relocs. Since the
7733 linker script arranges for .rela.plt to follow all
7734 other relocation sections, we don't have to worry
7735 about changing the DT_RELA entry. */
7736 if (htab->root.srelplt != NULL)
7737 {
7738 s = htab->root.srelplt;
7739 dyn.d_un.d_val -= s->size;
7740 }
7741 break;
7742
7743 case DT_TLSDESC_PLT:
7744 s = htab->root.splt;
7745 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7746 + htab->tlsdesc_plt;
7747 break;
7748
7749 case DT_TLSDESC_GOT:
7750 s = htab->root.sgot;
7751 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7752 + htab->dt_tlsdesc_got;
7753 break;
7754 }
7755
7756 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7757 }
7758
7759 }
7760
7761 /* Fill in the special first entry in the procedure linkage table. */
7762 if (htab->root.splt && htab->root.splt->size > 0)
7763 {
7764 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7765
7766 elf_section_data (htab->root.splt->output_section)->
7767 this_hdr.sh_entsize = htab->plt_entry_size;
7768
7769
7770 if (htab->tlsdesc_plt)
7771 {
7772 bfd_put_NN (output_bfd, (bfd_vma) 0,
7773 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7774
7775 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7776 elfNN_aarch64_tlsdesc_small_plt_entry,
7777 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
7778
7779 {
7780 bfd_vma adrp1_addr =
7781 htab->root.splt->output_section->vma
7782 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7783
7784 bfd_vma adrp2_addr = adrp1_addr + 4;
7785
7786 bfd_vma got_addr =
7787 htab->root.sgot->output_section->vma
7788 + htab->root.sgot->output_offset;
7789
7790 bfd_vma pltgot_addr =
7791 htab->root.sgotplt->output_section->vma
7792 + htab->root.sgotplt->output_offset;
7793
7794 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7795
7796 bfd_byte *plt_entry =
7797 htab->root.splt->contents + htab->tlsdesc_plt;
7798
7799 /* adrp x2, DT_TLSDESC_GOT */
7800 elf_aarch64_update_plt_entry (output_bfd,
7801 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7802 plt_entry + 4,
7803 (PG (dt_tlsdesc_got)
7804 - PG (adrp1_addr)));
7805
7806 /* adrp x3, 0 */
7807 elf_aarch64_update_plt_entry (output_bfd,
7808 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7809 plt_entry + 8,
7810 (PG (pltgot_addr)
7811 - PG (adrp2_addr)));
7812
7813 /* ldr x2, [x2, #0] */
7814 elf_aarch64_update_plt_entry (output_bfd,
7815 BFD_RELOC_AARCH64_LDSTNN_LO12,
7816 plt_entry + 12,
7817 PG_OFFSET (dt_tlsdesc_got));
7818
7819 /* add x3, x3, 0 */
7820 elf_aarch64_update_plt_entry (output_bfd,
7821 BFD_RELOC_AARCH64_ADD_LO12,
7822 plt_entry + 16,
7823 PG_OFFSET (pltgot_addr));
7824 }
7825 }
7826 }
7827
7828 if (htab->root.sgotplt)
7829 {
7830 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7831 {
7832 (*_bfd_error_handler)
7833 (_("discarded output section: `%A'"), htab->root.sgotplt);
7834 return FALSE;
7835 }
7836
7837 /* Fill in the first three entries in the global offset table. */
7838 if (htab->root.sgotplt->size > 0)
7839 {
7840 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7841
7842 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7843 bfd_put_NN (output_bfd,
7844 (bfd_vma) 0,
7845 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7846 bfd_put_NN (output_bfd,
7847 (bfd_vma) 0,
7848 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
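/* GOT[1] and GOT[2] are simply zeroed here; the dynamic linker is expected
   to fill them in at run time (conventionally with its link map and
   resolver entry point) before PLT0 loads GOT[2].  */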
7849 }
7850
7851 if (htab->root.sgot)
7852 {
7853 if (htab->root.sgot->size > 0)
7854 {
7855 bfd_vma addr =
7856 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7857 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7858 }
7859 }
7860
7861 elf_section_data (htab->root.sgotplt->output_section)->
7862 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7863 }
7864
7865 if (htab->root.sgot && htab->root.sgot->size > 0)
7866 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7867 = GOT_ENTRY_SIZE;
7868
7869 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7870 htab_traverse (htab->loc_hash_table,
7871 elfNN_aarch64_finish_local_dynamic_symbol,
7872 info);
7873
7874 return TRUE;
7875 }
7876
7877 /* Return address for Ith PLT stub in section PLT, for relocation REL
7878 or (bfd_vma) -1 if it should not be included. */
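/* A sketch of the layout this assumes: the first PLT_ENTRY_SIZE bytes of
   .plt hold the PLT0 header, and each subsequent stub is
   PLT_SMALL_ENTRY_SIZE bytes, so stub I starts at
   plt->vma + PLT_ENTRY_SIZE + I * PLT_SMALL_ENTRY_SIZE.  */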
7879
7880 static bfd_vma
7881 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7882 const arelent *rel ATTRIBUTE_UNUSED)
7883 {
7884 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7885 }
7886
7887
7888 /* We use this so we can override certain functions
7889 (though currently we don't). */
7890
7891 const struct elf_size_info elfNN_aarch64_size_info =
7892 {
7893 sizeof (ElfNN_External_Ehdr),
7894 sizeof (ElfNN_External_Phdr),
7895 sizeof (ElfNN_External_Shdr),
7896 sizeof (ElfNN_External_Rel),
7897 sizeof (ElfNN_External_Rela),
7898 sizeof (ElfNN_External_Sym),
7899 sizeof (ElfNN_External_Dyn),
7900 sizeof (Elf_External_Note),
7901 4, /* Hash table entry size. */
7902 1, /* Internal relocs per external relocs. */
7903 ARCH_SIZE, /* Arch size. */
7904 LOG_FILE_ALIGN, /* Log_file_align. */
7905 ELFCLASSNN, EV_CURRENT,
7906 bfd_elfNN_write_out_phdrs,
7907 bfd_elfNN_write_shdrs_and_ehdr,
7908 bfd_elfNN_checksum_contents,
7909 bfd_elfNN_write_relocs,
7910 bfd_elfNN_swap_symbol_in,
7911 bfd_elfNN_swap_symbol_out,
7912 bfd_elfNN_slurp_reloc_table,
7913 bfd_elfNN_slurp_symbol_table,
7914 bfd_elfNN_swap_dyn_in,
7915 bfd_elfNN_swap_dyn_out,
7916 bfd_elfNN_swap_reloc_in,
7917 bfd_elfNN_swap_reloc_out,
7918 bfd_elfNN_swap_reloca_in,
7919 bfd_elfNN_swap_reloca_out
7920 };
7921
7922 #define ELF_ARCH bfd_arch_aarch64
7923 #define ELF_MACHINE_CODE EM_AARCH64
7924 #define ELF_MAXPAGESIZE 0x10000
7925 #define ELF_MINPAGESIZE 0x1000
7926 #define ELF_COMMONPAGESIZE 0x1000
7927
7928 #define bfd_elfNN_close_and_cleanup \
7929 elfNN_aarch64_close_and_cleanup
7930
7931 #define bfd_elfNN_bfd_free_cached_info \
7932 elfNN_aarch64_bfd_free_cached_info
7933
7934 #define bfd_elfNN_bfd_is_target_special_symbol \
7935 elfNN_aarch64_is_target_special_symbol
7936
7937 #define bfd_elfNN_bfd_link_hash_table_create \
7938 elfNN_aarch64_link_hash_table_create
7939
7940 #define bfd_elfNN_bfd_merge_private_bfd_data \
7941 elfNN_aarch64_merge_private_bfd_data
7942
7943 #define bfd_elfNN_bfd_print_private_bfd_data \
7944 elfNN_aarch64_print_private_bfd_data
7945
7946 #define bfd_elfNN_bfd_reloc_type_lookup \
7947 elfNN_aarch64_reloc_type_lookup
7948
7949 #define bfd_elfNN_bfd_reloc_name_lookup \
7950 elfNN_aarch64_reloc_name_lookup
7951
7952 #define bfd_elfNN_bfd_set_private_flags \
7953 elfNN_aarch64_set_private_flags
7954
7955 #define bfd_elfNN_find_inliner_info \
7956 elfNN_aarch64_find_inliner_info
7957
7958 #define bfd_elfNN_find_nearest_line \
7959 elfNN_aarch64_find_nearest_line
7960
7961 #define bfd_elfNN_mkobject \
7962 elfNN_aarch64_mkobject
7963
7964 #define bfd_elfNN_new_section_hook \
7965 elfNN_aarch64_new_section_hook
7966
7967 #define elf_backend_adjust_dynamic_symbol \
7968 elfNN_aarch64_adjust_dynamic_symbol
7969
7970 #define elf_backend_always_size_sections \
7971 elfNN_aarch64_always_size_sections
7972
7973 #define elf_backend_check_relocs \
7974 elfNN_aarch64_check_relocs
7975
7976 #define elf_backend_copy_indirect_symbol \
7977 elfNN_aarch64_copy_indirect_symbol
7978
7979 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up
7980 shortcuts to them in our hash table.  */
7981 #define elf_backend_create_dynamic_sections \
7982 elfNN_aarch64_create_dynamic_sections
7983
7984 #define elf_backend_init_index_section \
7985 _bfd_elf_init_2_index_sections
7986
7987 #define elf_backend_finish_dynamic_sections \
7988 elfNN_aarch64_finish_dynamic_sections
7989
7990 #define elf_backend_finish_dynamic_symbol \
7991 elfNN_aarch64_finish_dynamic_symbol
7992
7993 #define elf_backend_gc_sweep_hook \
7994 elfNN_aarch64_gc_sweep_hook
7995
7996 #define elf_backend_object_p \
7997 elfNN_aarch64_object_p
7998
7999 #define elf_backend_output_arch_local_syms \
8000 elfNN_aarch64_output_arch_local_syms
8001
8002 #define elf_backend_plt_sym_val \
8003 elfNN_aarch64_plt_sym_val
8004
8005 #define elf_backend_post_process_headers \
8006 elfNN_aarch64_post_process_headers
8007
8008 #define elf_backend_relocate_section \
8009 elfNN_aarch64_relocate_section
8010
8011 #define elf_backend_reloc_type_class \
8012 elfNN_aarch64_reloc_type_class
8013
8014 #define elf_backend_section_from_shdr \
8015 elfNN_aarch64_section_from_shdr
8016
8017 #define elf_backend_size_dynamic_sections \
8018 elfNN_aarch64_size_dynamic_sections
8019
8020 #define elf_backend_size_info \
8021 elfNN_aarch64_size_info
8022
8023 #define elf_backend_write_section \
8024 elfNN_aarch64_write_section
8025
8026 #define elf_backend_can_refcount 1
8027 #define elf_backend_can_gc_sections 1
8028 #define elf_backend_plt_readonly 1
8029 #define elf_backend_want_got_plt 1
8030 #define elf_backend_want_plt_sym 0
8031 #define elf_backend_may_use_rel_p 0
8032 #define elf_backend_may_use_rela_p 1
8033 #define elf_backend_default_use_rela_p 1
8034 #define elf_backend_rela_normal 1
8035 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8036 #define elf_backend_default_execstack 0
8037
8038 #undef elf_backend_obj_attrs_section
8039 #define elf_backend_obj_attrs_section ".ARM.attributes"
8040
8041 #include "elfNN-target.h"