1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols.  Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry.  The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset.  For local TLS symbols the
74 static linker fixes up the offset itself.
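
     As an illustration only (the authoritative definition lives in the
     C library; the field names below are not used by this file), the
     two GOT slots for the traditional mechanism hold a structure of
     the form:

       typedef struct
       {
         unsigned long ti_module;     (R_AARCH64_TLS_DTPMOD)
         unsigned long ti_offset;     (R_AARCH64_TLS_DTPREL)
       } tls_index;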
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
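
     For the descriptor mechanism the two slots can similarly be
     pictured as follows (again only a sketch; the exact layout is
     owned by the C library's TLS descriptor runtime):

       struct tlsdesc
       {
         ptrdiff_t (*entry) (struct tlsdesc *);   resolver, set by the loader
         void *arg;                               resolver-specific argument
       };

     The R_AARCH64_TLSDESC relocation placed on the first slot tells
     the loader how to initialise the descriptor.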
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted.  The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate
110 a double GOT slot.  For a traditional TLS symbol we allocate space
111 for two relocation entries on the GOT; for a TLS descriptor symbol
112 we allocate space for one relocation on the slot.  Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, looking in the local symbol data
118 structures constructed earlier for local TLS symbols, and allocate
119 them double GOT slots along with space for a single GOT relocation.
120 Update the local symbol structure to record the allocated GOT offset.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once, the first time a TLS symbol is
129 encountered.  The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
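
     Illustrative shape of that test (not the literal code in this
     file, which works on the got.offset fields of the hash entries):

       bfd_vma off = <the symbol's recorded GOT offset>;
       if ((off & 1) != 0)
         off &= ~(bfd_vma) 1;             already emitted: strip the flag
       else
         {
           <emit the GOT relocation(s) for the symbol>;
           <record off | 1 back against the symbol>;
         }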
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
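/* True if the relocation type R_TYPE refers to thread-local storage:
   the general dynamic, local dynamic, initial exec and local exec code
   relocations, the TLS module/offset data relocations, or any of the
   TLS descriptor relocations.  */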
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
209 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
210
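/* True if R_TYPE is one of the relocations that identify the TLS code
   sequences (descriptor, general dynamic, initial exec and local
   dynamic forms) which are candidates for TLS relaxation.  */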
211 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
212 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
220 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
221 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
222 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
223 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
233 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
234
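/* True if R_TYPE is one of the TLS descriptor relocations.  */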
235 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
236 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
245 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
246 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
247 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
248
249 #define ELIMINATE_COPY_RELOCS 0
250
251 /* Return the size of a relocation entry.  HTAB is the bfd's
252 elf_aarch64_link_hash_table. */
253 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
254
255 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
256 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
257 #define PLT_ENTRY_SIZE (32)
258 #define PLT_SMALL_ENTRY_SIZE (16)
259 #define PLT_TLSDESC_ENTRY_SIZE (32)
260
261 /* Encoding of the nop instruction. */
262 #define INSN_NOP 0xd503201f
263
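/* Number of bytes of GOT occupied by the jump-table (PLT) entries:
   one GOT entry for each relocation in .rela.plt, or zero when there
   is no PLT relocation section.  */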
264 #define aarch64_compute_jump_table_size(htab) \
265 (((htab)->root.srelplt == NULL) ? 0 \
266 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
267
268 /* If the distance between the PLTGOT and the PLT is < 4GB, the first
269 entry in the procedure linkage table looks like this; use these
270 PLT entries in that case.  Note that the dynamic linker gets &PLTGOT[2]
271 in x16 and needs to work out PLTGOT[1] by using an address of
272 [x16,#-GOT_ENTRY_SIZE]. */
273 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
274 {
275 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
276 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
277 #if ARCH_SIZE == 64
278 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
279 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
280 #else
281 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
282 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
283 #endif
284 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
285 0x1f, 0x20, 0x03, 0xd5, /* nop */
286 0x1f, 0x20, 0x03, 0xd5, /* nop */
287 0x1f, 0x20, 0x03, 0xd5, /* nop */
288 };
289
290 /* If the distance between the PLTGOT and the PLT is < 4GB, a
291 per-function entry in the procedure linkage table looks like this;
292 use these PLT entries in that case. */
293 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
294 {
295 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
296 #if ARCH_SIZE == 64
297 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
298 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
299 #else
300 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
301 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
302 #endif
303 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
304 };
305
306 static const bfd_byte
307 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
308 {
309 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
310 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
311 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
312 #if ARCH_SIZE == 64
313 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
314 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
315 #else
316 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
317 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
318 #endif
319 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 #define elf_info_to_howto elfNN_aarch64_info_to_howto
325 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
326
327 #define AARCH64_ELF_ABI_VERSION 0
328
329 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
330 #define ALL_ONES (~ (bfd_vma) 0)
331
332 /* Indexed by the bfd internal reloc enumerators.
333 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
334 in reloc.c. */
335
336 static reloc_howto_type elfNN_aarch64_howto_table[] =
337 {
338 EMPTY_HOWTO (0),
339
340 /* Basic data relocations. */
341
342 /* Deprecated, but retained for backwards compatibility. */
343 HOWTO64 (R_AARCH64_NULL, /* type */
344 0, /* rightshift */
345 3, /* size (0 = byte, 1 = short, 2 = long) */
346 0, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_dont, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_AARCH64_NULL", /* name */
352 FALSE, /* partial_inplace */
353 0, /* src_mask */
354 0, /* dst_mask */
355 FALSE), /* pcrel_offset */
356 HOWTO (R_AARCH64_NONE, /* type */
357 0, /* rightshift */
358 3, /* size (0 = byte, 1 = short, 2 = long) */
359 0, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_dont, /* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_AARCH64_NONE", /* name */
365 FALSE, /* partial_inplace */
366 0, /* src_mask */
367 0, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* .xword: (S+A) */
371 HOWTO64 (AARCH64_R (ABS64), /* type */
372 0, /* rightshift */
373 4, /* size (4 = long long) */
374 64, /* bitsize */
375 FALSE, /* pc_relative */
376 0, /* bitpos */
377 complain_overflow_unsigned, /* complain_on_overflow */
378 bfd_elf_generic_reloc, /* special_function */
379 AARCH64_R_STR (ABS64), /* name */
380 FALSE, /* partial_inplace */
381 ALL_ONES, /* src_mask */
382 ALL_ONES, /* dst_mask */
383 FALSE), /* pcrel_offset */
384
385 /* .word: (S+A) */
386 HOWTO (AARCH64_R (ABS32), /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_unsigned, /* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 AARCH64_R_STR (ABS32), /* name */
395 FALSE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 /* .half: (S+A) */
401 HOWTO (AARCH64_R (ABS16), /* type */
402 0, /* rightshift */
403 1, /* size (0 = byte, 1 = short, 2 = long) */
404 16, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_unsigned, /* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 AARCH64_R_STR (ABS16), /* name */
410 FALSE, /* partial_inplace */
411 0xffff, /* src_mask */
412 0xffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 /* .xword: (S+A-P) */
416 HOWTO64 (AARCH64_R (PREL64), /* type */
417 0, /* rightshift */
418 4, /* size (4 = long long) */
419 64, /* bitsize */
420 TRUE, /* pc_relative */
421 0, /* bitpos */
422 complain_overflow_signed, /* complain_on_overflow */
423 bfd_elf_generic_reloc, /* special_function */
424 AARCH64_R_STR (PREL64), /* name */
425 FALSE, /* partial_inplace */
426 ALL_ONES, /* src_mask */
427 ALL_ONES, /* dst_mask */
428 TRUE), /* pcrel_offset */
429
430 /* .word: (S+A-P) */
431 HOWTO (AARCH64_R (PREL32), /* type */
432 0, /* rightshift */
433 2, /* size (0 = byte, 1 = short, 2 = long) */
434 32, /* bitsize */
435 TRUE, /* pc_relative */
436 0, /* bitpos */
437 complain_overflow_signed, /* complain_on_overflow */
438 bfd_elf_generic_reloc, /* special_function */
439 AARCH64_R_STR (PREL32), /* name */
440 FALSE, /* partial_inplace */
441 0xffffffff, /* src_mask */
442 0xffffffff, /* dst_mask */
443 TRUE), /* pcrel_offset */
444
445 /* .half: (S+A-P) */
446 HOWTO (AARCH64_R (PREL16), /* type */
447 0, /* rightshift */
448 1, /* size (0 = byte, 1 = short, 2 = long) */
449 16, /* bitsize */
450 TRUE, /* pc_relative */
451 0, /* bitpos */
452 complain_overflow_signed, /* complain_on_overflow */
453 bfd_elf_generic_reloc, /* special_function */
454 AARCH64_R_STR (PREL16), /* name */
455 FALSE, /* partial_inplace */
456 0xffff, /* src_mask */
457 0xffff, /* dst_mask */
458 TRUE), /* pcrel_offset */
459
460 /* Group relocations to create a 16, 32, 48 or 64 bit
461 unsigned data or abs address inline. */
462
463 /* MOVZ: ((S+A) >> 0) & 0xffff */
464 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
465 0, /* rightshift */
466 2, /* size (0 = byte, 1 = short, 2 = long) */
467 16, /* bitsize */
468 FALSE, /* pc_relative */
469 0, /* bitpos */
470 complain_overflow_unsigned, /* complain_on_overflow */
471 bfd_elf_generic_reloc, /* special_function */
472 AARCH64_R_STR (MOVW_UABS_G0), /* name */
473 FALSE, /* partial_inplace */
474 0xffff, /* src_mask */
475 0xffff, /* dst_mask */
476 FALSE), /* pcrel_offset */
477
478 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
479 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
480 0, /* rightshift */
481 2, /* size (0 = byte, 1 = short, 2 = long) */
482 16, /* bitsize */
483 FALSE, /* pc_relative */
484 0, /* bitpos */
485 complain_overflow_dont, /* complain_on_overflow */
486 bfd_elf_generic_reloc, /* special_function */
487 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
488 FALSE, /* partial_inplace */
489 0xffff, /* src_mask */
490 0xffff, /* dst_mask */
491 FALSE), /* pcrel_offset */
492
493 /* MOVZ: ((S+A) >> 16) & 0xffff */
494 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
495 16, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 16, /* bitsize */
498 FALSE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_unsigned, /* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 AARCH64_R_STR (MOVW_UABS_G1), /* name */
503 FALSE, /* partial_inplace */
504 0xffff, /* src_mask */
505 0xffff, /* dst_mask */
506 FALSE), /* pcrel_offset */
507
508 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
509 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
510 16, /* rightshift */
511 2, /* size (0 = byte, 1 = short, 2 = long) */
512 16, /* bitsize */
513 FALSE, /* pc_relative */
514 0, /* bitpos */
515 complain_overflow_dont, /* complain_on_overflow */
516 bfd_elf_generic_reloc, /* special_function */
517 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
518 FALSE, /* partial_inplace */
519 0xffff, /* src_mask */
520 0xffff, /* dst_mask */
521 FALSE), /* pcrel_offset */
522
523 /* MOVZ: ((S+A) >> 32) & 0xffff */
524 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
525 32, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 16, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_unsigned, /* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 AARCH64_R_STR (MOVW_UABS_G2), /* name */
533 FALSE, /* partial_inplace */
534 0xffff, /* src_mask */
535 0xffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
539 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
540 32, /* rightshift */
541 2, /* size (0 = byte, 1 = short, 2 = long) */
542 16, /* bitsize */
543 FALSE, /* pc_relative */
544 0, /* bitpos */
545 complain_overflow_dont, /* complain_on_overflow */
546 bfd_elf_generic_reloc, /* special_function */
547 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
548 FALSE, /* partial_inplace */
549 0xffff, /* src_mask */
550 0xffff, /* dst_mask */
551 FALSE), /* pcrel_offset */
552
553 /* MOVZ: ((S+A) >> 48) & 0xffff */
554 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
555 48, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 16, /* bitsize */
558 FALSE, /* pc_relative */
559 0, /* bitpos */
560 complain_overflow_unsigned, /* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 AARCH64_R_STR (MOVW_UABS_G3), /* name */
563 FALSE, /* partial_inplace */
564 0xffff, /* src_mask */
565 0xffff, /* dst_mask */
566 FALSE), /* pcrel_offset */
567
568 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
569 signed data or abs address inline. Will change instruction
570 to MOVN or MOVZ depending on sign of calculated value. */
571
572 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
573 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
574 0, /* rightshift */
575 2, /* size (0 = byte, 1 = short, 2 = long) */
576 17, /* bitsize */
577 FALSE, /* pc_relative */
578 0, /* bitpos */
579 complain_overflow_signed, /* complain_on_overflow */
580 bfd_elf_generic_reloc, /* special_function */
581 AARCH64_R_STR (MOVW_SABS_G0), /* name */
582 FALSE, /* partial_inplace */
583 0xffff, /* src_mask */
584 0xffff, /* dst_mask */
585 FALSE), /* pcrel_offset */
586
587 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
588 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
589 16, /* rightshift */
590 2, /* size (0 = byte, 1 = short, 2 = long) */
591 17, /* bitsize */
592 FALSE, /* pc_relative */
593 0, /* bitpos */
594 complain_overflow_signed, /* complain_on_overflow */
595 bfd_elf_generic_reloc, /* special_function */
596 AARCH64_R_STR (MOVW_SABS_G1), /* name */
597 FALSE, /* partial_inplace */
598 0xffff, /* src_mask */
599 0xffff, /* dst_mask */
600 FALSE), /* pcrel_offset */
601
602 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
603 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
604 32, /* rightshift */
605 2, /* size (0 = byte, 1 = short, 2 = long) */
606 17, /* bitsize */
607 FALSE, /* pc_relative */
608 0, /* bitpos */
609 complain_overflow_signed, /* complain_on_overflow */
610 bfd_elf_generic_reloc, /* special_function */
611 AARCH64_R_STR (MOVW_SABS_G2), /* name */
612 FALSE, /* partial_inplace */
613 0xffff, /* src_mask */
614 0xffff, /* dst_mask */
615 FALSE), /* pcrel_offset */
616
617 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
618 addresses: PG(x) is (x & ~0xfff). */
619
620 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
621 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
622 2, /* rightshift */
623 2, /* size (0 = byte, 1 = short, 2 = long) */
624 19, /* bitsize */
625 TRUE, /* pc_relative */
626 0, /* bitpos */
627 complain_overflow_signed, /* complain_on_overflow */
628 bfd_elf_generic_reloc, /* special_function */
629 AARCH64_R_STR (LD_PREL_LO19), /* name */
630 FALSE, /* partial_inplace */
631 0x7ffff, /* src_mask */
632 0x7ffff, /* dst_mask */
633 TRUE), /* pcrel_offset */
634
635 /* ADR: (S+A-P) & 0x1fffff */
636 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 21, /* bitsize */
640 TRUE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_signed, /* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 AARCH64_R_STR (ADR_PREL_LO21), /* name */
645 FALSE, /* partial_inplace */
646 0x1fffff, /* src_mask */
647 0x1fffff, /* dst_mask */
648 TRUE), /* pcrel_offset */
649
650 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
651 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
652 12, /* rightshift */
653 2, /* size (0 = byte, 1 = short, 2 = long) */
654 21, /* bitsize */
655 TRUE, /* pc_relative */
656 0, /* bitpos */
657 complain_overflow_signed, /* complain_on_overflow */
658 bfd_elf_generic_reloc, /* special_function */
659 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
660 FALSE, /* partial_inplace */
661 0x1fffff, /* src_mask */
662 0x1fffff, /* dst_mask */
663 TRUE), /* pcrel_offset */
664
665 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
666 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
667 12, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 21, /* bitsize */
670 TRUE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_dont, /* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
675 FALSE, /* partial_inplace */
676 0x1fffff, /* src_mask */
677 0x1fffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 /* ADD: (S+A) & 0xfff [no overflow check] */
681 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 12, /* bitsize */
685 FALSE, /* pc_relative */
686 10, /* bitpos */
687 complain_overflow_dont, /* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
690 FALSE, /* partial_inplace */
691 0x3ffc00, /* src_mask */
692 0x3ffc00, /* dst_mask */
693 FALSE), /* pcrel_offset */
694
695 /* LD/ST8: (S+A) & 0xfff */
696 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
697 0, /* rightshift */
698 2, /* size (0 = byte, 1 = short, 2 = long) */
699 12, /* bitsize */
700 FALSE, /* pc_relative */
701 0, /* bitpos */
702 complain_overflow_dont, /* complain_on_overflow */
703 bfd_elf_generic_reloc, /* special_function */
704 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
705 FALSE, /* partial_inplace */
706 0xfff, /* src_mask */
707 0xfff, /* dst_mask */
708 FALSE), /* pcrel_offset */
709
710 /* Relocations for control-flow instructions. */
711
712 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
713 HOWTO (AARCH64_R (TSTBR14), /* type */
714 2, /* rightshift */
715 2, /* size (0 = byte, 1 = short, 2 = long) */
716 14, /* bitsize */
717 TRUE, /* pc_relative */
718 0, /* bitpos */
719 complain_overflow_signed, /* complain_on_overflow */
720 bfd_elf_generic_reloc, /* special_function */
721 AARCH64_R_STR (TSTBR14), /* name */
722 FALSE, /* partial_inplace */
723 0x3fff, /* src_mask */
724 0x3fff, /* dst_mask */
725 TRUE), /* pcrel_offset */
726
727 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
728 HOWTO (AARCH64_R (CONDBR19), /* type */
729 2, /* rightshift */
730 2, /* size (0 = byte, 1 = short, 2 = long) */
731 19, /* bitsize */
732 TRUE, /* pc_relative */
733 0, /* bitpos */
734 complain_overflow_signed, /* complain_on_overflow */
735 bfd_elf_generic_reloc, /* special_function */
736 AARCH64_R_STR (CONDBR19), /* name */
737 FALSE, /* partial_inplace */
738 0x7ffff, /* src_mask */
739 0x7ffff, /* dst_mask */
740 TRUE), /* pcrel_offset */
741
742 /* B: ((S+A-P) >> 2) & 0x3ffffff */
743 HOWTO (AARCH64_R (JUMP26), /* type */
744 2, /* rightshift */
745 2, /* size (0 = byte, 1 = short, 2 = long) */
746 26, /* bitsize */
747 TRUE, /* pc_relative */
748 0, /* bitpos */
749 complain_overflow_signed, /* complain_on_overflow */
750 bfd_elf_generic_reloc, /* special_function */
751 AARCH64_R_STR (JUMP26), /* name */
752 FALSE, /* partial_inplace */
753 0x3ffffff, /* src_mask */
754 0x3ffffff, /* dst_mask */
755 TRUE), /* pcrel_offset */
756
757 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
758 HOWTO (AARCH64_R (CALL26), /* type */
759 2, /* rightshift */
760 2, /* size (0 = byte, 1 = short, 2 = long) */
761 26, /* bitsize */
762 TRUE, /* pc_relative */
763 0, /* bitpos */
764 complain_overflow_signed, /* complain_on_overflow */
765 bfd_elf_generic_reloc, /* special_function */
766 AARCH64_R_STR (CALL26), /* name */
767 FALSE, /* partial_inplace */
768 0x3ffffff, /* src_mask */
769 0x3ffffff, /* dst_mask */
770 TRUE), /* pcrel_offset */
771
772 /* LD/ST16: (S+A) & 0xffe */
773 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
774 1, /* rightshift */
775 2, /* size (0 = byte, 1 = short, 2 = long) */
776 12, /* bitsize */
777 FALSE, /* pc_relative */
778 0, /* bitpos */
779 complain_overflow_dont, /* complain_on_overflow */
780 bfd_elf_generic_reloc, /* special_function */
781 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
782 FALSE, /* partial_inplace */
783 0xffe, /* src_mask */
784 0xffe, /* dst_mask */
785 FALSE), /* pcrel_offset */
786
787 /* LD/ST32: (S+A) & 0xffc */
788 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
789 2, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 12, /* bitsize */
792 FALSE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_dont, /* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
797 FALSE, /* partial_inplace */
798 0xffc, /* src_mask */
799 0xffc, /* dst_mask */
800 FALSE), /* pcrel_offset */
801
802 /* LD/ST64: (S+A) & 0xff8 */
803 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
804 3, /* rightshift */
805 2, /* size (0 = byte, 1 = short, 2 = long) */
806 12, /* bitsize */
807 FALSE, /* pc_relative */
808 0, /* bitpos */
809 complain_overflow_dont, /* complain_on_overflow */
810 bfd_elf_generic_reloc, /* special_function */
811 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
812 FALSE, /* partial_inplace */
813 0xff8, /* src_mask */
814 0xff8, /* dst_mask */
815 FALSE), /* pcrel_offset */
816
817 /* LD/ST128: (S+A) & 0xff0 */
818 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
819 4, /* rightshift */
820 2, /* size (0 = byte, 1 = short, 2 = long) */
821 12, /* bitsize */
822 FALSE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_dont, /* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
827 FALSE, /* partial_inplace */
828 0xff0, /* src_mask */
829 0xff0, /* dst_mask */
830 FALSE), /* pcrel_offset */
831
832 /* Set a load-literal immediate field to bits
833 0x1FFFFC of G(S)-P */
834 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
835 2, /* rightshift */
836 2, /* size (0 = byte,1 = short,2 = long) */
837 19, /* bitsize */
838 TRUE, /* pc_relative */
839 0, /* bitpos */
840 complain_overflow_signed, /* complain_on_overflow */
841 bfd_elf_generic_reloc, /* special_function */
842 AARCH64_R_STR (GOT_LD_PREL19), /* name */
843 FALSE, /* partial_inplace */
844 0xffffe0, /* src_mask */
845 0xffffe0, /* dst_mask */
846 TRUE), /* pcrel_offset */
847
848 /* Get to the page for the GOT entry for the symbol
849 (G(S) - P) using an ADRP instruction. */
850 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
851 12, /* rightshift */
852 2, /* size (0 = byte, 1 = short, 2 = long) */
853 21, /* bitsize */
854 TRUE, /* pc_relative */
855 0, /* bitpos */
856 complain_overflow_dont, /* complain_on_overflow */
857 bfd_elf_generic_reloc, /* special_function */
858 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
859 FALSE, /* partial_inplace */
860 0x1fffff, /* src_mask */
861 0x1fffff, /* dst_mask */
862 TRUE), /* pcrel_offset */
863
864 /* LD64: GOT offset G(S) & 0xff8 */
865 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
866 3, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 12, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont, /* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
874 FALSE, /* partial_inplace */
875 0xff8, /* src_mask */
876 0xff8, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 /* LD32: GOT offset G(S) & 0xffc */
880 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
881 2, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 12, /* bitsize */
884 FALSE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont, /* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
889 FALSE, /* partial_inplace */
890 0xffc, /* src_mask */
891 0xffc, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Lower 16 bits of GOT offset for the symbol. */
895 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 16, /* bitsize */
899 FALSE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont, /* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
904 FALSE, /* partial_inplace */
905 0xffff, /* src_mask */
906 0xffff, /* dst_mask */
907 FALSE), /* pcrel_offset */
908
909 /* Higher 16 bits of GOT offset for the symbol. */
910 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
911 16, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_unsigned, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 /* LD64: GOT offset for the symbol. */
925 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
926 3, /* rightshift */
927 2, /* size (0 = byte, 1 = short, 2 = long) */
928 12, /* bitsize */
929 FALSE, /* pc_relative */
930 0, /* bitpos */
931 complain_overflow_unsigned, /* complain_on_overflow */
932 bfd_elf_generic_reloc, /* special_function */
933 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
934 FALSE, /* partial_inplace */
935 0x7ff8, /* src_mask */
936 0x7ff8, /* dst_mask */
937 FALSE), /* pcrel_offset */
938
939 /* LD32: GOT offset to the page address of GOT table.
940 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
941 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
942 2, /* rightshift */
943 2, /* size (0 = byte, 1 = short, 2 = long) */
944 12, /* bitsize */
945 FALSE, /* pc_relative */
946 0, /* bitpos */
947 complain_overflow_unsigned, /* complain_on_overflow */
948 bfd_elf_generic_reloc, /* special_function */
949 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
950 FALSE, /* partial_inplace */
951 0x5ffc, /* src_mask */
952 0x5ffc, /* dst_mask */
953 FALSE), /* pcrel_offset */
954
955 /* LD64: GOT offset to the page address of GOT table.
956 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
957 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
958 3, /* rightshift */
959 2, /* size (0 = byte, 1 = short, 2 = long) */
960 12, /* bitsize */
961 FALSE, /* pc_relative */
962 0, /* bitpos */
963 complain_overflow_unsigned, /* complain_on_overflow */
964 bfd_elf_generic_reloc, /* special_function */
965 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
966 FALSE, /* partial_inplace */
967 0x7ff8, /* src_mask */
968 0x7ff8, /* dst_mask */
969 FALSE), /* pcrel_offset */
970
971 /* Get to the page for the GOT entry for the symbol
972 (G(S) - P) using an ADRP instruction. */
973 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
974 12, /* rightshift */
975 2, /* size (0 = byte, 1 = short, 2 = long) */
976 21, /* bitsize */
977 TRUE, /* pc_relative */
978 0, /* bitpos */
979 complain_overflow_dont, /* complain_on_overflow */
980 bfd_elf_generic_reloc, /* special_function */
981 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
982 FALSE, /* partial_inplace */
983 0x1fffff, /* src_mask */
984 0x1fffff, /* dst_mask */
985 TRUE), /* pcrel_offset */
986
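 /* ADR: direct 21-bit PC-relative reference to the GOT entry for the
    symbol (G(S) - P).  */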
987 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
988 0, /* rightshift */
989 2, /* size (0 = byte, 1 = short, 2 = long) */
990 21, /* bitsize */
991 TRUE, /* pc_relative */
992 0, /* bitpos */
993 complain_overflow_dont, /* complain_on_overflow */
994 bfd_elf_generic_reloc, /* special_function */
995 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
996 FALSE, /* partial_inplace */
997 0x1fffff, /* src_mask */
998 0x1fffff, /* dst_mask */
999 TRUE), /* pcrel_offset */
1000
1001 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
1002 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1003 0, /* rightshift */
1004 2, /* size (0 = byte, 1 = short, 2 = long) */
1005 12, /* bitsize */
1006 FALSE, /* pc_relative */
1007 0, /* bitpos */
1008 complain_overflow_dont, /* complain_on_overflow */
1009 bfd_elf_generic_reloc, /* special_function */
1010 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1011 FALSE, /* partial_inplace */
1012 0xfff, /* src_mask */
1013 0xfff, /* dst_mask */
1014 FALSE), /* pcrel_offset */
1015
1016 /* Lower 16 bits of GOT offset to tls_index. */
1017 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 16, /* bitsize */
1021 FALSE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont, /* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1026 FALSE, /* partial_inplace */
1027 0xffff, /* src_mask */
1028 0xffff, /* dst_mask */
1029 FALSE), /* pcrel_offset */
1030
1031 /* Higher 16 bits of GOT offset to tls_index. */
1032 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1033 16, /* rightshift */
1034 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 16, /* bitsize */
1036 FALSE, /* pc_relative */
1037 0, /* bitpos */
1038 complain_overflow_unsigned, /* complain_on_overflow */
1039 bfd_elf_generic_reloc, /* special_function */
1040 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1041 FALSE, /* partial_inplace */
1042 0xffff, /* src_mask */
1043 0xffff, /* dst_mask */
1044 FALSE), /* pcrel_offset */
1045
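 /* ADRP: get to the page of the GOT entry that holds the symbol's
    initial-exec (TP-relative) offset.  */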
1046 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1047 12, /* rightshift */
1048 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 21, /* bitsize */
1050 FALSE, /* pc_relative */
1051 0, /* bitpos */
1052 complain_overflow_dont, /* complain_on_overflow */
1053 bfd_elf_generic_reloc, /* special_function */
1054 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1055 FALSE, /* partial_inplace */
1056 0x1fffff, /* src_mask */
1057 0x1fffff, /* dst_mask */
1058 FALSE), /* pcrel_offset */
1059
1060 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1061 3, /* rightshift */
1062 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 12, /* bitsize */
1064 FALSE, /* pc_relative */
1065 0, /* bitpos */
1066 complain_overflow_dont, /* complain_on_overflow */
1067 bfd_elf_generic_reloc, /* special_function */
1068 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1069 FALSE, /* partial_inplace */
1070 0xff8, /* src_mask */
1071 0xff8, /* dst_mask */
1072 FALSE), /* pcrel_offset */
1073
1074 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1075 2, /* rightshift */
1076 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 12, /* bitsize */
1078 FALSE, /* pc_relative */
1079 0, /* bitpos */
1080 complain_overflow_dont, /* complain_on_overflow */
1081 bfd_elf_generic_reloc, /* special_function */
1082 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1083 FALSE, /* partial_inplace */
1084 0xffc, /* src_mask */
1085 0xffc, /* dst_mask */
1086 FALSE), /* pcrel_offset */
1087
1088 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1089 2, /* rightshift */
1090 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 19, /* bitsize */
1092 FALSE, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1097 FALSE, /* partial_inplace */
1098 0x1ffffc, /* src_mask */
1099 0x1ffffc, /* dst_mask */
1100 FALSE), /* pcrel_offset */
1101
1102 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1103 0, /* rightshift */
1104 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 16, /* bitsize */
1106 FALSE, /* pc_relative */
1107 0, /* bitpos */
1108 complain_overflow_dont, /* complain_on_overflow */
1109 bfd_elf_generic_reloc, /* special_function */
1110 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1111 FALSE, /* partial_inplace */
1112 0xffff, /* src_mask */
1113 0xffff, /* dst_mask */
1114 FALSE), /* pcrel_offset */
1115
1116 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1117 16, /* rightshift */
1118 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 16, /* bitsize */
1120 FALSE, /* pc_relative */
1121 0, /* bitpos */
1122 complain_overflow_unsigned, /* complain_on_overflow */
1123 bfd_elf_generic_reloc, /* special_function */
1124 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1125 FALSE, /* partial_inplace */
1126 0xffff, /* src_mask */
1127 0xffff, /* dst_mask */
1128 FALSE), /* pcrel_offset */
1129
1130 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1131 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1132 12, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 12, /* bitsize */
1135 FALSE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_unsigned, /* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1140 FALSE, /* partial_inplace */
1141 0xfff, /* src_mask */
1142 0xfff, /* dst_mask */
1143 FALSE), /* pcrel_offset */
1144
1145 /* Unsigned 12 bit byte offset to module TLS base address. */
1146 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1147 0, /* rightshift */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 12, /* bitsize */
1150 FALSE, /* pc_relative */
1151 0, /* bitpos */
1152 complain_overflow_unsigned, /* complain_on_overflow */
1153 bfd_elf_generic_reloc, /* special_function */
1154 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1155 FALSE, /* partial_inplace */
1156 0xfff, /* src_mask */
1157 0xfff, /* dst_mask */
1158 FALSE), /* pcrel_offset */
1159
1160 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1161 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 12, /* bitsize */
1165 FALSE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont, /* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1170 FALSE, /* partial_inplace */
1171 0xfff, /* src_mask */
1172 0xfff, /* dst_mask */
1173 FALSE), /* pcrel_offset */
1174
1175 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
1176 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189
1190 /* Get to the page for the GOT entry for the symbol
1191 (G(S) - P) using an ADRP instruction. */
1192 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1193 12, /* rightshift */
1194 2, /* size (0 = byte, 1 = short, 2 = long) */
1195 21, /* bitsize */
1196 TRUE, /* pc_relative */
1197 0, /* bitpos */
1198 complain_overflow_signed, /* complain_on_overflow */
1199 bfd_elf_generic_reloc, /* special_function */
1200 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1201 FALSE, /* partial_inplace */
1202 0x1fffff, /* src_mask */
1203 0x1fffff, /* dst_mask */
1204 TRUE), /* pcrel_offset */
1205
1206 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1207 0, /* rightshift */
1208 2, /* size (0 = byte, 1 = short, 2 = long) */
1209 21, /* bitsize */
1210 TRUE, /* pc_relative */
1211 0, /* bitpos */
1212 complain_overflow_signed, /* complain_on_overflow */
1213 bfd_elf_generic_reloc, /* special_function */
1214 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1215 FALSE, /* partial_inplace */
1216 0x1fffff, /* src_mask */
1217 0x1fffff, /* dst_mask */
1218 TRUE), /* pcrel_offset */
1219
1220 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1221 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1222 1, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 11, /* bitsize */
1225 FALSE, /* pc_relative */
1226 10, /* bitpos */
1227 complain_overflow_unsigned, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1230 FALSE, /* partial_inplace */
1231 0x1ffc00, /* src_mask */
1232 0x1ffc00, /* dst_mask */
1233 FALSE), /* pcrel_offset */
1234
1235 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1236 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1237 1, /* rightshift */
1238 2, /* size (0 = byte, 1 = short, 2 = long) */
1239 11, /* bitsize */
1240 FALSE, /* pc_relative */
1241 10, /* bitpos */
1242 complain_overflow_dont, /* complain_on_overflow */
1243 bfd_elf_generic_reloc, /* special_function */
1244 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1245 FALSE, /* partial_inplace */
1246 0x1ffc00, /* src_mask */
1247 0x1ffc00, /* dst_mask */
1248 FALSE), /* pcrel_offset */
1249
1250 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1251 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1252 2, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 10, /* bitsize */
1255 FALSE, /* pc_relative */
1256 10, /* bitpos */
1257 complain_overflow_unsigned, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1260 FALSE, /* partial_inplace */
1261 0x3ffc00, /* src_mask */
1262 0x3ffc00, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1266 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1267 2, /* rightshift */
1268 2, /* size (0 = byte, 1 = short, 2 = long) */
1269 10, /* bitsize */
1270 FALSE, /* pc_relative */
1271 10, /* bitpos */
1272 complain_overflow_dont, /* complain_on_overflow */
1273 bfd_elf_generic_reloc, /* special_function */
1274 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1275 FALSE, /* partial_inplace */
1276 0xffc00, /* src_mask */
1277 0xffc00, /* dst_mask */
1278 FALSE), /* pcrel_offset */
1279
1280 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1281 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1282 3, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 9, /* bitsize */
1285 FALSE, /* pc_relative */
1286 10, /* bitpos */
1287 complain_overflow_unsigned, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1290 FALSE, /* partial_inplace */
1291 0x3ffc00, /* src_mask */
1292 0x3ffc00, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1296 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1297 3, /* rightshift */
1298 2, /* size (0 = byte, 1 = short, 2 = long) */
1299 9, /* bitsize */
1300 FALSE, /* pc_relative */
1301 10, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1305 FALSE, /* partial_inplace */
1306 0x7fc00, /* src_mask */
1307 0x7fc00, /* dst_mask */
1308 FALSE), /* pcrel_offset */
1309
1310 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1311 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1312 0, /* rightshift */
1313 2, /* size (0 = byte, 1 = short, 2 = long) */
1314 12, /* bitsize */
1315 FALSE, /* pc_relative */
1316 10, /* bitpos */
1317 complain_overflow_unsigned, /* complain_on_overflow */
1318 bfd_elf_generic_reloc, /* special_function */
1319 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1320 FALSE, /* partial_inplace */
1321 0x3ffc00, /* src_mask */
1322 0x3ffc00, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1324
1325 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1326 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1327 0, /* rightshift */
1328 2, /* size (0 = byte, 1 = short, 2 = long) */
1329 12, /* bitsize */
1330 FALSE, /* pc_relative */
1331 10, /* bitpos */
1332 complain_overflow_dont, /* complain_on_overflow */
1333 bfd_elf_generic_reloc, /* special_function */
1334 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1335 FALSE, /* partial_inplace */
1336 0x3ffc00, /* src_mask */
1337 0x3ffc00, /* dst_mask */
1338 FALSE), /* pcrel_offset */
1339
1340 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1341 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_unsigned, /* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1350 FALSE, /* partial_inplace */
1351 0xffff, /* src_mask */
1352 0xffff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1356 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1357 0, /* rightshift */
1358 2, /* size (0 = byte, 1 = short, 2 = long) */
1359 16, /* bitsize */
1360 FALSE, /* pc_relative */
1361 0, /* bitpos */
1362 complain_overflow_dont, /* complain_on_overflow */
1363 bfd_elf_generic_reloc, /* special_function */
1364 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1365 FALSE, /* partial_inplace */
1366 0xffff, /* src_mask */
1367 0xffff, /* dst_mask */
1368 FALSE), /* pcrel_offset */
1369
1370 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1371 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1372 16, /* rightshift */
1373 2, /* size (0 = byte, 1 = short, 2 = long) */
1374 16, /* bitsize */
1375 FALSE, /* pc_relative */
1376 0, /* bitpos */
1377 complain_overflow_unsigned, /* complain_on_overflow */
1378 bfd_elf_generic_reloc, /* special_function */
1379 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1380 FALSE, /* partial_inplace */
1381 0xffff, /* src_mask */
1382 0xffff, /* dst_mask */
1383 FALSE), /* pcrel_offset */
1384
1385 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1386 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1387 16, /* rightshift */
1388 2, /* size (0 = byte, 1 = short, 2 = long) */
1389 16, /* bitsize */
1390 FALSE, /* pc_relative */
1391 0, /* bitpos */
1392 complain_overflow_dont, /* complain_on_overflow */
1393 bfd_elf_generic_reloc, /* special_function */
1394 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1395 FALSE, /* partial_inplace */
1396 0xffff, /* src_mask */
1397 0xffff, /* dst_mask */
1398 FALSE), /* pcrel_offset */
1399
1400 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1401 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1402 32, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 16, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_unsigned, /* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1410 FALSE, /* partial_inplace */
1411 0xffff, /* src_mask */
1412 0xffff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
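 /* MOVZ: bits [47:32] of the TP-relative byte offset to the symbol
    (local exec).  The G1, G0 and _NC variants below cover the lower
    16-bit groups.  */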
1415 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1416 32, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 16, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_unsigned, /* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1424 FALSE, /* partial_inplace */
1425 0xffff, /* src_mask */
1426 0xffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1430 16, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 16, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1438 FALSE, /* partial_inplace */
1439 0xffff, /* src_mask */
1440 0xffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1444 16, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 16, /* bitsize */
1447 FALSE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1452 FALSE, /* partial_inplace */
1453 0xffff, /* src_mask */
1454 0xffff, /* dst_mask */
1455 FALSE), /* pcrel_offset */
1456
1457 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 16, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_dont, /* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1466 FALSE, /* partial_inplace */
1467 0xffff, /* src_mask */
1468 0xffff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 16, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_dont, /* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1480 FALSE, /* partial_inplace */
1481 0xffff, /* src_mask */
1482 0xffff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1486 12, /* rightshift */
1487 2, /* size (0 = byte, 1 = short, 2 = long) */
1488 12, /* bitsize */
1489 FALSE, /* pc_relative */
1490 0, /* bitpos */
1491 complain_overflow_unsigned, /* complain_on_overflow */
1492 bfd_elf_generic_reloc, /* special_function */
1493 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1494 FALSE, /* partial_inplace */
1495 0xfff, /* src_mask */
1496 0xfff, /* dst_mask */
1497 FALSE), /* pcrel_offset */
1498
1499 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1500 0, /* rightshift */
1501 2, /* size (0 = byte, 1 = short, 2 = long) */
1502 12, /* bitsize */
1503 FALSE, /* pc_relative */
1504 0, /* bitpos */
1505 complain_overflow_unsigned, /* complain_on_overflow */
1506 bfd_elf_generic_reloc, /* special_function */
1507 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1508 FALSE, /* partial_inplace */
1509 0xfff, /* src_mask */
1510 0xfff, /* dst_mask */
1511 FALSE), /* pcrel_offset */
1512
1513 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1514 0, /* rightshift */
1515 2, /* size (0 = byte, 1 = short, 2 = long) */
1516 12, /* bitsize */
1517 FALSE, /* pc_relative */
1518 0, /* bitpos */
1519 complain_overflow_dont, /* complain_on_overflow */
1520 bfd_elf_generic_reloc, /* special_function */
1521 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1522 FALSE, /* partial_inplace */
1523 0xfff, /* src_mask */
1524 0xfff, /* dst_mask */
1525 FALSE), /* pcrel_offset */
1526
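 /* LD-lit: 19-bit PC-relative load of the GOT entry for the symbol's
    TLS descriptor.  */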
1527 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1528 2, /* rightshift */
1529 2, /* size (0 = byte, 1 = short, 2 = long) */
1530 19, /* bitsize */
1531 TRUE, /* pc_relative */
1532 0, /* bitpos */
1533 complain_overflow_dont, /* complain_on_overflow */
1534 bfd_elf_generic_reloc, /* special_function */
1535 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1536 FALSE, /* partial_inplace */
1537 0x0ffffe0, /* src_mask */
1538 0x0ffffe0, /* dst_mask */
1539 TRUE), /* pcrel_offset */
1540
1541 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1542 0, /* rightshift */
1543 2, /* size (0 = byte, 1 = short, 2 = long) */
1544 21, /* bitsize */
1545 TRUE, /* pc_relative */
1546 0, /* bitpos */
1547 complain_overflow_dont, /* complain_on_overflow */
1548 bfd_elf_generic_reloc, /* special_function */
1549 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1550 FALSE, /* partial_inplace */
1551 0x1fffff, /* src_mask */
1552 0x1fffff, /* dst_mask */
1553 TRUE), /* pcrel_offset */
1554
1555 /* Get to the page for the GOT entry for the symbol
1556 (G(S) - P) using an ADRP instruction. */
1557 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1558 12, /* rightshift */
1559 2, /* size (0 = byte, 1 = short, 2 = long) */
1560 21, /* bitsize */
1561 TRUE, /* pc_relative */
1562 0, /* bitpos */
1563 complain_overflow_dont, /* complain_on_overflow */
1564 bfd_elf_generic_reloc, /* special_function */
1565 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1566 FALSE, /* partial_inplace */
1567 0x1fffff, /* src_mask */
1568 0x1fffff, /* dst_mask */
1569 TRUE), /* pcrel_offset */
1570
1571 /* LD64: GOT offset G(S) & 0xff8. */
1572 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1573 3, /* rightshift */
1574 2, /* size (0 = byte, 1 = short, 2 = long) */
1575 12, /* bitsize */
1576 FALSE, /* pc_relative */
1577 0, /* bitpos */
1578 complain_overflow_dont, /* complain_on_overflow */
1579 bfd_elf_generic_reloc, /* special_function */
1580 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1581 FALSE, /* partial_inplace */
1582 0xff8, /* src_mask */
1583 0xff8, /* dst_mask */
1584 FALSE), /* pcrel_offset */
1585
1586 /* LD32: GOT offset G(S) & 0xffc. */
1587 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1588 2, /* rightshift */
1589 2, /* size (0 = byte, 1 = short, 2 = long) */
1590 12, /* bitsize */
1591 FALSE, /* pc_relative */
1592 0, /* bitpos */
1593 complain_overflow_dont, /* complain_on_overflow */
1594 bfd_elf_generic_reloc, /* special_function */
1595 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1596 FALSE, /* partial_inplace */
1597 0xffc, /* src_mask */
1598 0xffc, /* dst_mask */
1599 FALSE), /* pcrel_offset */
1600
1601 /* ADD: GOT offset G(S) & 0xfff. */
1602 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 12, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_dont, /* complain_on_overflow */
1609 bfd_elf_generic_reloc, /* special_function */
1610 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1611 FALSE, /* partial_inplace */
1612 0xfff, /* src_mask */
1613 0xfff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1617 16, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_unsigned, /* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1625 FALSE, /* partial_inplace */
1626 0xffff, /* src_mask */
1627 0xffff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_dont, /* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1639 FALSE, /* partial_inplace */
1640 0xffff, /* src_mask */
1641 0xffff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_dont, /* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 AARCH64_R_STR (TLSDESC_LDR), /* name */
1653 FALSE, /* partial_inplace */
1654 0x0, /* src_mask */
1655 0x0, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1659 0, /* rightshift */
1660 2, /* size (0 = byte, 1 = short, 2 = long) */
1661 12, /* bitsize */
1662 FALSE, /* pc_relative */
1663 0, /* bitpos */
1664 complain_overflow_dont, /* complain_on_overflow */
1665 bfd_elf_generic_reloc, /* special_function */
1666 AARCH64_R_STR (TLSDESC_ADD), /* name */
1667 FALSE, /* partial_inplace */
1668 0x0, /* src_mask */
1669 0x0, /* dst_mask */
1670 FALSE), /* pcrel_offset */
1671
1672 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1673 0, /* rightshift */
1674 2, /* size (0 = byte, 1 = short, 2 = long) */
1675 0, /* bitsize */
1676 FALSE, /* pc_relative */
1677 0, /* bitpos */
1678 complain_overflow_dont, /* complain_on_overflow */
1679 bfd_elf_generic_reloc, /* special_function */
1680 AARCH64_R_STR (TLSDESC_CALL), /* name */
1681 FALSE, /* partial_inplace */
1682 0x0, /* src_mask */
1683 0x0, /* dst_mask */
1684 FALSE), /* pcrel_offset */
1685
1686 HOWTO (AARCH64_R (COPY), /* type */
1687 0, /* rightshift */
1688 2, /* size (0 = byte, 1 = short, 2 = long) */
1689 64, /* bitsize */
1690 FALSE, /* pc_relative */
1691 0, /* bitpos */
1692 complain_overflow_bitfield, /* complain_on_overflow */
1693 bfd_elf_generic_reloc, /* special_function */
1694 AARCH64_R_STR (COPY), /* name */
1695 TRUE, /* partial_inplace */
1696 0xffffffff, /* src_mask */
1697 0xffffffff, /* dst_mask */
1698 FALSE), /* pcrel_offset */
1699
1700 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1701 0, /* rightshift */
1702 2, /* size (0 = byte, 1 = short, 2 = long) */
1703 64, /* bitsize */
1704 FALSE, /* pc_relative */
1705 0, /* bitpos */
1706 complain_overflow_bitfield, /* complain_on_overflow */
1707 bfd_elf_generic_reloc, /* special_function */
1708 AARCH64_R_STR (GLOB_DAT), /* name */
1709 TRUE, /* partial_inplace */
1710 0xffffffff, /* src_mask */
1711 0xffffffff, /* dst_mask */
1712 FALSE), /* pcrel_offset */
1713
1714 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1715 0, /* rightshift */
1716 2, /* size (0 = byte, 1 = short, 2 = long) */
1717 64, /* bitsize */
1718 FALSE, /* pc_relative */
1719 0, /* bitpos */
1720 complain_overflow_bitfield, /* complain_on_overflow */
1721 bfd_elf_generic_reloc, /* special_function */
1722 AARCH64_R_STR (JUMP_SLOT), /* name */
1723 TRUE, /* partial_inplace */
1724 0xffffffff, /* src_mask */
1725 0xffffffff, /* dst_mask */
1726 FALSE), /* pcrel_offset */
1727
1728 HOWTO (AARCH64_R (RELATIVE), /* type */
1729 0, /* rightshift */
1730 2, /* size (0 = byte, 1 = short, 2 = long) */
1731 64, /* bitsize */
1732 FALSE, /* pc_relative */
1733 0, /* bitpos */
1734 complain_overflow_bitfield, /* complain_on_overflow */
1735 bfd_elf_generic_reloc, /* special_function */
1736 AARCH64_R_STR (RELATIVE), /* name */
1737 TRUE, /* partial_inplace */
1738 ALL_ONES, /* src_mask */
1739 ALL_ONES, /* dst_mask */
1740 FALSE), /* pcrel_offset */
1741
1742 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1743 0, /* rightshift */
1744 2, /* size (0 = byte, 1 = short, 2 = long) */
1745 64, /* bitsize */
1746 FALSE, /* pc_relative */
1747 0, /* bitpos */
1748 complain_overflow_dont, /* complain_on_overflow */
1749 bfd_elf_generic_reloc, /* special_function */
1750 #if ARCH_SIZE == 64
1751 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1752 #else
1753 AARCH64_R_STR (TLS_DTPMOD), /* name */
1754 #endif
1755 FALSE, /* partial_inplace */
1756 0, /* src_mask */
1757 ALL_ONES, /* dst_mask */
1758 FALSE), /* pcrel_offset */
1759
1760 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1761 0, /* rightshift */
1762 2, /* size (0 = byte, 1 = short, 2 = long) */
1763 64, /* bitsize */
1764 FALSE, /* pc_relative */
1765 0, /* bitpos */
1766 complain_overflow_dont, /* complain_on_overflow */
1767 bfd_elf_generic_reloc, /* special_function */
1768 #if ARCH_SIZE == 64
1769 AARCH64_R_STR (TLS_DTPREL64), /* name */
1770 #else
1771 AARCH64_R_STR (TLS_DTPREL), /* name */
1772 #endif
1773 FALSE, /* partial_inplace */
1774 0, /* src_mask */
1775 ALL_ONES, /* dst_mask */
1776 FALSE), /* pcrel_offset */
1777
1778 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1779 0, /* rightshift */
1780 2, /* size (0 = byte, 1 = short, 2 = long) */
1781 64, /* bitsize */
1782 FALSE, /* pc_relative */
1783 0, /* bitpos */
1784 complain_overflow_dont, /* complain_on_overflow */
1785 bfd_elf_generic_reloc, /* special_function */
1786 #if ARCH_SIZE == 64
1787 AARCH64_R_STR (TLS_TPREL64), /* name */
1788 #else
1789 AARCH64_R_STR (TLS_TPREL), /* name */
1790 #endif
1791 FALSE, /* partial_inplace */
1792 0, /* src_mask */
1793 ALL_ONES, /* dst_mask */
1794 FALSE), /* pcrel_offset */
1795
1796 HOWTO (AARCH64_R (TLSDESC), /* type */
1797 0, /* rightshift */
1798 2, /* size (0 = byte, 1 = short, 2 = long) */
1799 64, /* bitsize */
1800 FALSE, /* pc_relative */
1801 0, /* bitpos */
1802 complain_overflow_dont, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSDESC), /* name */
1805 FALSE, /* partial_inplace */
1806 0, /* src_mask */
1807 ALL_ONES, /* dst_mask */
1808 FALSE), /* pcrel_offset */
1809
1810 HOWTO (AARCH64_R (IRELATIVE), /* type */
1811 0, /* rightshift */
1812 2, /* size (0 = byte, 1 = short, 2 = long) */
1813 64, /* bitsize */
1814 FALSE, /* pc_relative */
1815 0, /* bitpos */
1816 complain_overflow_bitfield, /* complain_on_overflow */
1817 bfd_elf_generic_reloc, /* special_function */
1818 AARCH64_R_STR (IRELATIVE), /* name */
1819 FALSE, /* partial_inplace */
1820 0, /* src_mask */
1821 ALL_ONES, /* dst_mask */
1822 FALSE), /* pcrel_offset */
1823
1824 EMPTY_HOWTO (0),
1825 };
1826
1827 static reloc_howto_type elfNN_aarch64_howto_none =
1828 HOWTO (R_AARCH64_NONE, /* type */
1829 0, /* rightshift */
1830 3, /* size (0 = byte, 1 = short, 2 = long) */
1831 0, /* bitsize */
1832 FALSE, /* pc_relative */
1833 0, /* bitpos */
1834 complain_overflow_dont, /* complain_on_overflow */
1835 bfd_elf_generic_reloc, /* special_function */
1836 "R_AARCH64_NONE", /* name */
1837 FALSE, /* partial_inplace */
1838 0, /* src_mask */
1839 0, /* dst_mask */
1840 FALSE); /* pcrel_offset */
1841
1842 /* Given HOWTO, return the bfd internal relocation enumerator. */
1843
1844 static bfd_reloc_code_real_type
1845 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1846 {
1847 const int size
1848 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1849 const ptrdiff_t offset
1850 = howto - elfNN_aarch64_howto_table;
1851
1852 if (offset > 0 && offset < size - 1)
1853 return BFD_RELOC_AARCH64_RELOC_START + offset;
1854
1855 if (howto == &elfNN_aarch64_howto_none)
1856 return BFD_RELOC_AARCH64_NONE;
1857
1858 return BFD_RELOC_AARCH64_RELOC_START;
1859 }
1860
1861 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1862
1863 static bfd_reloc_code_real_type
1864 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1865 {
1866 static bfd_boolean initialized_p = FALSE;
1867 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1868 static unsigned int offsets[R_AARCH64_end];
1869
1870 if (initialized_p == FALSE)
1871 {
1872 unsigned int i;
1873
1874 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1875 if (elfNN_aarch64_howto_table[i].type != 0)
1876 offsets[elfNN_aarch64_howto_table[i].type] = i;
1877
1878 initialized_p = TRUE;
1879 }
1880
1881 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1882 return BFD_RELOC_AARCH64_NONE;
1883
1884 /* PR 17512: file: b371e70a. */
1885 if (r_type >= R_AARCH64_end)
1886 {
1887 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1888 bfd_set_error (bfd_error_bad_value);
1889 return BFD_RELOC_AARCH64_NONE;
1890 }
1891
1892 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1893 }
1894
1895 struct elf_aarch64_reloc_map
1896 {
1897 bfd_reloc_code_real_type from;
1898 bfd_reloc_code_real_type to;
1899 };
1900
1901 /* Map bfd generic reloc to AArch64-specific reloc. */
1902 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1903 {
1904 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1905
1906 /* Basic data relocations. */
1907 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1908 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1909 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1910 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1911 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1912 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1913 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1914 };
1915
1916 /* Given the bfd internal relocation enumerator in CODE, return the
1917 corresponding howto entry. */
1918
1919 static reloc_howto_type *
1920 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1921 {
1922 unsigned int i;
1923
1924 /* Convert bfd generic reloc to AArch64-specific reloc. */
1925 if (code < BFD_RELOC_AARCH64_RELOC_START
1926 || code > BFD_RELOC_AARCH64_RELOC_END)
1927 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1928 if (elf_aarch64_reloc_map[i].from == code)
1929 {
1930 code = elf_aarch64_reloc_map[i].to;
1931 break;
1932 }
1933
1934 if (code > BFD_RELOC_AARCH64_RELOC_START
1935 && code < BFD_RELOC_AARCH64_RELOC_END)
1936 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1937 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1938
1939 if (code == BFD_RELOC_AARCH64_NONE)
1940 return &elfNN_aarch64_howto_none;
1941
1942 return NULL;
1943 }
1944
1945 static reloc_howto_type *
1946 elfNN_aarch64_howto_from_type (unsigned int r_type)
1947 {
1948 bfd_reloc_code_real_type val;
1949 reloc_howto_type *howto;
1950
1951 #if ARCH_SIZE == 32
1952 if (r_type > 256)
1953 {
1954 bfd_set_error (bfd_error_bad_value);
1955 return NULL;
1956 }
1957 #endif
1958
1959 if (r_type == R_AARCH64_NONE)
1960 return &elfNN_aarch64_howto_none;
1961
1962 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1963 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1964
1965 if (howto != NULL)
1966 return howto;
1967
1968 bfd_set_error (bfd_error_bad_value);
1969 return NULL;
1970 }
1971
1972 static void
1973 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1974 Elf_Internal_Rela *elf_reloc)
1975 {
1976 unsigned int r_type;
1977
1978 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1979 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1980 }
1981
1982 static reloc_howto_type *
1983 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1984 bfd_reloc_code_real_type code)
1985 {
1986 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1987
1988 if (howto != NULL)
1989 return howto;
1990
1991 bfd_set_error (bfd_error_bad_value);
1992 return NULL;
1993 }
1994
1995 static reloc_howto_type *
1996 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1997 const char *r_name)
1998 {
1999 unsigned int i;
2000
2001 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2002 if (elfNN_aarch64_howto_table[i].name != NULL
2003 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2004 return &elfNN_aarch64_howto_table[i];
2005
2006 return NULL;
2007 }
2008
2009 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2010 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2011 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2012 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2013
2014 /* The linker script knows the section names for placement.
2015 The entry_names are used to do simple name mangling on the stubs.
2016 Given a function name, and its type, the stub can be found. The
2017 name can be changed. The only requirement is that the %s be present. */
2018 #define STUB_ENTRY_NAME "__%s_veneer"
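/* For illustration only: with this template, a veneer generated for the
   function "printf" would be given the stub symbol name
   "__printf_veneer".  */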
2019
2020 /* The name of the dynamic interpreter. This is put in the .interp
2021 section. */
2022 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2023
2024 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2025 (((1 << 25) - 1) << 2)
2026 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2027 (-((1 << 25) << 2))
2028
2029 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2030 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
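/* Illustrative arithmetic only: the branch limits above work out to
   +0x7fffffc / -0x8000000 bytes (roughly +/-128 MiB), while the signed
   21-bit ADRP immediate covers +/-(1 << 20) 4 KiB pages, i.e. roughly
   +/-4 GiB around the place of the instruction.  */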
2031
2032 static int
2033 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2034 {
2035 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2036 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2037 }
2038
2039 static int
2040 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2041 {
2042 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2043 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2044 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2045 }
2046
2047 static const uint32_t aarch64_adrp_branch_stub [] =
2048 {
2049 0x90000010, /* adrp ip0, X */
2050 /* R_AARCH64_ADR_HI21_PCREL(X) */
2051 0x91000210, /* add ip0, ip0, :lo12:X */
2052 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2053 0xd61f0200, /* br ip0 */
2054 };
2055
2056 static const uint32_t aarch64_long_branch_stub[] =
2057 {
2058 #if ARCH_SIZE == 64
2059 0x58000090, /* ldr ip0, 1f */
2060 #else
2061 0x18000090, /* ldr wip0, 1f */
2062 #endif
2063 0x10000011, /* adr ip1, #0 */
2064 0x8b110210, /* add ip0, ip0, ip1 */
2065 0xd61f0200, /* br ip0 */
2066 0x00000000, /* 1: .xword or .word
2067 R_AARCH64_PRELNN(X) + 12
2068 */
2069 0x00000000,
2070 };
2071
2072 static const uint32_t aarch64_erratum_835769_stub[] =
2073 {
2074 0x00000000, /* Placeholder for multiply accumulate. */
2075 0x14000000, /* b <label> */
2076 };
2077
2078 static const uint32_t aarch64_erratum_843419_stub[] =
2079 {
2080 0x00000000, /* Placeholder for LDR instruction. */
2081 0x14000000, /* b <label> */
2082 };
2083
2084 /* Section name for stubs is the associated section name plus this
2085 string. */
2086 #define STUB_SUFFIX ".stub"
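/* For example (illustrative), stubs attached to an input section named
   ".text" are collected in a stub section named ".text.stub".  */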
2087
2088 enum elf_aarch64_stub_type
2089 {
2090 aarch64_stub_none,
2091 aarch64_stub_adrp_branch,
2092 aarch64_stub_long_branch,
2093 aarch64_stub_erratum_835769_veneer,
2094 aarch64_stub_erratum_843419_veneer,
2095 };
2096
2097 struct elf_aarch64_stub_hash_entry
2098 {
2099 /* Base hash table entry structure. */
2100 struct bfd_hash_entry root;
2101
2102 /* The stub section. */
2103 asection *stub_sec;
2104
2105 /* Offset within stub_sec of the beginning of this stub. */
2106 bfd_vma stub_offset;
2107
2108 /* Given the symbol's value and its section we can determine its final
2109 value when building the stubs (so the stub knows where to jump). */
2110 bfd_vma target_value;
2111 asection *target_section;
2112
2113 enum elf_aarch64_stub_type stub_type;
2114
2115 /* The symbol table entry, if any, that this was derived from. */
2116 struct elf_aarch64_link_hash_entry *h;
2117
2118 /* Destination symbol type. */
2119 unsigned char st_type;
2120
2121 /* Where this stub is being called from, or, in the case of combined
2122 stub sections, the first input section in the group. */
2123 asection *id_sec;
2124
2125 /* The name for the local symbol at the start of this stub. The
2126 stub name in the hash table has to be unique; this does not, so
2127 it can be friendlier. */
2128 char *output_name;
2129
2130 /* The instruction which caused this stub to be generated (only valid for
2131 erratum 835769 workaround stubs at present). */
2132 uint32_t veneered_insn;
2133
2134 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2135 bfd_vma adrp_offset;
2136 };
2137
2138 /* Used to build a map of a section. This is required for mixed-endian
2139 code/data. */
2140
2141 typedef struct elf_elf_section_map
2142 {
2143 bfd_vma vma;
2144 char type;
2145 }
2146 elf_aarch64_section_map;
2147
2148
2149 typedef struct _aarch64_elf_section_data
2150 {
2151 struct bfd_elf_section_data elf;
2152 unsigned int mapcount;
2153 unsigned int mapsize;
2154 elf_aarch64_section_map *map;
2155 }
2156 _aarch64_elf_section_data;
2157
2158 #define elf_aarch64_section_data(sec) \
2159 ((_aarch64_elf_section_data *) elf_section_data (sec))
2160
2161 /* The size of the thread control block which is defined to be two pointers. */
2162 #define TCB_SIZE ((ARCH_SIZE/8)*2)
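/* Purely illustrative: TCB_SIZE evaluates to 16 bytes for the 64-bit ABI
   (ARCH_SIZE == 64) and to 8 bytes for ILP32 (ARCH_SIZE == 32).  */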
2163
2164 struct elf_aarch64_local_symbol
2165 {
2166 unsigned int got_type;
2167 bfd_signed_vma got_refcount;
2168 bfd_vma got_offset;
2169
2170 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2171 offset is from the end of the jump table and reserved entries
2172 within the PLTGOT.
2173
2174 The magic value (bfd_vma) -1 indicates that an offset has not been
2175 allocated. */
2176 bfd_vma tlsdesc_got_jump_table_offset;
2177 };
2178
2179 struct elf_aarch64_obj_tdata
2180 {
2181 struct elf_obj_tdata root;
2182
2183 /* local symbol descriptors */
2184 struct elf_aarch64_local_symbol *locals;
2185
2186 /* Zero to warn when linking objects with incompatible enum sizes. */
2187 int no_enum_size_warning;
2188
2189 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2190 int no_wchar_size_warning;
2191 };
2192
2193 #define elf_aarch64_tdata(bfd) \
2194 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2195
2196 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2197
2198 #define is_aarch64_elf(bfd) \
2199 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2200 && elf_tdata (bfd) != NULL \
2201 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2202
2203 static bfd_boolean
2204 elfNN_aarch64_mkobject (bfd *abfd)
2205 {
2206 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2207 AARCH64_ELF_DATA);
2208 }
2209
2210 #define elf_aarch64_hash_entry(ent) \
2211 ((struct elf_aarch64_link_hash_entry *)(ent))
2212
2213 #define GOT_UNKNOWN 0
2214 #define GOT_NORMAL 1
2215 #define GOT_TLS_GD 2
2216 #define GOT_TLS_IE 4
2217 #define GOT_TLSDESC_GD 8
2218
2219 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
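/* Illustrative example: a symbol referenced through both a traditional GD
   sequence and a TLS-descriptor GD sequence would accumulate
   got_type == (GOT_TLS_GD | GOT_TLSDESC_GD), for which GOT_TLS_GD_ANY_P
   returns true.  */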
2220
2221 /* AArch64 ELF linker hash entry. */
2222 struct elf_aarch64_link_hash_entry
2223 {
2224 struct elf_link_hash_entry root;
2225
2226 /* Track dynamic relocs copied for this symbol. */
2227 struct elf_dyn_relocs *dyn_relocs;
2228
2229 /* Since PLT entries have variable size, we need to record the
2230 index into .got.plt instead of recomputing it from the PLT
2231 offset. */
2232 bfd_signed_vma plt_got_offset;
2233
2234 /* Bit mask representing the type of GOT entry(s), if any, required by
2235 this symbol. */
2236 unsigned int got_type;
2237
2238 /* A pointer to the most recently used stub hash entry against this
2239 symbol. */
2240 struct elf_aarch64_stub_hash_entry *stub_cache;
2241
2242 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2243 is from the end of the jump table and reserved entries within the PLTGOT.
2244
2245 The magic value (bfd_vma) -1 indicates that an offset has not
2246 been allocated. */
2247 bfd_vma tlsdesc_got_jump_table_offset;
2248 };
2249
2250 static unsigned int
2251 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2252 bfd *abfd,
2253 unsigned long r_symndx)
2254 {
2255 if (h)
2256 return elf_aarch64_hash_entry (h)->got_type;
2257
2258 if (! elf_aarch64_locals (abfd))
2259 return GOT_UNKNOWN;
2260
2261 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2262 }
2263
2264 /* Get the AArch64 elf linker hash table from a link_info structure. */
2265 #define elf_aarch64_hash_table(info) \
2266 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2267
2268 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2269 ((struct elf_aarch64_stub_hash_entry *) \
2270 bfd_hash_lookup ((table), (string), (create), (copy)))
2271
2272 /* AArch64 ELF linker hash table. */
2273 struct elf_aarch64_link_hash_table
2274 {
2275 /* The main hash table. */
2276 struct elf_link_hash_table root;
2277
2278 /* Nonzero to force PIC branch veneers. */
2279 int pic_veneer;
2280
2281 /* Fix erratum 835769. */
2282 int fix_erratum_835769;
2283
2284 /* Fix erratum 843419. */
2285 int fix_erratum_843419;
2286
2287 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
2288 int fix_erratum_843419_adr;
2289
2290 /* Don't apply link-time values for dynamic relocations. */
2291 int no_apply_dynamic_relocs;
2292
2293 /* The number of bytes in the initial entry in the PLT. */
2294 bfd_size_type plt_header_size;
2295
2296 /* The number of bytes in the subsequent PLT entries. */
2297 bfd_size_type plt_entry_size;
2298
2299 /* Small local sym cache. */
2300 struct sym_cache sym_cache;
2301
2302 /* For convenience in allocate_dynrelocs. */
2303 bfd *obfd;
2304
2305 /* The amount of space used by the reserved portion of the sgotplt
2306 section, plus whatever space is used by the jump slots. */
2307 bfd_vma sgotplt_jump_table_size;
2308
2309 /* The stub hash table. */
2310 struct bfd_hash_table stub_hash_table;
2311
2312 /* Linker stub bfd. */
2313 bfd *stub_bfd;
2314
2315 /* Linker call-backs. */
2316 asection *(*add_stub_section) (const char *, asection *);
2317 void (*layout_sections_again) (void);
2318
2319 /* Array to keep track of which stub sections have been created, and
2320 information on stub grouping. */
2321 struct map_stub
2322 {
2323 /* This is the section to which stubs in the group will be
2324 attached. */
2325 asection *link_sec;
2326 /* The stub section. */
2327 asection *stub_sec;
2328 } *stub_group;
2329
2330 /* Assorted information used by elfNN_aarch64_size_stubs. */
2331 unsigned int bfd_count;
2332 unsigned int top_index;
2333 asection **input_list;
2334
2335 /* The offset into splt of the PLT entry for the TLS descriptor
2336 resolver. Special values are 0, if not necessary (or not found
2337 to be necessary yet), and -1 if needed but not determined
2338 yet. */
2339 bfd_vma tlsdesc_plt;
2340
2341 /* The GOT offset for the lazy trampoline. Communicated to the
2342 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2343 indicates an offset is not allocated. */
2344 bfd_vma dt_tlsdesc_got;
2345
2346 /* Used by local STT_GNU_IFUNC symbols. */
2347 htab_t loc_hash_table;
2348 void * loc_hash_memory;
2349 };
2350
2351 /* Create an entry in an AArch64 ELF linker hash table. */
2352
2353 static struct bfd_hash_entry *
2354 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2355 struct bfd_hash_table *table,
2356 const char *string)
2357 {
2358 struct elf_aarch64_link_hash_entry *ret =
2359 (struct elf_aarch64_link_hash_entry *) entry;
2360
2361 /* Allocate the structure if it has not already been allocated by a
2362 subclass. */
2363 if (ret == NULL)
2364 ret = bfd_hash_allocate (table,
2365 sizeof (struct elf_aarch64_link_hash_entry));
2366 if (ret == NULL)
2367 return (struct bfd_hash_entry *) ret;
2368
2369 /* Call the allocation method of the superclass. */
2370 ret = ((struct elf_aarch64_link_hash_entry *)
2371 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2372 table, string));
2373 if (ret != NULL)
2374 {
2375 ret->dyn_relocs = NULL;
2376 ret->got_type = GOT_UNKNOWN;
2377 ret->plt_got_offset = (bfd_vma) - 1;
2378 ret->stub_cache = NULL;
2379 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2380 }
2381
2382 return (struct bfd_hash_entry *) ret;
2383 }
2384
2385 /* Initialize an entry in the stub hash table. */
2386
2387 static struct bfd_hash_entry *
2388 stub_hash_newfunc (struct bfd_hash_entry *entry,
2389 struct bfd_hash_table *table, const char *string)
2390 {
2391 /* Allocate the structure if it has not already been allocated by a
2392 subclass. */
2393 if (entry == NULL)
2394 {
2395 entry = bfd_hash_allocate (table,
2396 sizeof (struct
2397 elf_aarch64_stub_hash_entry));
2398 if (entry == NULL)
2399 return entry;
2400 }
2401
2402 /* Call the allocation method of the superclass. */
2403 entry = bfd_hash_newfunc (entry, table, string);
2404 if (entry != NULL)
2405 {
2406 struct elf_aarch64_stub_hash_entry *eh;
2407
2408 /* Initialize the local fields. */
2409 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2410 eh->adrp_offset = 0;
2411 eh->stub_sec = NULL;
2412 eh->stub_offset = 0;
2413 eh->target_value = 0;
2414 eh->target_section = NULL;
2415 eh->stub_type = aarch64_stub_none;
2416 eh->h = NULL;
2417 eh->id_sec = NULL;
2418 }
2419
2420 return entry;
2421 }
2422
2423 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2424 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2425 as global symbols. We reuse indx and dynstr_index for the local symbol
2426 hash since they aren't used by global symbols in this backend. */
2427
2428 static hashval_t
2429 elfNN_aarch64_local_htab_hash (const void *ptr)
2430 {
2431 struct elf_link_hash_entry *h
2432 = (struct elf_link_hash_entry *) ptr;
2433 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2434 }
2435
2436 /* Compare local hash entries. */
2437
2438 static int
2439 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2440 {
2441 struct elf_link_hash_entry *h1
2442 = (struct elf_link_hash_entry *) ptr1;
2443 struct elf_link_hash_entry *h2
2444 = (struct elf_link_hash_entry *) ptr2;
2445
2446 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2447 }
2448
2449 /* Find and/or create a hash entry for a local symbol. */
2450
2451 static struct elf_link_hash_entry *
2452 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2453 bfd *abfd, const Elf_Internal_Rela *rel,
2454 bfd_boolean create)
2455 {
2456 struct elf_aarch64_link_hash_entry e, *ret;
2457 asection *sec = abfd->sections;
2458 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2459 ELFNN_R_SYM (rel->r_info));
2460 void **slot;
2461
2462 e.root.indx = sec->id;
2463 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2464 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2465 create ? INSERT : NO_INSERT);
2466
2467 if (!slot)
2468 return NULL;
2469
2470 if (*slot)
2471 {
2472 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2473 return &ret->root;
2474 }
2475
2476 ret = (struct elf_aarch64_link_hash_entry *)
2477 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2478 sizeof (struct elf_aarch64_link_hash_entry));
2479 if (ret)
2480 {
2481 memset (ret, 0, sizeof (*ret));
2482 ret->root.indx = sec->id;
2483 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2484 ret->root.dynindx = -1;
2485 *slot = ret;
2486 }
2487 return &ret->root;
2488 }
2489
2490 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2491
2492 static void
2493 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2494 struct elf_link_hash_entry *dir,
2495 struct elf_link_hash_entry *ind)
2496 {
2497 struct elf_aarch64_link_hash_entry *edir, *eind;
2498
2499 edir = (struct elf_aarch64_link_hash_entry *) dir;
2500 eind = (struct elf_aarch64_link_hash_entry *) ind;
2501
2502 if (eind->dyn_relocs != NULL)
2503 {
2504 if (edir->dyn_relocs != NULL)
2505 {
2506 struct elf_dyn_relocs **pp;
2507 struct elf_dyn_relocs *p;
2508
2509 /* Add reloc counts against the indirect sym to the direct sym
2510 list. Merge any entries against the same section. */
2511 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2512 {
2513 struct elf_dyn_relocs *q;
2514
2515 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2516 if (q->sec == p->sec)
2517 {
2518 q->pc_count += p->pc_count;
2519 q->count += p->count;
2520 *pp = p->next;
2521 break;
2522 }
2523 if (q == NULL)
2524 pp = &p->next;
2525 }
2526 *pp = edir->dyn_relocs;
2527 }
2528
2529 edir->dyn_relocs = eind->dyn_relocs;
2530 eind->dyn_relocs = NULL;
2531 }
2532
2533 if (ind->root.type == bfd_link_hash_indirect)
2534 {
2535 /* Copy over PLT info. */
2536 if (dir->got.refcount <= 0)
2537 {
2538 edir->got_type = eind->got_type;
2539 eind->got_type = GOT_UNKNOWN;
2540 }
2541 }
2542
2543 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2544 }
2545
2546 /* Destroy an AArch64 elf linker hash table. */
2547
2548 static void
2549 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2550 {
2551 struct elf_aarch64_link_hash_table *ret
2552 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2553
2554 if (ret->loc_hash_table)
2555 htab_delete (ret->loc_hash_table);
2556 if (ret->loc_hash_memory)
2557 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2558
2559 bfd_hash_table_free (&ret->stub_hash_table);
2560 _bfd_elf_link_hash_table_free (obfd);
2561 }
2562
2563 /* Create an AArch64 elf linker hash table. */
2564
2565 static struct bfd_link_hash_table *
2566 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2567 {
2568 struct elf_aarch64_link_hash_table *ret;
2569 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2570
2571 ret = bfd_zmalloc (amt);
2572 if (ret == NULL)
2573 return NULL;
2574
2575 if (!_bfd_elf_link_hash_table_init
2576 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2577 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2578 {
2579 free (ret);
2580 return NULL;
2581 }
2582
2583 ret->plt_header_size = PLT_ENTRY_SIZE;
2584 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2585 ret->obfd = abfd;
2586 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2587
2588 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2589 sizeof (struct elf_aarch64_stub_hash_entry)))
2590 {
2591 _bfd_elf_link_hash_table_free (abfd);
2592 return NULL;
2593 }
2594
2595 ret->loc_hash_table = htab_try_create (1024,
2596 elfNN_aarch64_local_htab_hash,
2597 elfNN_aarch64_local_htab_eq,
2598 NULL);
2599 ret->loc_hash_memory = objalloc_create ();
2600 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2601 {
2602 elfNN_aarch64_link_hash_table_free (abfd);
2603 return NULL;
2604 }
2605 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2606
2607 return &ret->root.root;
2608 }
2609
2610 static bfd_boolean
2611 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2612 bfd_vma offset, bfd_vma value)
2613 {
2614 reloc_howto_type *howto;
2615 bfd_vma place;
2616
2617 howto = elfNN_aarch64_howto_from_type (r_type);
2618 place = (input_section->output_section->vma + input_section->output_offset
2619 + offset);
2620
2621 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2622 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2623 return _bfd_aarch64_elf_put_addend (input_bfd,
2624 input_section->contents + offset, r_type,
2625 howto, value);
2626 }
2627
2628 static enum elf_aarch64_stub_type
2629 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2630 {
2631 if (aarch64_valid_for_adrp_p (value, place))
2632 return aarch64_stub_adrp_branch;
2633 return aarch64_stub_long_branch;
2634 }
2635
2636 /* Determine the type of stub needed, if any, for a call. */
2637
2638 static enum elf_aarch64_stub_type
2639 aarch64_type_of_stub (asection *input_sec,
2640 const Elf_Internal_Rela *rel,
2641 asection *sym_sec,
2642 unsigned char st_type,
2643 bfd_vma destination)
2644 {
2645 bfd_vma location;
2646 bfd_signed_vma branch_offset;
2647 unsigned int r_type;
2648 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2649
2650 if (st_type != STT_FUNC
2651 && (sym_sec == input_sec))
2652 return stub_type;
2653
2654 /* Determine where the call point is. */
2655 location = (input_sec->output_offset
2656 + input_sec->output_section->vma + rel->r_offset);
2657
2658 branch_offset = (bfd_signed_vma) (destination - location);
2659
2660 r_type = ELFNN_R_TYPE (rel->r_info);
2661
2662 /* We don't want to redirect any old unconditional jump in this way,
2663 only one which is being used for a sibcall, where it is
2664 acceptable for the IP0 and IP1 registers to be clobbered. */
2665 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2666 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2667 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2668 {
2669 stub_type = aarch64_stub_long_branch;
2670 }
2671
2672 return stub_type;
2673 }
2674
2675 /* Build a name for an entry in the stub hash table. */
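/* As a purely illustrative example, a stub reached from the input section
   with id 0x12 for the global symbol "printf" with addend 0 is keyed as
   "00000012_printf+0"; stubs for local symbols instead use the
   "<section id>_<sym sec id>:<sym index>+<addend>" form built below.  */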
2676
2677 static char *
2678 elfNN_aarch64_stub_name (const asection *input_section,
2679 const asection *sym_sec,
2680 const struct elf_aarch64_link_hash_entry *hash,
2681 const Elf_Internal_Rela *rel)
2682 {
2683 char *stub_name;
2684 bfd_size_type len;
2685
2686 if (hash)
2687 {
2688 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2689 stub_name = bfd_malloc (len);
2690 if (stub_name != NULL)
2691 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2692 (unsigned int) input_section->id,
2693 hash->root.root.root.string,
2694 rel->r_addend);
2695 }
2696 else
2697 {
2698 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2699 stub_name = bfd_malloc (len);
2700 if (stub_name != NULL)
2701 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2702 (unsigned int) input_section->id,
2703 (unsigned int) sym_sec->id,
2704 (unsigned int) ELFNN_R_SYM (rel->r_info),
2705 rel->r_addend);
2706 }
2707
2708 return stub_name;
2709 }
2710
2711 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. For
2712 executable PLT slots where the executable never takes the address of those
2713 functions, the function symbols are not added to the hash table. */
2714
2715 static bfd_boolean
2716 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
2717 {
2718 if (h->plt.offset != (bfd_vma) -1
2719 && !h->def_regular
2720 && !h->pointer_equality_needed)
2721 return FALSE;
2722
2723 return _bfd_elf_hash_symbol (h);
2724 }
2725
2726
2727 /* Look up an entry in the stub hash. Stub entries are cached because
2728 creating the stub name takes a bit of time. */
2729
2730 static struct elf_aarch64_stub_hash_entry *
2731 elfNN_aarch64_get_stub_entry (const asection *input_section,
2732 const asection *sym_sec,
2733 struct elf_link_hash_entry *hash,
2734 const Elf_Internal_Rela *rel,
2735 struct elf_aarch64_link_hash_table *htab)
2736 {
2737 struct elf_aarch64_stub_hash_entry *stub_entry;
2738 struct elf_aarch64_link_hash_entry *h =
2739 (struct elf_aarch64_link_hash_entry *) hash;
2740 const asection *id_sec;
2741
2742 if ((input_section->flags & SEC_CODE) == 0)
2743 return NULL;
2744
2745 /* If this input section is part of a group of sections sharing one
2746 stub section, then use the id of the first section in the group.
2747 Stub names need to include a section id, as there may well be
2748 more than one stub used to reach, say, printf, and we need to
2749 distinguish between them. */
2750 id_sec = htab->stub_group[input_section->id].link_sec;
2751
2752 if (h != NULL && h->stub_cache != NULL
2753 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2754 {
2755 stub_entry = h->stub_cache;
2756 }
2757 else
2758 {
2759 char *stub_name;
2760
2761 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2762 if (stub_name == NULL)
2763 return NULL;
2764
2765 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2766 stub_name, FALSE, FALSE);
2767 if (h != NULL)
2768 h->stub_cache = stub_entry;
2769
2770 free (stub_name);
2771 }
2772
2773 return stub_entry;
2774 }
2775
2776
2777 /* Create a stub section. */
2778
2779 static asection *
2780 _bfd_aarch64_create_stub_section (asection *section,
2781 struct elf_aarch64_link_hash_table *htab)
2782 {
2783 size_t namelen;
2784 bfd_size_type len;
2785 char *s_name;
2786
2787 namelen = strlen (section->name);
2788 len = namelen + sizeof (STUB_SUFFIX);
2789 s_name = bfd_alloc (htab->stub_bfd, len);
2790 if (s_name == NULL)
2791 return NULL;
2792
2793 memcpy (s_name, section->name, namelen);
2794 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2795 return (*htab->add_stub_section) (s_name, section);
2796 }
2797
2798
2799 /* Find or create a stub section for a link section.
2800
2801 Find or create the stub section used to collect stubs attached to
2802 the specified link section. */
2803
2804 static asection *
2805 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2806 struct elf_aarch64_link_hash_table *htab)
2807 {
2808 if (htab->stub_group[link_section->id].stub_sec == NULL)
2809 htab->stub_group[link_section->id].stub_sec
2810 = _bfd_aarch64_create_stub_section (link_section, htab);
2811 return htab->stub_group[link_section->id].stub_sec;
2812 }
2813
2814
2815 /* Find or create a stub section in the stub group for an input
2816 section. */
2817
2818 static asection *
2819 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2820 struct elf_aarch64_link_hash_table *htab)
2821 {
2822 asection *link_sec = htab->stub_group[section->id].link_sec;
2823 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2824 }
2825
2826
2827 /* Add a new stub entry in the stub group associated with an input
2828 section to the stub hash. Not all fields of the new stub entry are
2829 initialised. */
2830
2831 static struct elf_aarch64_stub_hash_entry *
2832 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2833 asection *section,
2834 struct elf_aarch64_link_hash_table *htab)
2835 {
2836 asection *link_sec;
2837 asection *stub_sec;
2838 struct elf_aarch64_stub_hash_entry *stub_entry;
2839
2840 link_sec = htab->stub_group[section->id].link_sec;
2841 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2842
2843 /* Enter this entry into the linker stub hash table. */
2844 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2845 TRUE, FALSE);
2846 if (stub_entry == NULL)
2847 {
2848 /* xgettext:c-format */
2849 _bfd_error_handler (_("%s: cannot create stub entry %s"),
2850 section->owner, stub_name);
2851 return NULL;
2852 }
2853
2854 stub_entry->stub_sec = stub_sec;
2855 stub_entry->stub_offset = 0;
2856 stub_entry->id_sec = link_sec;
2857
2858 return stub_entry;
2859 }
2860
2861 /* Add a new stub entry in the final stub section to the stub hash.
2862 Not all fields of the new stub entry are initialised. */
2863
2864 static struct elf_aarch64_stub_hash_entry *
2865 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2866 asection *link_section,
2867 struct elf_aarch64_link_hash_table *htab)
2868 {
2869 asection *stub_sec;
2870 struct elf_aarch64_stub_hash_entry *stub_entry;
2871
2872 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2873 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2874 TRUE, FALSE);
2875 if (stub_entry == NULL)
2876 {
2877 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
2878 return NULL;
2879 }
2880
2881 stub_entry->stub_sec = stub_sec;
2882 stub_entry->stub_offset = 0;
2883 stub_entry->id_sec = link_section;
2884
2885 return stub_entry;
2886 }
2887
2888
2889 static bfd_boolean
2890 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2891 void *in_arg ATTRIBUTE_UNUSED)
2892 {
2893 struct elf_aarch64_stub_hash_entry *stub_entry;
2894 asection *stub_sec;
2895 bfd *stub_bfd;
2896 bfd_byte *loc;
2897 bfd_vma sym_value;
2898 bfd_vma veneered_insn_loc;
2899 bfd_vma veneer_entry_loc;
2900 bfd_signed_vma branch_offset = 0;
2901 unsigned int template_size;
2902 const uint32_t *template;
2903 unsigned int i;
2904
2905 /* Massage our args to the form they really have. */
2906 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2907
2908 stub_sec = stub_entry->stub_sec;
2909
2910 /* Make a note of the offset within the stubs for this entry. */
2911 stub_entry->stub_offset = stub_sec->size;
2912 loc = stub_sec->contents + stub_entry->stub_offset;
2913
2914 stub_bfd = stub_sec->owner;
2915
2916 /* This is the address of the stub destination. */
2917 sym_value = (stub_entry->target_value
2918 + stub_entry->target_section->output_offset
2919 + stub_entry->target_section->output_section->vma);
2920
2921 if (stub_entry->stub_type == aarch64_stub_long_branch)
2922 {
2923 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2924 + stub_sec->output_offset);
2925
2926 /* See if we can relax the stub. */
2927 if (aarch64_valid_for_adrp_p (sym_value, place))
2928 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2929 }
2930
2931 switch (stub_entry->stub_type)
2932 {
2933 case aarch64_stub_adrp_branch:
2934 template = aarch64_adrp_branch_stub;
2935 template_size = sizeof (aarch64_adrp_branch_stub);
2936 break;
2937 case aarch64_stub_long_branch:
2938 template = aarch64_long_branch_stub;
2939 template_size = sizeof (aarch64_long_branch_stub);
2940 break;
2941 case aarch64_stub_erratum_835769_veneer:
2942 template = aarch64_erratum_835769_stub;
2943 template_size = sizeof (aarch64_erratum_835769_stub);
2944 break;
2945 case aarch64_stub_erratum_843419_veneer:
2946 template = aarch64_erratum_843419_stub;
2947 template_size = sizeof (aarch64_erratum_843419_stub);
2948 break;
2949 default:
2950 abort ();
2951 }
2952
2953 for (i = 0; i < (template_size / sizeof template[0]); i++)
2954 {
2955 bfd_putl32 (template[i], loc);
2956 loc += 4;
2957 }
2958
2959 template_size = (template_size + 7) & ~7;
2960 stub_sec->size += template_size;
2961
2962 switch (stub_entry->stub_type)
2963 {
2964 case aarch64_stub_adrp_branch:
2965 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2966 stub_entry->stub_offset, sym_value))
2967 /* The stub would not have been relaxed if the offset was out
2968 of range. */
2969 BFD_FAIL ();
2970
2971 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2972 stub_entry->stub_offset + 4, sym_value))
2973 BFD_FAIL ();
2974 break;
2975
2976 case aarch64_stub_long_branch:
2977 /* We want the value relative to the address 12 bytes back from the
2978 value itself. */
2979 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2980 stub_entry->stub_offset + 16, sym_value + 12))
2981 BFD_FAIL ();
2982 break;
2983
2984 case aarch64_stub_erratum_835769_veneer:
2985 veneered_insn_loc = stub_entry->target_section->output_section->vma
2986 + stub_entry->target_section->output_offset
2987 + stub_entry->target_value;
2988 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2989 + stub_entry->stub_sec->output_offset
2990 + stub_entry->stub_offset;
2991 branch_offset = veneered_insn_loc - veneer_entry_loc;
2992 branch_offset >>= 2;
2993 branch_offset &= 0x3ffffff;
2994 bfd_putl32 (stub_entry->veneered_insn,
2995 stub_sec->contents + stub_entry->stub_offset);
2996 bfd_putl32 (template[1] | branch_offset,
2997 stub_sec->contents + stub_entry->stub_offset + 4);
2998 break;
2999
3000 case aarch64_stub_erratum_843419_veneer:
3001 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3002 stub_entry->stub_offset + 4, sym_value + 4))
3003 BFD_FAIL ();
3004 break;
3005
3006 default:
3007 abort ();
3008 }
3009
3010 return TRUE;
3011 }
3012
3013 /* As above, but don't actually build the stub. Just bump offset so
3014 we know stub section sizes. */
3015
3016 static bfd_boolean
3017 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
3018 void *in_arg ATTRIBUTE_UNUSED)
3019 {
3020 struct elf_aarch64_stub_hash_entry *stub_entry;
3021 int size;
3022
3023 /* Massage our args to the form they really have. */
3024 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3025
3026 switch (stub_entry->stub_type)
3027 {
3028 case aarch64_stub_adrp_branch:
3029 size = sizeof (aarch64_adrp_branch_stub);
3030 break;
3031 case aarch64_stub_long_branch:
3032 size = sizeof (aarch64_long_branch_stub);
3033 break;
3034 case aarch64_stub_erratum_835769_veneer:
3035 size = sizeof (aarch64_erratum_835769_stub);
3036 break;
3037 case aarch64_stub_erratum_843419_veneer:
3038 size = sizeof (aarch64_erratum_843419_stub);
3039 break;
3040 default:
3041 abort ();
3042 }
3043
3044 size = (size + 7) & ~7;
3045 stub_entry->stub_sec->size += size;
3046 return TRUE;
3047 }
3048
3049 /* External entry points for sizing and building linker stubs. */
3050
3051 /* Set up various things so that we can make a list of input sections
3052 for each output section included in the link. Returns -1 on error,
3053 0 when no stubs will be needed, and 1 on success. */
3054
3055 int
3056 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3057 struct bfd_link_info *info)
3058 {
3059 bfd *input_bfd;
3060 unsigned int bfd_count;
3061 unsigned int top_id, top_index;
3062 asection *section;
3063 asection **input_list, **list;
3064 bfd_size_type amt;
3065 struct elf_aarch64_link_hash_table *htab =
3066 elf_aarch64_hash_table (info);
3067
3068 if (!is_elf_hash_table (htab))
3069 return 0;
3070
3071 /* Count the number of input BFDs and find the top input section id. */
3072 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3073 input_bfd != NULL; input_bfd = input_bfd->link.next)
3074 {
3075 bfd_count += 1;
3076 for (section = input_bfd->sections;
3077 section != NULL; section = section->next)
3078 {
3079 if (top_id < section->id)
3080 top_id = section->id;
3081 }
3082 }
3083 htab->bfd_count = bfd_count;
3084
3085 amt = sizeof (struct map_stub) * (top_id + 1);
3086 htab->stub_group = bfd_zmalloc (amt);
3087 if (htab->stub_group == NULL)
3088 return -1;
3089
3090 /* We can't use output_bfd->section_count here to find the top output
3091 section index as some sections may have been removed, and
3092 _bfd_strip_section_from_output doesn't renumber the indices. */
3093 for (section = output_bfd->sections, top_index = 0;
3094 section != NULL; section = section->next)
3095 {
3096 if (top_index < section->index)
3097 top_index = section->index;
3098 }
3099
3100 htab->top_index = top_index;
3101 amt = sizeof (asection *) * (top_index + 1);
3102 input_list = bfd_malloc (amt);
3103 htab->input_list = input_list;
3104 if (input_list == NULL)
3105 return -1;
3106
3107 /* For sections we aren't interested in, mark their entries with a
3108 value we can check later. */
3109 list = input_list + top_index;
3110 do
3111 *list = bfd_abs_section_ptr;
3112 while (list-- != input_list);
3113
3114 for (section = output_bfd->sections;
3115 section != NULL; section = section->next)
3116 {
3117 if ((section->flags & SEC_CODE) != 0)
3118 input_list[section->index] = NULL;
3119 }
3120
3121 return 1;
3122 }
3123
3124 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3125 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3126
3127 /* The linker repeatedly calls this function for each input section,
3128 in the order that input sections are linked into output sections.
3129 Build lists of input sections to determine groupings between which
3130 we may insert linker stubs. */
3131
3132 void
3133 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3134 {
3135 struct elf_aarch64_link_hash_table *htab =
3136 elf_aarch64_hash_table (info);
3137
3138 if (isec->output_section->index <= htab->top_index)
3139 {
3140 asection **list = htab->input_list + isec->output_section->index;
3141
3142 if (*list != bfd_abs_section_ptr)
3143 {
3144 /* Steal the link_sec pointer for our list. */
3145 /* This happens to make the list in reverse order,
3146 which is what we want. */
3147 PREV_SEC (isec) = *list;
3148 *list = isec;
3149 }
3150 }
3151 }
3152
3153 /* See whether we can group stub sections together. Grouping stub
3154 sections may result in fewer stubs. More importantly, we need to
3155 put all .init* and .fini* stubs at the beginning of the .init or
3156 .fini output sections respectively, because glibc splits the
3157 _init and _fini functions into multiple parts. Putting a stub in
3158 the middle of a function is not a good idea. */
3159
3160 static void
3161 group_sections (struct elf_aarch64_link_hash_table *htab,
3162 bfd_size_type stub_group_size,
3163 bfd_boolean stubs_always_before_branch)
3164 {
3165 asection **list = htab->input_list + htab->top_index;
3166
3167 do
3168 {
3169 asection *tail = *list;
3170
3171 if (tail == bfd_abs_section_ptr)
3172 continue;
3173
3174 while (tail != NULL)
3175 {
3176 asection *curr;
3177 asection *prev;
3178 bfd_size_type total;
3179
3180 curr = tail;
3181 total = tail->size;
3182 while ((prev = PREV_SEC (curr)) != NULL
3183 && ((total += curr->output_offset - prev->output_offset)
3184 < stub_group_size))
3185 curr = prev;
3186
3187 /* OK, the size from the start of CURR to the end is less
3188 than stub_group_size and thus can be handled by one stub
3189 section. (Or the tail section is itself larger than
3190 stub_group_size, in which case we may be toast.)
3191 We should really be keeping track of the total size of
3192 stubs added here, as stubs contribute to the final output
3193 section size. */
3194 do
3195 {
3196 prev = PREV_SEC (tail);
3197 /* Set up this stub group. */
3198 htab->stub_group[tail->id].link_sec = curr;
3199 }
3200 while (tail != curr && (tail = prev) != NULL);
3201
3202 /* But wait, there's more! Input sections up to stub_group_size
3203 bytes before the stub section can be handled by it too. */
3204 if (!stubs_always_before_branch)
3205 {
3206 total = 0;
3207 while (prev != NULL
3208 && ((total += tail->output_offset - prev->output_offset)
3209 < stub_group_size))
3210 {
3211 tail = prev;
3212 prev = PREV_SEC (tail);
3213 htab->stub_group[tail->id].link_sec = curr;
3214 }
3215 }
3216 tail = prev;
3217 }
3218 }
3219 while (list-- != htab->input_list);
3220
3221 free (htab->input_list);
3222 }
3223
3224 #undef PREV_SEC
3225
3226 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3227
3228 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3229 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3230 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3231 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3232 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3233 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3234
3235 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3236 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3237 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3238 #define AARCH64_ZR 0x1f
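/* Worked example (illustrative only): for the A64 encoding 0xf9400020,
   i.e. "ldr x0, [x1]", AARCH64_RT (insn) extracts bits [4:0] giving 0 and
   AARCH64_RN (insn) extracts bits [9:5] giving 1.  */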
3239
3240 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3241 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
3242
3243 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3244 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3245 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3246 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3247 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3248 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3249 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3250 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3251 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3252 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3253 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3254 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3255 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3256 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3257 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3258 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3259 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3260 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3261
3262 /* Classify an INSN if it is indeed a load/store instruction.
3263
3264 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
3265
3266 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3267 is set equal to RT.
3268
3269 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
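/* For instance (purely illustrative), "ldp x0, x1, [sp]" would be
   reported with PAIR and LOAD true, RT == 0 and RT2 == 1, while
   "ldr x0, [x1]" would be reported with PAIR false and RT == RT2 == 0.  */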
3270
3271 static bfd_boolean
3272 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3273 bfd_boolean *pair, bfd_boolean *load)
3274 {
3275 uint32_t opcode;
3276 unsigned int r;
3277 uint32_t opc = 0;
3278 uint32_t v = 0;
3279 uint32_t opc_v = 0;
3280
3281 /* Bail out quickly if INSN doesn't fall into the load-store
3282 encoding space. */
3283 if (!AARCH64_LDST (insn))
3284 return FALSE;
3285
3286 *pair = FALSE;
3287 *load = FALSE;
3288 if (AARCH64_LDST_EX (insn))
3289 {
3290 *rt = AARCH64_RT (insn);
3291 *rt2 = *rt;
3292 if (AARCH64_BIT (insn, 21) == 1)
3293 {
3294 *pair = TRUE;
3295 *rt2 = AARCH64_RT2 (insn);
3296 }
3297 *load = AARCH64_LD (insn);
3298 return TRUE;
3299 }
3300 else if (AARCH64_LDST_NAP (insn)
3301 || AARCH64_LDSTP_PI (insn)
3302 || AARCH64_LDSTP_O (insn)
3303 || AARCH64_LDSTP_PRE (insn))
3304 {
3305 *pair = TRUE;
3306 *rt = AARCH64_RT (insn);
3307 *rt2 = AARCH64_RT2 (insn);
3308 *load = AARCH64_LD (insn);
3309 return TRUE;
3310 }
3311 else if (AARCH64_LDST_PCREL (insn)
3312 || AARCH64_LDST_UI (insn)
3313 || AARCH64_LDST_PIIMM (insn)
3314 || AARCH64_LDST_U (insn)
3315 || AARCH64_LDST_PREIMM (insn)
3316 || AARCH64_LDST_RO (insn)
3317 || AARCH64_LDST_UIMM (insn))
3318 {
3319 *rt = AARCH64_RT (insn);
3320 *rt2 = *rt;
3321 if (AARCH64_LDST_PCREL (insn))
3322 *load = TRUE;
3323 opc = AARCH64_BITS (insn, 22, 2);
3324 v = AARCH64_BIT (insn, 26);
3325 opc_v = opc | (v << 2);
3326 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3327 || opc_v == 5 || opc_v == 7);
3328 return TRUE;
3329 }
3330 else if (AARCH64_LDST_SIMD_M (insn)
3331 || AARCH64_LDST_SIMD_M_PI (insn))
3332 {
3333 *rt = AARCH64_RT (insn);
3334 *load = AARCH64_BIT (insn, 22);
3335 opcode = (insn >> 12) & 0xf;
3336 switch (opcode)
3337 {
3338 case 0:
3339 case 2:
3340 *rt2 = *rt + 3;
3341 break;
3342
3343 case 4:
3344 case 6:
3345 *rt2 = *rt + 2;
3346 break;
3347
3348 case 7:
3349 *rt2 = *rt;
3350 break;
3351
3352 case 8:
3353 case 10:
3354 *rt2 = *rt + 1;
3355 break;
3356
3357 default:
3358 return FALSE;
3359 }
3360 return TRUE;
3361 }
3362 else if (AARCH64_LDST_SIMD_S (insn)
3363 || AARCH64_LDST_SIMD_S_PI (insn))
3364 {
3365 *rt = AARCH64_RT (insn);
3366 r = (insn >> 21) & 1;
3367 *load = AARCH64_BIT (insn, 22);
3368 opcode = (insn >> 13) & 0x7;
3369 switch (opcode)
3370 {
3371 case 0:
3372 case 2:
3373 case 4:
3374 *rt2 = *rt + r;
3375 break;
3376
3377 case 1:
3378 case 3:
3379 case 5:
3380 *rt2 = *rt + (r == 0 ? 2 : 3);
3381 break;
3382
3383 case 6:
3384 *rt2 = *rt + r;
3385 break;
3386
3387 case 7:
3388 *rt2 = *rt + (r == 0 ? 2 : 3);
3389 break;
3390
3391 default:
3392 return FALSE;
3393 }
3394 return TRUE;
3395 }
3396
3397 return FALSE;
3398 }
3399
3400 /* Return TRUE if INSN is multiply-accumulate. */
3401
3402 static bfd_boolean
3403 aarch64_mlxl_p (uint32_t insn)
3404 {
3405 uint32_t op31 = AARCH64_OP31 (insn);
3406
3407 if (AARCH64_MAC (insn)
3408 && (op31 == 0 || op31 == 1 || op31 == 5)
3409 /* Exclude MUL instructions which are encoded as a multiply-accumulate
3410 with RA = XZR. */
3411 && AARCH64_RA (insn) != AARCH64_ZR)
3412 return TRUE;
3413
3414 return FALSE;
3415 }
3416
3417 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3418 it is possible for a 64-bit multiply-accumulate instruction to generate an
3419 incorrect result. The details are quite complex and hard to
3420 determine statically, since branches in the code may exist in some
3421 circumstances, but all cases end with a memory (load, store, or
3422 prefetch) instruction followed immediately by the multiply-accumulate
3423 operation. We employ a linker patching technique, by moving the potentially
3424 affected multiply-accumulate instruction into a patch region and replacing
3425 the original instruction with a branch to the patch. This function checks
3426 if INSN_1 is the memory operation followed by a multiply-accumulate
3427 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3428 if INSN_1 and INSN_2 are safe. */
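
/* As a purely illustrative example (hypothetical instructions, not
 taken from any particular object), the pair

 ldr x10, [x2, #8] <- memory operation (INSN_1)
 madd x0, x1, x3, x4 <- 64-bit multiply-accumulate (INSN_2)

 would be flagged, whereas

 ldr x1, [x2, #8]
 madd x0, x1, x3, x4

 has a true (RAW) dependency on the loaded register and is treated
 as safe. */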
3429
3430 static bfd_boolean
3431 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3432 {
3433 uint32_t rt;
3434 uint32_t rt2;
3435 uint32_t rn;
3436 uint32_t rm;
3437 uint32_t ra;
3438 bfd_boolean pair;
3439 bfd_boolean load;
3440
3441 if (aarch64_mlxl_p (insn_2)
3442 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3443 {
3444 /* Any SIMD memory op is independent of the subsequent MLA
3445 by definition of the erratum. */
3446 if (AARCH64_BIT (insn_1, 26))
3447 return TRUE;
3448
3449 /* If not SIMD, check for integer memory ops and MLA relationship. */
3450 rn = AARCH64_RN (insn_2);
3451 ra = AARCH64_RA (insn_2);
3452 rm = AARCH64_RM (insn_2);
3453
3454 /* If this is a load and there's a true (RAW) dependency, we are safe
3455 and this is not an erratum sequence. */
3456 if (load &&
3457 (rt == rn || rt == rm || rt == ra
3458 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3459 return FALSE;
3460
3461 /* We conservatively put out stubs for all other cases (including
3462 writebacks). */
3463 return TRUE;
3464 }
3465
3466 return FALSE;
3467 }
3468
3469 /* Used to order a list of mapping symbols by address. */
3470
3471 static int
3472 elf_aarch64_compare_mapping (const void *a, const void *b)
3473 {
3474 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3475 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3476
3477 if (amap->vma > bmap->vma)
3478 return 1;
3479 else if (amap->vma < bmap->vma)
3480 return -1;
3481 else if (amap->type > bmap->type)
3482 /* Ensure results do not depend on the host qsort for objects with
3483 multiple mapping symbols at the same address by sorting on type
3484 after vma. */
3485 return 1;
3486 else if (amap->type < bmap->type)
3487 return -1;
3488 else
3489 return 0;
3490 }
3491
3492
3493 static char *
3494 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3495 {
3496 char *stub_name = (char *) bfd_malloc
3497 (strlen ("__erratum_835769_veneer_") + 16);
3498 if (stub_name != NULL)
 sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3499 return stub_name;
3500 }
3501
3502 /* Scan for Cortex-A53 erratum 835769 sequence.
3503
3504 Return TRUE on a successful scan, FALSE on abnormal termination. */
3505
3506 static bfd_boolean
3507 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3508 struct bfd_link_info *info,
3509 unsigned int *num_fixes_p)
3510 {
3511 asection *section;
3512 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3513 unsigned int num_fixes = *num_fixes_p;
3514
3515 if (htab == NULL)
3516 return TRUE;
3517
3518 for (section = input_bfd->sections;
3519 section != NULL;
3520 section = section->next)
3521 {
3522 bfd_byte *contents = NULL;
3523 struct _aarch64_elf_section_data *sec_data;
3524 unsigned int span;
3525
3526 if (elf_section_type (section) != SHT_PROGBITS
3527 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3528 || (section->flags & SEC_EXCLUDE) != 0
3529 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3530 || (section->output_section == bfd_abs_section_ptr))
3531 continue;
3532
3533 if (elf_section_data (section)->this_hdr.contents != NULL)
3534 contents = elf_section_data (section)->this_hdr.contents;
3535 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3536 return FALSE;
3537
3538 sec_data = elf_aarch64_section_data (section);
3539
3540 qsort (sec_data->map, sec_data->mapcount,
3541 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3542
3543 for (span = 0; span < sec_data->mapcount; span++)
3544 {
3545 unsigned int span_start = sec_data->map[span].vma;
3546 unsigned int span_end = ((span == sec_data->mapcount - 1)
3547 ? sec_data->map[0].vma + section->size
3548 : sec_data->map[span + 1].vma);
3549 unsigned int i;
3550 char span_type = sec_data->map[span].type;
3551
3552 if (span_type == 'd')
3553 continue;
3554
3555 for (i = span_start; i + 4 < span_end; i += 4)
3556 {
3557 uint32_t insn_1 = bfd_getl32 (contents + i);
3558 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3559
3560 if (aarch64_erratum_sequence (insn_1, insn_2))
3561 {
3562 struct elf_aarch64_stub_hash_entry *stub_entry;
3563 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3564 if (! stub_name)
3565 return FALSE;
3566
3567 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3568 section,
3569 htab);
3570 if (! stub_entry)
3571 return FALSE;
3572
3573 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3574 stub_entry->target_section = section;
3575 stub_entry->target_value = i + 4;
3576 stub_entry->veneered_insn = insn_2;
3577 stub_entry->output_name = stub_name;
3578 num_fixes++;
3579 }
3580 }
3581 }
3582 if (elf_section_data (section)->this_hdr.contents == NULL)
3583 free (contents);
3584 }
3585
3586 *num_fixes_p = num_fixes;
3587
3588 return TRUE;
3589 }
3590
3591
3592 /* Test if instruction INSN is ADRP. */
3593
3594 static bfd_boolean
3595 _bfd_aarch64_adrp_p (uint32_t insn)
3596 {
3597 return ((insn & 0x9f000000) == 0x90000000);
3598 }
3599
3600
3601 /* Helper predicate to look for Cortex-A53 erratum 843419 sequence 1. */
3602
3603 static bfd_boolean
3604 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3605 uint32_t insn_3)
3606 {
3607 uint32_t rt;
3608 uint32_t rt2;
3609 bfd_boolean pair;
3610 bfd_boolean load;
3611
3612 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3613 && (!pair
3614 || (pair && !load))
3615 && AARCH64_LDST_UIMM (insn_3)
3616 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3617 }
3618
3619
3620 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3621
3622 Return TRUE if section CONTENTS at offset I contains one of the
3623 erratum 843419 sequences; otherwise return FALSE. If a sequence is
3624 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3625 instruction in the sequence.
3626 */
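
/* As an illustrative (hypothetical) example of the first form checked
 below, with the ADRP at a 0xff8/0xffc offset within its 4KB page:

 0xffc: adrp x0, sym (insn_1)
 0x1000: ldr x1, [x2, #8] (insn_2, any memory op other than a load pair)
 0x1004: ldr x3, [x0, #:lo12:sym] (insn_3, uses the ADRP result as base)

 The second form permits one arbitrary instruction between insn_2 and
 the final load/store. */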
3627
3628 static bfd_boolean
3629 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3630 bfd_vma i, bfd_vma span_end,
3631 bfd_vma *p_veneer_i)
3632 {
3633 uint32_t insn_1 = bfd_getl32 (contents + i);
3634
3635 if (!_bfd_aarch64_adrp_p (insn_1))
3636 return FALSE;
3637
3638 if (span_end < i + 12)
3639 return FALSE;
3640
3641 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3642 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3643
3644 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3645 return FALSE;
3646
3647 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3648 {
3649 *p_veneer_i = i + 8;
3650 return TRUE;
3651 }
3652
3653 if (span_end < i + 16)
3654 return FALSE;
3655
3656 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3657
3658 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3659 {
3660 *p_veneer_i = i + 12;
3661 return TRUE;
3662 }
3663
3664 return FALSE;
3665 }
3666
3667
3668 /* Resize all stub sections. */
3669
3670 static void
3671 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3672 {
3673 asection *section;
3674
3675 /* OK, we've added some stubs. Find out the new size of the
3676 stub sections. */
3677 for (section = htab->stub_bfd->sections;
3678 section != NULL; section = section->next)
3679 {
3680 /* Ignore non-stub sections. */
3681 if (!strstr (section->name, STUB_SUFFIX))
3682 continue;
3683 section->size = 0;
3684 }
3685
3686 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3687
3688 for (section = htab->stub_bfd->sections;
3689 section != NULL; section = section->next)
3690 {
3691 if (!strstr (section->name, STUB_SUFFIX))
3692 continue;
3693
3694 if (section->size)
3695 section->size += 4;
3696
3697 /* Ensure all stub sections have a size which is a multiple of
3698 4096. This is important in order to ensure that the insertion
3699 of stub sections does not in itself move existing code around
3700 in such a way that new errata sequences are created. */
3701 if (htab->fix_erratum_843419)
3702 if (section->size)
3703 section->size = BFD_ALIGN (section->size, 0x1000);
3704 }
3705 }
3706
3707
3708 /* Construct an erratum 843419 workaround stub name.
3709 */
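
/* For example, with hypothetical values (input bfd id 3, section id
 0x12, erratum offset 0x5e0) the name produced below would be
 "e843419@0003_00000012_5e0". */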
3710
3711 static char *
3712 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3713 bfd_vma offset)
3714 {
3715 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3716 char *stub_name = bfd_malloc (len);
3717
3718 if (stub_name != NULL)
3719 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3720 input_section->owner->id,
3721 input_section->id,
3722 offset);
3723 return stub_name;
3724 }
3725
3726 /* Build a stub_entry structure describing an 843419 fixup.
3727
3728 The stub_entry constructed is populated with the bit pattern INSN
3729 of the instruction located at OFFSET within input SECTION.
3730
3731 Returns TRUE on success. */
3732
3733 static bfd_boolean
3734 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3735 bfd_vma adrp_offset,
3736 bfd_vma ldst_offset,
3737 asection *section,
3738 struct bfd_link_info *info)
3739 {
3740 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3741 char *stub_name;
3742 struct elf_aarch64_stub_hash_entry *stub_entry;
3743
3744 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3745 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3746 FALSE, FALSE);
3747 if (stub_entry)
3748 {
3749 free (stub_name);
3750 return TRUE;
3751 }
3752
3753 /* We always place an 843419 workaround veneer in the stub section
3754 attached to the input section in which an erratum sequence has
3755 been found. This ensures that later in the link process (in
3756 elfNN_aarch64_write_section) when we copy the veneered
3757 instruction from the input section into the stub section the
3758 copied instruction will have had any relocations applied to it.
3759 If we placed workaround veneers in any other stub section then we
3760 could not assume that all relocations have been processed on the
3761 corresponding input section at the point we output the stub
3762 section.
3763 */
3764
3765 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3766 if (stub_entry == NULL)
3767 {
3768 free (stub_name);
3769 return FALSE;
3770 }
3771
3772 stub_entry->adrp_offset = adrp_offset;
3773 stub_entry->target_value = ldst_offset;
3774 stub_entry->target_section = section;
3775 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3776 stub_entry->veneered_insn = insn;
3777 stub_entry->output_name = stub_name;
3778
3779 return TRUE;
3780 }
3781
3782
3783 /* Scan an input section looking for the signature of erratum 843419.
3784
3785 Scans input SECTION in INPUT_BFD looking for erratum 843419
3786 signatures; for each signature found, a stub_entry is created
3787 describing the location of the erratum for subsequent fixup.
3788
3789 Return TRUE on successful scan, FALSE on failure to scan.
3790 */
3791
3792 static bfd_boolean
3793 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3794 struct bfd_link_info *info)
3795 {
3796 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3797
3798 if (htab == NULL)
3799 return TRUE;
3800
3801 if (elf_section_type (section) != SHT_PROGBITS
3802 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3803 || (section->flags & SEC_EXCLUDE) != 0
3804 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3805 || (section->output_section == bfd_abs_section_ptr))
3806 return TRUE;
3807
3808 do
3809 {
3810 bfd_byte *contents = NULL;
3811 struct _aarch64_elf_section_data *sec_data;
3812 unsigned int span;
3813
3814 if (elf_section_data (section)->this_hdr.contents != NULL)
3815 contents = elf_section_data (section)->this_hdr.contents;
3816 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3817 return FALSE;
3818
3819 sec_data = elf_aarch64_section_data (section);
3820
3821 qsort (sec_data->map, sec_data->mapcount,
3822 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3823
3824 for (span = 0; span < sec_data->mapcount; span++)
3825 {
3826 unsigned int span_start = sec_data->map[span].vma;
3827 unsigned int span_end = ((span == sec_data->mapcount - 1)
3828 ? sec_data->map[0].vma + section->size
3829 : sec_data->map[span + 1].vma);
3830 unsigned int i;
3831 char span_type = sec_data->map[span].type;
3832
3833 if (span_type == 'd')
3834 continue;
3835
3836 for (i = span_start; i + 8 < span_end; i += 4)
3837 {
3838 bfd_vma vma = (section->output_section->vma
3839 + section->output_offset
3840 + i);
3841 bfd_vma veneer_i;
3842
3843 if (_bfd_aarch64_erratum_843419_p
3844 (contents, vma, i, span_end, &veneer_i))
3845 {
3846 uint32_t insn = bfd_getl32 (contents + veneer_i);
3847
3848 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3849 section, info))
3850 return FALSE;
3851 }
3852 }
3853 }
3854
3855 if (elf_section_data (section)->this_hdr.contents == NULL)
3856 free (contents);
3857 }
3858 while (0);
3859
3860 return TRUE;
3861 }
3862
3863
3864 /* Determine and set the size of the stub section for a final link.
3865
3866 The basic idea here is to examine all the relocations looking for
3867 PC-relative calls to a target that is unreachable with a "bl"
3868 instruction. */
3869
3870 bfd_boolean
3871 elfNN_aarch64_size_stubs (bfd *output_bfd,
3872 bfd *stub_bfd,
3873 struct bfd_link_info *info,
3874 bfd_signed_vma group_size,
3875 asection * (*add_stub_section) (const char *,
3876 asection *),
3877 void (*layout_sections_again) (void))
3878 {
3879 bfd_size_type stub_group_size;
3880 bfd_boolean stubs_always_before_branch;
3881 bfd_boolean stub_changed = FALSE;
3882 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3883 unsigned int num_erratum_835769_fixes = 0;
3884
3885 /* Propagate mach to stub bfd, because it may not have been
3886 finalized when we created stub_bfd. */
3887 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3888 bfd_get_mach (output_bfd));
3889
3890 /* Stash our params away. */
3891 htab->stub_bfd = stub_bfd;
3892 htab->add_stub_section = add_stub_section;
3893 htab->layout_sections_again = layout_sections_again;
3894 stubs_always_before_branch = group_size < 0;
3895 if (group_size < 0)
3896 stub_group_size = -group_size;
3897 else
3898 stub_group_size = group_size;
3899
3900 if (stub_group_size == 1)
3901 {
3902 /* Default values. */
3903 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3904 stub_group_size = 127 * 1024 * 1024;
3905 }
3906
3907 group_sections (htab, stub_group_size, stubs_always_before_branch);
3908
3909 (*htab->layout_sections_again) ();
3910
3911 if (htab->fix_erratum_835769)
3912 {
3913 bfd *input_bfd;
3914
3915 for (input_bfd = info->input_bfds;
3916 input_bfd != NULL; input_bfd = input_bfd->link.next)
3917 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3918 &num_erratum_835769_fixes))
3919 return FALSE;
3920
3921 _bfd_aarch64_resize_stubs (htab);
3922 (*htab->layout_sections_again) ();
3923 }
3924
3925 if (htab->fix_erratum_843419)
3926 {
3927 bfd *input_bfd;
3928
3929 for (input_bfd = info->input_bfds;
3930 input_bfd != NULL;
3931 input_bfd = input_bfd->link.next)
3932 {
3933 asection *section;
3934
3935 for (section = input_bfd->sections;
3936 section != NULL;
3937 section = section->next)
3938 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3939 return FALSE;
3940 }
3941
3942 _bfd_aarch64_resize_stubs (htab);
3943 (*htab->layout_sections_again) ();
3944 }
3945
3946 while (1)
3947 {
3948 bfd *input_bfd;
3949
3950 for (input_bfd = info->input_bfds;
3951 input_bfd != NULL; input_bfd = input_bfd->link.next)
3952 {
3953 Elf_Internal_Shdr *symtab_hdr;
3954 asection *section;
3955 Elf_Internal_Sym *local_syms = NULL;
3956
3957 /* We'll need the symbol table in a second. */
3958 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3959 if (symtab_hdr->sh_info == 0)
3960 continue;
3961
3962 /* Walk over each section attached to the input bfd. */
3963 for (section = input_bfd->sections;
3964 section != NULL; section = section->next)
3965 {
3966 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3967
3968 /* If there aren't any relocs, then there's nothing more
3969 to do. */
3970 if ((section->flags & SEC_RELOC) == 0
3971 || section->reloc_count == 0
3972 || (section->flags & SEC_CODE) == 0)
3973 continue;
3974
3975 /* If this section is a link-once section that will be
3976 discarded, then don't create any stubs. */
3977 if (section->output_section == NULL
3978 || section->output_section->owner != output_bfd)
3979 continue;
3980
3981 /* Get the relocs. */
3982 internal_relocs
3983 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3984 NULL, info->keep_memory);
3985 if (internal_relocs == NULL)
3986 goto error_ret_free_local;
3987
3988 /* Now examine each relocation. */
3989 irela = internal_relocs;
3990 irelaend = irela + section->reloc_count;
3991 for (; irela < irelaend; irela++)
3992 {
3993 unsigned int r_type, r_indx;
3994 enum elf_aarch64_stub_type stub_type;
3995 struct elf_aarch64_stub_hash_entry *stub_entry;
3996 asection *sym_sec;
3997 bfd_vma sym_value;
3998 bfd_vma destination;
3999 struct elf_aarch64_link_hash_entry *hash;
4000 const char *sym_name;
4001 char *stub_name;
4002 const asection *id_sec;
4003 unsigned char st_type;
4004 bfd_size_type len;
4005
4006 r_type = ELFNN_R_TYPE (irela->r_info);
4007 r_indx = ELFNN_R_SYM (irela->r_info);
4008
4009 if (r_type >= (unsigned int) R_AARCH64_end)
4010 {
4011 bfd_set_error (bfd_error_bad_value);
4012 error_ret_free_internal:
4013 if (elf_section_data (section)->relocs == NULL)
4014 free (internal_relocs);
4015 goto error_ret_free_local;
4016 }
4017
4018 /* Only look for stubs on unconditional branch and
4019 branch and link instructions. */
4020 if (r_type != (unsigned int) AARCH64_R (CALL26)
4021 && r_type != (unsigned int) AARCH64_R (JUMP26))
4022 continue;
4023
4024 /* Now determine the call target, its name, value,
4025 section. */
4026 sym_sec = NULL;
4027 sym_value = 0;
4028 destination = 0;
4029 hash = NULL;
4030 sym_name = NULL;
4031 if (r_indx < symtab_hdr->sh_info)
4032 {
4033 /* It's a local symbol. */
4034 Elf_Internal_Sym *sym;
4035 Elf_Internal_Shdr *hdr;
4036
4037 if (local_syms == NULL)
4038 {
4039 local_syms
4040 = (Elf_Internal_Sym *) symtab_hdr->contents;
4041 if (local_syms == NULL)
4042 local_syms
4043 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4044 symtab_hdr->sh_info, 0,
4045 NULL, NULL, NULL);
4046 if (local_syms == NULL)
4047 goto error_ret_free_internal;
4048 }
4049
4050 sym = local_syms + r_indx;
4051 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4052 sym_sec = hdr->bfd_section;
4053 if (!sym_sec)
4054 /* This is an undefined symbol. It can never
4055 be resolved. */
4056 continue;
4057
4058 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4059 sym_value = sym->st_value;
4060 destination = (sym_value + irela->r_addend
4061 + sym_sec->output_offset
4062 + sym_sec->output_section->vma);
4063 st_type = ELF_ST_TYPE (sym->st_info);
4064 sym_name
4065 = bfd_elf_string_from_elf_section (input_bfd,
4066 symtab_hdr->sh_link,
4067 sym->st_name);
4068 }
4069 else
4070 {
4071 int e_indx;
4072
4073 e_indx = r_indx - symtab_hdr->sh_info;
4074 hash = ((struct elf_aarch64_link_hash_entry *)
4075 elf_sym_hashes (input_bfd)[e_indx]);
4076
4077 while (hash->root.root.type == bfd_link_hash_indirect
4078 || hash->root.root.type == bfd_link_hash_warning)
4079 hash = ((struct elf_aarch64_link_hash_entry *)
4080 hash->root.root.u.i.link);
4081
4082 if (hash->root.root.type == bfd_link_hash_defined
4083 || hash->root.root.type == bfd_link_hash_defweak)
4084 {
4085 struct elf_aarch64_link_hash_table *globals =
4086 elf_aarch64_hash_table (info);
4087 sym_sec = hash->root.root.u.def.section;
4088 sym_value = hash->root.root.u.def.value;
4089 /* For a destination in a shared library,
4090 use the PLT stub as target address to
4091 decide whether a branch stub is
4092 needed. */
4093 if (globals->root.splt != NULL && hash != NULL
4094 && hash->root.plt.offset != (bfd_vma) - 1)
4095 {
4096 sym_sec = globals->root.splt;
4097 sym_value = hash->root.plt.offset;
4098 if (sym_sec->output_section != NULL)
4099 destination = (sym_value
4100 + sym_sec->output_offset
4101 +
4102 sym_sec->output_section->vma);
4103 }
4104 else if (sym_sec->output_section != NULL)
4105 destination = (sym_value + irela->r_addend
4106 + sym_sec->output_offset
4107 + sym_sec->output_section->vma);
4108 }
4109 else if (hash->root.root.type == bfd_link_hash_undefined
4110 || (hash->root.root.type
4111 == bfd_link_hash_undefweak))
4112 {
4113 /* For a shared library, use the PLT stub as
4114 target address to decide whether a long
4115 branch stub is needed.
4116 For absolute code, such undefined symbols cannot be handled. */
4117 struct elf_aarch64_link_hash_table *globals =
4118 elf_aarch64_hash_table (info);
4119
4120 if (globals->root.splt != NULL && hash != NULL
4121 && hash->root.plt.offset != (bfd_vma) - 1)
4122 {
4123 sym_sec = globals->root.splt;
4124 sym_value = hash->root.plt.offset;
4125 if (sym_sec->output_section != NULL)
4126 destination = (sym_value
4127 + sym_sec->output_offset
4128 +
4129 sym_sec->output_section->vma);
4130 }
4131 else
4132 continue;
4133 }
4134 else
4135 {
4136 bfd_set_error (bfd_error_bad_value);
4137 goto error_ret_free_internal;
4138 }
4139 st_type = ELF_ST_TYPE (hash->root.type);
4140 sym_name = hash->root.root.root.string;
4141 }
4142
4143 /* Determine what (if any) linker stub is needed. */
4144 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4145 st_type, destination);
4146 if (stub_type == aarch64_stub_none)
4147 continue;
4148
4149 /* Support for grouping stub sections. */
4150 id_sec = htab->stub_group[section->id].link_sec;
4151
4152 /* Get the name of this stub. */
4153 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4154 irela);
4155 if (!stub_name)
4156 goto error_ret_free_internal;
4157
4158 stub_entry =
4159 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4160 stub_name, FALSE, FALSE);
4161 if (stub_entry != NULL)
4162 {
4163 /* The proper stub has already been created. */
4164 free (stub_name);
4165 continue;
4166 }
4167
4168 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4169 (stub_name, section, htab);
4170 if (stub_entry == NULL)
4171 {
4172 free (stub_name);
4173 goto error_ret_free_internal;
4174 }
4175
4176 stub_entry->target_value = sym_value + irela->r_addend;
4177 stub_entry->target_section = sym_sec;
4178 stub_entry->stub_type = stub_type;
4179 stub_entry->h = hash;
4180 stub_entry->st_type = st_type;
4181
4182 if (sym_name == NULL)
4183 sym_name = "unnamed";
4184 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4185 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4186 if (stub_entry->output_name == NULL)
4187 {
4188 free (stub_name);
4189 goto error_ret_free_internal;
4190 }
4191
4192 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4193 sym_name);
4194
4195 stub_changed = TRUE;
4196 }
4197
4198 /* We're done with the internal relocs, free them. */
4199 if (elf_section_data (section)->relocs == NULL)
4200 free (internal_relocs);
4201 }
4202 }
4203
4204 if (!stub_changed)
4205 break;
4206
4207 _bfd_aarch64_resize_stubs (htab);
4208
4209 /* Ask the linker to do its stuff. */
4210 (*htab->layout_sections_again) ();
4211 stub_changed = FALSE;
4212 }
4213
4214 return TRUE;
4215
4216 error_ret_free_local:
4217 return FALSE;
4218 }
4219
4220 /* Build all the stubs associated with the current output file. The
4221 stubs are kept in a hash table attached to the main linker hash
4222 table. We also set up the .plt entries for statically linked PIC
4223 functions here. This function is called via aarch64_elf_finish in the
4224 linker. */
4225
4226 bfd_boolean
4227 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4228 {
4229 asection *stub_sec;
4230 struct bfd_hash_table *table;
4231 struct elf_aarch64_link_hash_table *htab;
4232
4233 htab = elf_aarch64_hash_table (info);
4234
4235 for (stub_sec = htab->stub_bfd->sections;
4236 stub_sec != NULL; stub_sec = stub_sec->next)
4237 {
4238 bfd_size_type size;
4239
4240 /* Ignore non-stub sections. */
4241 if (!strstr (stub_sec->name, STUB_SUFFIX))
4242 continue;
4243
4244 /* Allocate memory to hold the linker stubs. */
4245 size = stub_sec->size;
4246 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4247 if (stub_sec->contents == NULL && size != 0)
4248 return FALSE;
4249 stub_sec->size = 0;
4250
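/* Emit an unconditional branch at the start of the stub section whose
 target is the end of the section (the offset is the section size in
 4-byte units), so that code falling through into the section skips
 over the veneers. */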
4251 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4252 stub_sec->size += 4;
4253 }
4254
4255 /* Build the stubs as directed by the stub hash table. */
4256 table = &htab->stub_hash_table;
4257 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4258
4259 return TRUE;
4260 }
4261
4262
4263 /* Add an entry to the code/data map for section SEC. */
4264
4265 static void
4266 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4267 {
4268 struct _aarch64_elf_section_data *sec_data =
4269 elf_aarch64_section_data (sec);
4270 unsigned int newidx;
4271
4272 if (sec_data->map == NULL)
4273 {
4274 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4275 sec_data->mapcount = 0;
4276 sec_data->mapsize = 1;
4277 }
4278
4279 newidx = sec_data->mapcount++;
4280
4281 if (sec_data->mapcount > sec_data->mapsize)
4282 {
4283 sec_data->mapsize *= 2;
4284 sec_data->map = bfd_realloc_or_free
4285 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4286 }
4287
4288 if (sec_data->map)
4289 {
4290 sec_data->map[newidx].vma = vma;
4291 sec_data->map[newidx].type = type;
4292 }
4293 }
4294
4295
4296 /* Initialise maps of insn/data for input BFDs. */
4297 void
4298 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4299 {
4300 Elf_Internal_Sym *isymbuf;
4301 Elf_Internal_Shdr *hdr;
4302 unsigned int i, localsyms;
4303
4304 /* Make sure that we are dealing with an AArch64 elf binary. */
4305 if (!is_aarch64_elf (abfd))
4306 return;
4307
4308 if ((abfd->flags & DYNAMIC) != 0)
4309 return;
4310
4311 hdr = &elf_symtab_hdr (abfd);
4312 localsyms = hdr->sh_info;
4313
4314 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4315 should contain the number of local symbols, which should come before any
4316 global symbols. Mapping symbols are always local. */
4317 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4318
4319 /* No internal symbols read? Skip this BFD. */
4320 if (isymbuf == NULL)
4321 return;
4322
4323 for (i = 0; i < localsyms; i++)
4324 {
4325 Elf_Internal_Sym *isym = &isymbuf[i];
4326 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4327 const char *name;
4328
4329 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4330 {
4331 name = bfd_elf_string_from_elf_section (abfd,
4332 hdr->sh_link,
4333 isym->st_name);
4334
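/* Mapping symbols are named "$x" (start of code) or "$d" (start of
 data), so name[1] is the map type recorded for this span. */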
4335 if (bfd_is_aarch64_special_symbol_name
4336 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4337 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4338 }
4339 }
4340 }
4341
4342 /* Set option values needed during linking. */
4343 void
4344 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4345 struct bfd_link_info *link_info,
4346 int no_enum_warn,
4347 int no_wchar_warn, int pic_veneer,
4348 int fix_erratum_835769,
4349 int fix_erratum_843419,
4350 int no_apply_dynamic_relocs)
4351 {
4352 struct elf_aarch64_link_hash_table *globals;
4353
4354 globals = elf_aarch64_hash_table (link_info);
4355 globals->pic_veneer = pic_veneer;
4356 globals->fix_erratum_835769 = fix_erratum_835769;
4357 globals->fix_erratum_843419 = fix_erratum_843419;
4358 globals->fix_erratum_843419_adr = TRUE;
4359 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
4360
4361 BFD_ASSERT (is_aarch64_elf (output_bfd));
4362 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4363 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4364 }
4365
4366 static bfd_vma
4367 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4368 struct elf_aarch64_link_hash_table
4369 *globals, struct bfd_link_info *info,
4370 bfd_vma value, bfd *output_bfd,
4371 bfd_boolean *unresolved_reloc_p)
4372 {
4373 bfd_vma off = (bfd_vma) - 1;
4374 asection *basegot = globals->root.sgot;
4375 bfd_boolean dyn = globals->root.dynamic_sections_created;
4376
4377 if (h != NULL)
4378 {
4379 BFD_ASSERT (basegot != NULL);
4380 off = h->got.offset;
4381 BFD_ASSERT (off != (bfd_vma) - 1);
4382 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4383 || (bfd_link_pic (info)
4384 && SYMBOL_REFERENCES_LOCAL (info, h))
4385 || (ELF_ST_VISIBILITY (h->other)
4386 && h->root.type == bfd_link_hash_undefweak))
4387 {
4388 /* This is actually a static link, or it is a -Bsymbolic link
4389 and the symbol is defined locally. We must initialize this
4390 entry in the global offset table. Since the offset must
4391 always be a multiple of 8 (4 in the case of ILP32), we use
4392 the least significant bit to record whether we have
4393 initialized it already.
4394 When doing a dynamic link, we create a .rel(a).got relocation
4395 entry to initialize the value. This is done in the
4396 finish_dynamic_symbol routine. */
4397 if ((off & 1) != 0)
4398 off &= ~1;
4399 else
4400 {
4401 bfd_put_NN (output_bfd, value, basegot->contents + off);
4402 h->got.offset |= 1;
4403 }
4404 }
4405 else
4406 *unresolved_reloc_p = FALSE;
4407
4408 off = off + basegot->output_section->vma + basegot->output_offset;
4409 }
4410
4411 return off;
4412 }
4413
4414 /* Change R_TYPE to a more efficient access model where possible,
4415 return the new reloc type. */
4416
4417 static bfd_reloc_code_real_type
4418 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4419 struct elf_link_hash_entry *h)
4420 {
4421 bfd_boolean is_local = h == NULL;
4422
4423 switch (r_type)
4424 {
4425 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4426 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4427 return (is_local
4428 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4429 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4430
4431 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4432 return (is_local
4433 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4434 : r_type);
4435
4436 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4437 return (is_local
4438 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4439 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4440
4441 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4442 return (is_local
4443 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4444 : BFD_RELOC_AARCH64_NONE);
4445
4446 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4447 return (is_local
4448 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4449 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
4450
4451 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4452 return (is_local
4453 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4454 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
4455
4456 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4457 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4458 return (is_local
4459 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4460 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4461
4462 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4463 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4464
4465 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4466 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4467
4468 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4469 return r_type;
4470
4471 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4472 return (is_local
4473 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4474 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4475
4476 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4477 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4478 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4479 /* Instructions with these relocations will become NOPs. */
4480 return BFD_RELOC_AARCH64_NONE;
4481
4482 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4483 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4484 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4485 return is_local ? BFD_RELOC_AARCH64_NONE : r_type;
4486
4487 #if ARCH_SIZE == 64
4488 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4489 return is_local
4490 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4491 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
4492
4493 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4494 return is_local
4495 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4496 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
4497 #endif
4498
4499 default:
4500 break;
4501 }
4502
4503 return r_type;
4504 }
4505
4506 static unsigned int
4507 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4508 {
4509 switch (r_type)
4510 {
4511 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4512 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4513 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4514 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4515 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
4516 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4517 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4518 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4519 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4520 return GOT_NORMAL;
4521
4522 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4523 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4524 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4525 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4526 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4527 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4528 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4529 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4530 return GOT_TLS_GD;
4531
4532 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4533 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4534 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4535 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4536 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4537 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4538 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4539 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4540 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4541 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4542 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4543 return GOT_TLSDESC_GD;
4544
4545 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4546 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4547 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4548 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4549 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4550 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4551 return GOT_TLS_IE;
4552
4553 default:
4554 break;
4555 }
4556 return GOT_UNKNOWN;
4557 }
4558
4559 static bfd_boolean
4560 aarch64_can_relax_tls (bfd *input_bfd,
4561 struct bfd_link_info *info,
4562 bfd_reloc_code_real_type r_type,
4563 struct elf_link_hash_entry *h,
4564 unsigned long r_symndx)
4565 {
4566 unsigned int symbol_got_type;
4567 unsigned int reloc_got_type;
4568
4569 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
4570 return FALSE;
4571
4572 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4573 reloc_got_type = aarch64_reloc_got_type (r_type);
4574
4575 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4576 return TRUE;
4577
4578 if (bfd_link_pic (info))
4579 return FALSE;
4580
4581 if (h && h->root.type == bfd_link_hash_undefweak)
4582 return FALSE;
4583
4584 return TRUE;
4585 }
4586
4587 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4588 enumerator. */
4589
4590 static bfd_reloc_code_real_type
4591 aarch64_tls_transition (bfd *input_bfd,
4592 struct bfd_link_info *info,
4593 unsigned int r_type,
4594 struct elf_link_hash_entry *h,
4595 unsigned long r_symndx)
4596 {
4597 bfd_reloc_code_real_type bfd_r_type
4598 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4599
4600 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4601 return bfd_r_type;
4602
4603 return aarch64_tls_transition_without_check (bfd_r_type, h);
4604 }
4605
4606 /* Return the base VMA address which should be subtracted from real addresses
4607 when resolving R_AARCH64_TLS_DTPREL relocation. */
4608
4609 static bfd_vma
4610 dtpoff_base (struct bfd_link_info *info)
4611 {
4612 /* If tls_sec is NULL, we should have signalled an error already. */
4613 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4614 return elf_hash_table (info)->tls_sec->vma;
4615 }
4616
4617 /* Return the base VMA address which should be subtracted from real addresses
4618 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4619
4620 static bfd_vma
4621 tpoff_base (struct bfd_link_info *info)
4622 {
4623 struct elf_link_hash_table *htab = elf_hash_table (info);
4624
4625 /* If tls_sec is NULL, we should have signalled an error already. */
4626 BFD_ASSERT (htab->tls_sec != NULL);
4627
4628 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4629 htab->tls_sec->alignment_power);
4630 return htab->tls_sec->vma - base;
4631 }
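
/* Illustrative note (assuming the LP64 case, where TCB_SIZE is 16, and
 a TLS template aligned to 16 bytes): subtracting the value returned
 above from the vma of the first byte of the TLS segment gives a
 thread-pointer offset of align_power (TCB_SIZE, alignment_power),
 i.e. 16. */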
4632
4633 static bfd_vma *
4634 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4635 unsigned long r_symndx)
4636 {
4637 /* Return a pointer to the stored GOT offset for the symbol
4638 referred to by H, or for the local symbol with index R_SYMNDX. */
4639 if (h != NULL)
4640 return &h->got.offset;
4641 else
4642 {
4643 /* local symbol */
4644 struct elf_aarch64_local_symbol *l;
4645
4646 l = elf_aarch64_locals (input_bfd);
4647 return &l[r_symndx].got_offset;
4648 }
4649 }
4650
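/* GOT offsets are always at least 4-byte aligned, so the low bit of
 the stored offset is free to be used as a marker. The helpers below
 set, test and strip that bit; symbol_got_offset and
 symbol_tlsdesc_got_offset always return the offset with the marker
 cleared. */
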
4651 static void
4652 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4653 unsigned long r_symndx)
4654 {
4655 bfd_vma *p;
4656 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4657 *p |= 1;
4658 }
4659
4660 static int
4661 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4662 unsigned long r_symndx)
4663 {
4664 bfd_vma value;
4665 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4666 return value & 1;
4667 }
4668
4669 static bfd_vma
4670 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4671 unsigned long r_symndx)
4672 {
4673 bfd_vma value;
4674 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4675 value &= ~1;
4676 return value;
4677 }
4678
4679 static bfd_vma *
4680 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4681 unsigned long r_symndx)
4682 {
4683 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
4684 referred to by H, or for the local symbol with index R_SYMNDX. */
4685 if (h != NULL)
4686 {
4687 struct elf_aarch64_link_hash_entry *eh;
4688 eh = (struct elf_aarch64_link_hash_entry *) h;
4689 return &eh->tlsdesc_got_jump_table_offset;
4690 }
4691 else
4692 {
4693 /* local symbol */
4694 struct elf_aarch64_local_symbol *l;
4695
4696 l = elf_aarch64_locals (input_bfd);
4697 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4698 }
4699 }
4700
4701 static void
4702 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4703 unsigned long r_symndx)
4704 {
4705 bfd_vma *p;
4706 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4707 *p |= 1;
4708 }
4709
4710 static int
4711 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4712 struct elf_link_hash_entry *h,
4713 unsigned long r_symndx)
4714 {
4715 bfd_vma value;
4716 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4717 return value & 1;
4718 }
4719
4720 static bfd_vma
4721 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4722 unsigned long r_symndx)
4723 {
4724 bfd_vma value;
4725 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4726 value &= ~1;
4727 return value;
4728 }
4729
4730 /* Data for make_branch_to_erratum_835769_stub(). */
4731
4732 struct erratum_835769_branch_to_stub_data
4733 {
4734 struct bfd_link_info *info;
4735 asection *output_section;
4736 bfd_byte *contents;
4737 };
4738
4739 /* Helper to insert branches to erratum 835769 stubs in the right
4740 places for a particular section. */
4741
4742 static bfd_boolean
4743 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4744 void *in_arg)
4745 {
4746 struct elf_aarch64_stub_hash_entry *stub_entry;
4747 struct erratum_835769_branch_to_stub_data *data;
4748 bfd_byte *contents;
4749 unsigned long branch_insn = 0;
4750 bfd_vma veneered_insn_loc, veneer_entry_loc;
4751 bfd_signed_vma branch_offset;
4752 unsigned int target;
4753 bfd *abfd;
4754
4755 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4756 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4757
4758 if (stub_entry->target_section != data->output_section
4759 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4760 return TRUE;
4761
4762 contents = data->contents;
4763 veneered_insn_loc = stub_entry->target_section->output_section->vma
4764 + stub_entry->target_section->output_offset
4765 + stub_entry->target_value;
4766 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4767 + stub_entry->stub_sec->output_offset
4768 + stub_entry->stub_offset;
4769 branch_offset = veneer_entry_loc - veneered_insn_loc;
4770
4771 abfd = stub_entry->target_section->owner;
4772 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4773 _bfd_error_handler
4774 (_("%B: error: Erratum 835769 stub out "
4775 "of range (input file too large)"), abfd);
4776
4777 target = stub_entry->target_value;
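/* Assemble an unconditional branch (B) to the veneer: opcode
 0x14000000 with the signed offset, in 4-byte units, in the low 26
 bits, and write it over the veneered instruction. */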
4778 branch_insn = 0x14000000;
4779 branch_offset >>= 2;
4780 branch_offset &= 0x3ffffff;
4781 branch_insn |= branch_offset;
4782 bfd_putl32 (branch_insn, &contents[target]);
4783
4784 return TRUE;
4785 }
4786
4787
4788 static bfd_boolean
4789 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4790 void *in_arg)
4791 {
4792 struct elf_aarch64_stub_hash_entry *stub_entry
4793 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4794 struct erratum_835769_branch_to_stub_data *data
4795 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4796 struct bfd_link_info *info;
4797 struct elf_aarch64_link_hash_table *htab;
4798 bfd_byte *contents;
4799 asection *section;
4800 bfd *abfd;
4801 bfd_vma place;
4802 uint32_t insn;
4803
4804 info = data->info;
4805 contents = data->contents;
4806 section = data->output_section;
4807
4808 htab = elf_aarch64_hash_table (info);
4809
4810 if (stub_entry->target_section != section
4811 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4812 return TRUE;
4813
4814 insn = bfd_getl32 (contents + stub_entry->target_value);
4815 bfd_putl32 (insn,
4816 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4817
4818 place = (section->output_section->vma + section->output_offset
4819 + stub_entry->adrp_offset);
4820 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4821
4822 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4823 abort ();
4824
4825 bfd_signed_vma imm =
4826 (_bfd_aarch64_sign_extend
4827 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4828 - (place & 0xfff));
4829
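/* Where permitted and the target is within the +/-1MB immediate range
 of ADR, sidestep the erratum by rewriting the ADRP as an ADR;
 otherwise fall back to branching to the veneer, which holds a copy
 of the veneered load/store. */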
4830 if (htab->fix_erratum_843419_adr
4831 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4832 {
4833 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4834 | AARCH64_RT (insn));
4835 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4836 }
4837 else
4838 {
4839 bfd_vma veneered_insn_loc;
4840 bfd_vma veneer_entry_loc;
4841 bfd_signed_vma branch_offset;
4842 uint32_t branch_insn;
4843
4844 veneered_insn_loc = stub_entry->target_section->output_section->vma
4845 + stub_entry->target_section->output_offset
4846 + stub_entry->target_value;
4847 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4848 + stub_entry->stub_sec->output_offset
4849 + stub_entry->stub_offset;
4850 branch_offset = veneer_entry_loc - veneered_insn_loc;
4851
4852 abfd = stub_entry->target_section->owner;
4853 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4854 _bfd_error_handler
4855 (_("%B: error: Erratum 843419 stub out "
4856 "of range (input file too large)"), abfd);
4857
4858 branch_insn = 0x14000000;
4859 branch_offset >>= 2;
4860 branch_offset &= 0x3ffffff;
4861 branch_insn |= branch_offset;
4862 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4863 }
4864 return TRUE;
4865 }
4866
4867
4868 static bfd_boolean
4869 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4870 struct bfd_link_info *link_info,
4871 asection *sec,
4872 bfd_byte *contents)
4873
4874 {
4875 struct elf_aarch64_link_hash_table *globals =
4876 elf_aarch64_hash_table (link_info);
4877
4878 if (globals == NULL)
4879 return FALSE;
4880
4881 /* Fix code to point to erratum 835769 stubs. */
4882 if (globals->fix_erratum_835769)
4883 {
4884 struct erratum_835769_branch_to_stub_data data;
4885
4886 data.info = link_info;
4887 data.output_section = sec;
4888 data.contents = contents;
4889 bfd_hash_traverse (&globals->stub_hash_table,
4890 make_branch_to_erratum_835769_stub, &data);
4891 }
4892
4893 if (globals->fix_erratum_843419)
4894 {
4895 struct erratum_835769_branch_to_stub_data data;
4896
4897 data.info = link_info;
4898 data.output_section = sec;
4899 data.contents = contents;
4900 bfd_hash_traverse (&globals->stub_hash_table,
4901 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4902 }
4903
4904 return FALSE;
4905 }
4906
4907 /* Perform a relocation as part of a final link. The input relocation type
4908 should be TLS relaxed. */
4909
4910 static bfd_reloc_status_type
4911 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4912 bfd *input_bfd,
4913 bfd *output_bfd,
4914 asection *input_section,
4915 bfd_byte *contents,
4916 Elf_Internal_Rela *rel,
4917 bfd_vma value,
4918 struct bfd_link_info *info,
4919 asection *sym_sec,
4920 struct elf_link_hash_entry *h,
4921 bfd_boolean *unresolved_reloc_p,
4922 bfd_boolean save_addend,
4923 bfd_vma *saved_addend,
4924 Elf_Internal_Sym *sym)
4925 {
4926 Elf_Internal_Shdr *symtab_hdr;
4927 unsigned int r_type = howto->type;
4928 bfd_reloc_code_real_type bfd_r_type
4929 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4930 unsigned long r_symndx;
4931 bfd_byte *hit_data = contents + rel->r_offset;
4932 bfd_vma place, off;
4933 bfd_signed_vma signed_addend;
4934 struct elf_aarch64_link_hash_table *globals;
4935 bfd_boolean weak_undef_p;
4936 asection *base_got;
4937
4938 globals = elf_aarch64_hash_table (info);
4939
4940 symtab_hdr = &elf_symtab_hdr (input_bfd);
4941
4942 BFD_ASSERT (is_aarch64_elf (input_bfd));
4943
4944 r_symndx = ELFNN_R_SYM (rel->r_info);
4945
4946 place = input_section->output_section->vma
4947 + input_section->output_offset + rel->r_offset;
4948
4949 /* Get addend, accumulating the addend for consecutive relocs
4950 which refer to the same offset. */
4951 signed_addend = saved_addend ? *saved_addend : 0;
4952 signed_addend += rel->r_addend;
4953
4954 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4955 : bfd_is_und_section (sym_sec));
4956
4957 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4958 it here if it is defined in a non-shared object. */
4959 if (h != NULL
4960 && h->type == STT_GNU_IFUNC
4961 && h->def_regular)
4962 {
4963 asection *plt;
4964 const char *name;
4965 bfd_vma addend = 0;
4966
4967 if ((input_section->flags & SEC_ALLOC) == 0
4968 || h->plt.offset == (bfd_vma) -1)
4969 abort ();
4970
4971 /* An STT_GNU_IFUNC symbol must go through the PLT. */
4972 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4973 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4974
4975 switch (bfd_r_type)
4976 {
4977 default:
4978 if (h->root.root.string)
4979 name = h->root.root.string;
4980 else
4981 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4982 NULL);
4983 _bfd_error_handler
4984 /* xgettext:c-format */
4985 (_("%B: relocation %s against STT_GNU_IFUNC "
4986 "symbol `%s' isn't handled by %s"), input_bfd,
4987 howto->name, name, __FUNCTION__);
4988 bfd_set_error (bfd_error_bad_value);
4989 return FALSE;
4990
4991 case BFD_RELOC_AARCH64_NN:
4992 if (rel->r_addend != 0)
4993 {
4994 if (h->root.root.string)
4995 name = h->root.root.string;
4996 else
4997 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4998 sym, NULL);
4999 _bfd_error_handler
5000 /* xgettext:c-format */
5001 (_("%B: relocation %s against STT_GNU_IFUNC "
5002 "symbol `%s' has non-zero addend: %d"),
5003 input_bfd, howto->name, name, rel->r_addend);
5004 bfd_set_error (bfd_error_bad_value);
5005 return FALSE;
5006 }
5007
5008 /* Generate dynamic relocation only when there is a
5009 non-GOT reference in a shared object. */
5010 if (bfd_link_pic (info) && h->non_got_ref)
5011 {
5012 Elf_Internal_Rela outrel;
5013 asection *sreloc;
5014
5015 /* Need a dynamic relocation to get the real function
5016 address. */
5017 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5018 info,
5019 input_section,
5020 rel->r_offset);
5021 if (outrel.r_offset == (bfd_vma) -1
5022 || outrel.r_offset == (bfd_vma) -2)
5023 abort ();
5024
5025 outrel.r_offset += (input_section->output_section->vma
5026 + input_section->output_offset);
5027
5028 if (h->dynindx == -1
5029 || h->forced_local
5030 || bfd_link_executable (info))
5031 {
5032 /* This symbol is resolved locally. */
5033 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5034 outrel.r_addend = (h->root.u.def.value
5035 + h->root.u.def.section->output_section->vma
5036 + h->root.u.def.section->output_offset);
5037 }
5038 else
5039 {
5040 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5041 outrel.r_addend = 0;
5042 }
5043
5044 sreloc = globals->root.irelifunc;
5045 elf_append_rela (output_bfd, sreloc, &outrel);
5046
5047 /* If this reloc is against an external symbol, we
5048 do not want to fiddle with the addend. Otherwise,
5049 we need to include the symbol value so that it
5050 becomes an addend for the dynamic reloc. For an
5051 internal symbol, we have updated addend. */
5052 return bfd_reloc_ok;
5053 }
5054 /* FALLTHROUGH */
5055 case BFD_RELOC_AARCH64_CALL26:
5056 case BFD_RELOC_AARCH64_JUMP26:
5057 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5058 signed_addend,
5059 weak_undef_p);
5060 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5061 howto, value);
5062 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5063 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5064 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5065 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5066 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5067 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5068 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5069 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5070 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5071 base_got = globals->root.sgot;
5072 off = h->got.offset;
5073
5074 if (base_got == NULL)
5075 abort ();
5076
5077 if (off == (bfd_vma) -1)
5078 {
5079 bfd_vma plt_index;
5080
5081 /* We can't use h->got.offset here to save state, or
5082 even just remember the offset, as finish_dynamic_symbol
5083 would use that as offset into .got. */
5084
5085 if (globals->root.splt != NULL)
5086 {
5087 plt_index = ((h->plt.offset - globals->plt_header_size) /
5088 globals->plt_entry_size);
5089 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5090 base_got = globals->root.sgotplt;
5091 }
5092 else
5093 {
5094 plt_index = h->plt.offset / globals->plt_entry_size;
5095 off = plt_index * GOT_ENTRY_SIZE;
5096 base_got = globals->root.igotplt;
5097 }
5098
5099 if (h->dynindx == -1
5100 || h->forced_local
5101 || info->symbolic)
5102 {
5103 /* This references the local definition. We must
5104 initialize this entry in the global offset table.
5105 Since the offset must always be a multiple of 8,
5106 we use the least significant bit to record
5107 whether we have initialized it already.
5108
5109 When doing a dynamic link, we create a .rela.got
5110 relocation entry to initialize the value. This
5111 is done in the finish_dynamic_symbol routine. */
5112 if ((off & 1) != 0)
5113 off &= ~1;
5114 else
5115 {
5116 bfd_put_NN (output_bfd, value,
5117 base_got->contents + off);
5118 /* Note that this is harmless as -1 | 1 still is -1. */
5119 h->got.offset |= 1;
5120 }
5121 }
5122 value = (base_got->output_section->vma
5123 + base_got->output_offset + off);
5124 }
5125 else
5126 value = aarch64_calculate_got_entry_vma (h, globals, info,
5127 value, output_bfd,
5128 unresolved_reloc_p);
5129
5130 switch (bfd_r_type)
5131 {
5132 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5133 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5134 addend = (globals->root.sgot->output_section->vma
5135 + globals->root.sgot->output_offset);
5136 break;
5137 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5138 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5139 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5140 value = (value - globals->root.sgot->output_section->vma
5141 - globals->root.sgot->output_offset);
5142 default:
5143 break;
5144 }
5145
5146 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5147 addend, weak_undef_p);
5148 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5149 case BFD_RELOC_AARCH64_ADD_LO12:
5150 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5151 break;
5152 }
5153 }
5154
5155 switch (bfd_r_type)
5156 {
5157 case BFD_RELOC_AARCH64_NONE:
5158 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5159 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5160 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5161 *unresolved_reloc_p = FALSE;
5162 return bfd_reloc_ok;
5163
5164 case BFD_RELOC_AARCH64_NN:
5165
5166 /* When generating a shared object or relocatable executable, these
5167 relocations are copied into the output file to be resolved at
5168 run time. */
5169 if ((bfd_link_pic (info)
5170 || globals->root.is_relocatable_executable)
5171 && (input_section->flags & SEC_ALLOC)
5172 && (h == NULL
5173 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5174 || h->root.type != bfd_link_hash_undefweak))
5175 {
5176 Elf_Internal_Rela outrel;
5177 bfd_byte *loc;
5178 bfd_boolean skip, relocate;
5179 asection *sreloc;
5180
5181 *unresolved_reloc_p = FALSE;
5182
5183 skip = FALSE;
5184 relocate = FALSE;
5185
5186 outrel.r_addend = signed_addend;
5187 outrel.r_offset =
5188 _bfd_elf_section_offset (output_bfd, info, input_section,
5189 rel->r_offset);
5190 if (outrel.r_offset == (bfd_vma) - 1)
5191 skip = TRUE;
5192 else if (outrel.r_offset == (bfd_vma) - 2)
5193 {
5194 skip = TRUE;
5195 relocate = TRUE;
5196 }
5197
5198 outrel.r_offset += (input_section->output_section->vma
5199 + input_section->output_offset);
5200
5201 if (skip)
5202 memset (&outrel, 0, sizeof outrel);
5203 else if (h != NULL
5204 && h->dynindx != -1
5205 && (!bfd_link_pic (info)
5206 || !(bfd_link_pie (info)
5207 || SYMBOLIC_BIND (info, h))
5208 || !h->def_regular))
5209 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5210 else
5211 {
5212 int symbol;
5213
5214 /* On SVR4-ish systems, the dynamic loader cannot
5215 relocate the text and data segments independently,
5216 so the symbol does not matter. */
5217 symbol = 0;
5218 relocate = globals->no_apply_dynamic_relocs ? FALSE : TRUE;
5219 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5220 outrel.r_addend += value;
5221 }
5222
5223 sreloc = elf_section_data (input_section)->sreloc;
5224 if (sreloc == NULL || sreloc->contents == NULL)
5225 return bfd_reloc_notsupported;
5226
5227 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5228 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5229
5230 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5231 {
5232 /* Sanity check that we have previously allocated
5233 sufficient space in the relocation section for the
5234 number of relocations we actually want to emit. */
5235 abort ();
5236 }
5237
5238 /* If this reloc is against an external symbol, we do not want to
5239 fiddle with the addend. Otherwise, we need to include the symbol
5240 value so that it becomes an addend for the dynamic reloc. */
5241 if (!relocate)
5242 return bfd_reloc_ok;
5243
5244 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5245 contents, rel->r_offset, value,
5246 signed_addend);
5247 }
5248 else
5249 value += signed_addend;
5250 break;
5251
5252 case BFD_RELOC_AARCH64_CALL26:
5253 case BFD_RELOC_AARCH64_JUMP26:
5254 {
5255 asection *splt = globals->root.splt;
5256 bfd_boolean via_plt_p =
5257 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5258
5259 /* A call to an undefined weak symbol is converted to a jump to
5260 the next instruction unless a PLT entry will be created.
5261 The jump to the next instruction is optimized as a NOP.
5262 Do the same for local undefined symbols. */
5263 if (weak_undef_p && ! via_plt_p)
5264 {
5265 bfd_putl32 (INSN_NOP, hit_data);
5266 return bfd_reloc_ok;
5267 }
5268
5269 /* If the call goes through a PLT entry, make sure to
5270 check distance to the right destination address. */
5271 if (via_plt_p)
5272 value = (splt->output_section->vma
5273 + splt->output_offset + h->plt.offset);
5274
5275 /* Check if a stub has to be inserted because the destination
5276 is too far away. */
5277 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
5278
5279 /* If the branch is redirected to a PLT stub, "value" is already the
5280 final destination; otherwise signed_addend must be added, since it
5281 may be non-zero, for example for a call to a local function symbol
5282 that has been turned into "sec_sym + sec_off", with sec_off kept in
5283 signed_addend. */
5284 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
5285 place))
5286 /* The target is out of reach, so redirect the branch to
5287 the local stub for this function. */
5288 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
5289 rel, globals);
5290 if (stub_entry != NULL)
5291 {
5292 value = (stub_entry->stub_offset
5293 + stub_entry->stub_sec->output_offset
5294 + stub_entry->stub_sec->output_section->vma);
5295
5296 /* We have redirected the destination to stub entry address,
5297 so ignore any addend record in the original rela entry. */
5298 signed_addend = 0;
5299 }
5300 }
5301 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5302 signed_addend, weak_undef_p);
5303 *unresolved_reloc_p = FALSE;
5304 break;
5305
5306 case BFD_RELOC_AARCH64_16_PCREL:
5307 case BFD_RELOC_AARCH64_32_PCREL:
5308 case BFD_RELOC_AARCH64_64_PCREL:
5309 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5310 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5311 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5312 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
5313 if (bfd_link_pic (info)
5314 && (input_section->flags & SEC_ALLOC) != 0
5315 && (input_section->flags & SEC_READONLY) != 0
5316 && h != NULL
5317 && !h->def_regular)
5318 {
5319 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5320
5321 _bfd_error_handler
5322 /* xgettext:c-format */
5323 (_("%B: relocation %s against external symbol `%s' can not be used"
5324 " when making a shared object; recompile with -fPIC"),
5325 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
5326 h->root.root.string);
5327 bfd_set_error (bfd_error_bad_value);
5328 return FALSE;
5329 }
5330 /* Fall through. */
5331
5332 case BFD_RELOC_AARCH64_16:
5333 #if ARCH_SIZE == 64
5334 case BFD_RELOC_AARCH64_32:
5335 #endif
5336 case BFD_RELOC_AARCH64_ADD_LO12:
5337 case BFD_RELOC_AARCH64_BRANCH19:
5338 case BFD_RELOC_AARCH64_LDST128_LO12:
5339 case BFD_RELOC_AARCH64_LDST16_LO12:
5340 case BFD_RELOC_AARCH64_LDST32_LO12:
5341 case BFD_RELOC_AARCH64_LDST64_LO12:
5342 case BFD_RELOC_AARCH64_LDST8_LO12:
5343 case BFD_RELOC_AARCH64_MOVW_G0:
5344 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5345 case BFD_RELOC_AARCH64_MOVW_G0_S:
5346 case BFD_RELOC_AARCH64_MOVW_G1:
5347 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5348 case BFD_RELOC_AARCH64_MOVW_G1_S:
5349 case BFD_RELOC_AARCH64_MOVW_G2:
5350 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5351 case BFD_RELOC_AARCH64_MOVW_G2_S:
5352 case BFD_RELOC_AARCH64_MOVW_G3:
5353 case BFD_RELOC_AARCH64_TSTBR14:
5354 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5355 signed_addend, weak_undef_p);
5356 break;
5357
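/* GOT-relative relocations: route the access through the symbol's GOT
   entry.  For global symbols the GOT entry address is obtained via
   aarch64_calculate_got_entry_vma; for local symbols the entry is
   written here, with an R_AARCH64_RELATIVE reloc emitted when linking
   position-independent output.  */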
5358 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5359 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5360 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5361 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5362 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5363 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5364 if (globals->root.sgot == NULL)
5365 BFD_ASSERT (h != NULL);
5366
5367 if (h != NULL)
5368 {
5369 bfd_vma addend = 0;
5370 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5371 output_bfd,
5372 unresolved_reloc_p);
5373 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5374 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5375 addend = (globals->root.sgot->output_section->vma
5376 + globals->root.sgot->output_offset);
5377 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5378 addend, weak_undef_p);
5379 }
5380 else
5381 {
5382 bfd_vma addend = 0;
5383 struct elf_aarch64_local_symbol *locals
5384 = elf_aarch64_locals (input_bfd);
5385
5386 if (locals == NULL)
5387 {
5388 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5389 _bfd_error_handler
5390 /* xgettext:c-format */
5391 (_("%B: Local symbol descriptor table is NULL when applying "
5392 "relocation %s against local symbol"),
5393 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5394 abort ();
5395 }
5396
5397 off = symbol_got_offset (input_bfd, h, r_symndx);
5398 base_got = globals->root.sgot;
5399 bfd_vma got_entry_addr = (base_got->output_section->vma
5400 + base_got->output_offset + off);
5401
5402 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5403 {
5404 bfd_put_64 (output_bfd, value, base_got->contents + off);
5405
5406 if (bfd_link_pic (info))
5407 {
5408 asection *s;
5409 Elf_Internal_Rela outrel;
5410
5411 /* For a local symbol, we have done the absolute relocation at static
5412 link time.  For a shared library, however, the content of the GOT
5413 entry must be updated according to the shared object's load base
5414 address, so we generate an R_AARCH64_RELATIVE reloc for the
5415 dynamic linker. */
5416 s = globals->root.srelgot;
5417 if (s == NULL)
5418 abort ();
5419
5420 outrel.r_offset = got_entry_addr;
5421 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5422 outrel.r_addend = value;
5423 elf_append_rela (output_bfd, s, &outrel);
5424 }
5425
5426 symbol_got_offset_mark (input_bfd, h, r_symndx);
5427 }
5428
5429 /* Update the relocation value to GOT entry addr as we have transformed
5430 the direct data access into indirect data access through GOT. */
5431 value = got_entry_addr;
5432
5433 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5434 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5435 addend = base_got->output_section->vma + base_got->output_offset;
5436
5437 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5438 addend, weak_undef_p);
5439 }
5440
5441 break;
5442
5443 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5444 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5445 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5446 if (h != NULL)
5447 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5448 output_bfd,
5449 unresolved_reloc_p);
5450 else
5451 {
5452 struct elf_aarch64_local_symbol *locals
5453 = elf_aarch64_locals (input_bfd);
5454
5455 if (locals == NULL)
5456 {
5457 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5458 _bfd_error_handler
5459 /* xgettext:c-format */
5460 (_("%B: Local symbol descriptor table is NULL when applying "
5461 "relocation %s against local symbol"),
5462 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5463 abort ();
5464 }
5465
5466 off = symbol_got_offset (input_bfd, h, r_symndx);
5467 base_got = globals->root.sgot;
5468 if (base_got == NULL)
5469 abort ();
5470
5471 bfd_vma got_entry_addr = (base_got->output_section->vma
5472 + base_got->output_offset + off);
5473
5474 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5475 {
5476 bfd_put_64 (output_bfd, value, base_got->contents + off);
5477
5478 if (bfd_link_pic (info))
5479 {
5480 asection *s;
5481 Elf_Internal_Rela outrel;
5482
5483 /* For a local symbol, we have done the absolute relocation at static
5484 link time.  For a shared library, however, the content of the GOT
5485 entry must be updated according to the shared object's load base
5486 address, so we generate an R_AARCH64_RELATIVE reloc for the
5487 dynamic linker. */
5488 s = globals->root.srelgot;
5489 if (s == NULL)
5490 abort ();
5491
5492 outrel.r_offset = got_entry_addr;
5493 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5494 outrel.r_addend = value;
5495 elf_append_rela (output_bfd, s, &outrel);
5496 }
5497
5498 symbol_got_offset_mark (input_bfd, h, r_symndx);
5499 }
5500 }
5501
5502 /* Update the relocation value to the GOT entry offset; the direct data
5503 access has been transformed into a GOT-relative indirect access. */
5504 value = symbol_got_offset (input_bfd, h, r_symndx);
5505 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5506 0, weak_undef_p);
5507 *unresolved_reloc_p = FALSE;
5508 break;
5509
5510 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5511 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5512 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5513 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5514 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5515 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5516 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5517 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5518 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5519 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5520 if (globals->root.sgot == NULL)
5521 return bfd_reloc_notsupported;
5522
5523 value = (symbol_got_offset (input_bfd, h, r_symndx)
5524 + globals->root.sgot->output_section->vma
5525 + globals->root.sgot->output_offset);
5526
5527 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5528 0, weak_undef_p);
5529 *unresolved_reloc_p = FALSE;
5530 break;
5531
5532 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5533 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5534 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5535 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5536 if (globals->root.sgot == NULL)
5537 return bfd_reloc_notsupported;
5538
5539 value = symbol_got_offset (input_bfd, h, r_symndx);
5540 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5541 0, weak_undef_p);
5542 *unresolved_reloc_p = FALSE;
5543 break;
5544
5545 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
5546 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5547 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5548 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
5549 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
5550 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
5551 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
5552 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
5553 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
5554 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
5555 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
5556 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5557 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5558 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5559 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5560 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5561 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5562 signed_addend - dtpoff_base (info),
5563 weak_undef_p);
5564 break;
5565
5566 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5567 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5568 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5569 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5570 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5571 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5572 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5573 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5574 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5575 signed_addend - tpoff_base (info),
5576 weak_undef_p);
5577 *unresolved_reloc_p = FALSE;
5578 break;
5579
5580 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5581 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5582 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5583 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5584 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5585 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5586 if (globals->root.sgot == NULL)
5587 return bfd_reloc_notsupported;
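/* TLSDESC GOT entries are placed in .got.plt, after the reserved
   jump-table slots, hence the sgotplt_jump_table_size bias below.  */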
5588 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5589 + globals->root.sgotplt->output_section->vma
5590 + globals->root.sgotplt->output_offset
5591 + globals->sgotplt_jump_table_size);
5592
5593 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5594 0, weak_undef_p);
5595 *unresolved_reloc_p = FALSE;
5596 break;
5597
5598 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5599 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5600 if (globals->root.sgot == NULL)
5601 return bfd_reloc_notsupported;
5602
5603 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5604 + globals->root.sgotplt->output_section->vma
5605 + globals->root.sgotplt->output_offset
5606 + globals->sgotplt_jump_table_size);
5607
5608 value -= (globals->root.sgot->output_section->vma
5609 + globals->root.sgot->output_offset);
5610
5611 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5612 0, weak_undef_p);
5613 *unresolved_reloc_p = FALSE;
5614 break;
5615
5616 default:
5617 return bfd_reloc_notsupported;
5618 }
5619
5620 if (saved_addend)
5621 *saved_addend = value;
5622
5623 /* Only apply the final relocation in a sequence. */
5624 if (save_addend)
5625 return bfd_reloc_continue;
5626
5627 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5628 howto, value);
5629 }
5630
5631 /* LP64 and ILP32 operate on x- and w-registers respectively.
5632 The following definitions take into account the difference between
5633 the corresponding machine encodings.  R means an x-register if the
5634 target arch is LP64, and a w-register if the target is ILP32. */
5635
5636 #if ARCH_SIZE == 64
5637 # define add_R0_R0 (0x91000000)
5638 # define add_R0_R0_R1 (0x8b000020)
5639 # define add_R0_R1 (0x91400020)
5640 # define ldr_R0 (0x58000000)
5641 # define ldr_R0_mask(i) (i & 0xffffffe0)
5642 # define ldr_R0_x0 (0xf9400000)
5643 # define ldr_hw_R0 (0xf2a00000)
5644 # define movk_R0 (0xf2800000)
5645 # define movz_R0 (0xd2a00000)
5646 # define movz_hw_R0 (0xd2c00000)
5647 #else /* ARCH_SIZE == 32 */
5648 # define add_R0_R0 (0x11000000)
5649 # define add_R0_R0_R1 (0x0b000020)
5650 # define add_R0_R1 (0x11400020)
5651 # define ldr_R0 (0x18000000)
5652 # define ldr_R0_mask(i) (i & 0xbfffffe0)
5653 # define ldr_R0_x0 (0xb9400000)
5654 # define ldr_hw_R0 (0x72a00000)
5655 # define movk_R0 (0x72800000)
5656 # define movz_R0 (0x52a00000)
5657 # define movz_hw_R0 (0x52c00000)
5658 #endif
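
/* Notes on how the encodings above are used by the relaxation code below:
   where the rewritten instruction must keep the original destination
   register, the low five bits (Rd/Rt) of the old instruction are OR-ed
   into the new encoding, e.g. movz_R0 | (insn & 0x1f).  ldr_R0_mask
   instead clears the register field (and, for ILP32, the size bit) so
   the rewritten load targets register 0.  An ADD immediate operand is
   inserted at bit 10, as in add_R0_R0 | (TCB_SIZE << 10).  The literal
   words 0xd53bd040 and 0xd53bd041 written below encode
   "mrs x0, tpidr_el0" and "mrs x1, tpidr_el0" respectively.  */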
5659
5660 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5661 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5662 link.
5663
5664 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5665 is to then call final_link_relocate. Return other values in the
5666 case of error. */
5667
5668 static bfd_reloc_status_type
5669 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5670 bfd *input_bfd, bfd_byte *contents,
5671 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5672 {
5673 bfd_boolean is_local = h == NULL;
5674 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5675 unsigned long insn;
5676
5677 BFD_ASSERT (globals && input_bfd && contents && rel);
5678
5679 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5680 {
5681 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5682 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5683 if (is_local)
5684 {
5685 /* GD->LE relaxation:
5686 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
5687 or
5688 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
5689
5690 Where R is x for LP64, and w for ILP32. */
5691 bfd_putl32 (movz_R0, contents + rel->r_offset);
5692 return bfd_reloc_continue;
5693 }
5694 else
5695 {
5696 /* GD->IE relaxation:
5697 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5698 or
5699 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5700 */
5701 return bfd_reloc_continue;
5702 }
5703
5704 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5705 BFD_ASSERT (0);
5706 break;
5707
5708 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5709 if (is_local)
5710 {
5711 /* Tiny TLSDESC->LE relaxation:
5712 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
5713 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
5714 .tlsdesccall var
5715 blr x1 => nop
5716
5717 Where R is x for LP64, and w for ILP32. */
5718 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5719 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5720
5721 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5722 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5723 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5724
5725 bfd_putl32 (movz_R0, contents + rel->r_offset);
5726 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
5727 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5728 return bfd_reloc_continue;
5729 }
5730 else
5731 {
5732 /* Tiny TLSDESC->IE relaxation:
5733 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5734 adr x0, :tlsdesc:var => nop
5735 .tlsdesccall var
5736 blr x1 => nop
5737 */
5738 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5739 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5740
5741 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5742 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5743
5744 bfd_putl32 (ldr_R0, contents + rel->r_offset);
5745 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5746 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5747 return bfd_reloc_continue;
5748 }
5749
5750 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5751 if (is_local)
5752 {
5753 /* Tiny GD->LE relaxation:
5754 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5755 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
5756 nop => add R0, R0, #:tprel_lo12_nc:x
5757
5758 Where R is x for LP64, and w for ILP32. */
5759
5760 /* First kill the tls_get_addr reloc on the bl instruction. */
5761 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5762
5763 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5764 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
5765 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
5766
5767 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5768 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5769 rel[1].r_offset = rel->r_offset + 8;
5770
5771 /* Move the current relocation to the second instruction in
5772 the sequence. */
5773 rel->r_offset += 4;
5774 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5775 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5776 return bfd_reloc_continue;
5777 }
5778 else
5779 {
5780 /* Tiny GD->IE relaxation:
5781 adr x0, :tlsgd:var => ldr R0, :gottprel:var
5782 bl __tls_get_addr => mrs x1, tpidr_el0
5783 nop => add R0, R0, R1
5784
5785 Where R is x for LP64, and w for ILP32. */
5786
5787 /* First kill the tls_get_addr reloc on the bl instruction. */
5788 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5789 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5790
5791 bfd_putl32 (ldr_R0, contents + rel->r_offset);
5792 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5793 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
5794 return bfd_reloc_continue;
5795 }
5796
5797 #if ARCH_SIZE == 64
5798 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5799 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
5800 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
5801 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
5802
5803 if (is_local)
5804 {
5805 /* Large GD->LE relaxation:
5806 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
5807 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
5808 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
5809 bl __tls_get_addr => mrs x1, tpidr_el0
5810 nop => add x0, x0, x1
5811 */
5812 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5813 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5814 rel[2].r_offset = rel->r_offset + 8;
5815
5816 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
5817 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
5818 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
5819 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
5820 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
5821 }
5822 else
5823 {
5824 /* Large GD->IE relaxation:
5825 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
5826 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
5827 add x0, gp, x0 => ldr x0, [gp, x0]
5828 bl __tls_get_addr => mrs x1, tpidr_el0
5829 nop => add x0, x0, x1
5830 */
5831 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5832 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
5833 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
5834 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
5835 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
5836 }
5837 return bfd_reloc_continue;
5838
5839 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5840 return bfd_reloc_continue;
5841 #endif
5842
5843 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5844 return bfd_reloc_continue;
5845
5846 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5847 if (is_local)
5848 {
5849 /* GD->LE relaxation:
5850 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5851
5852 Where R is x for lp64 mode, and w for ILP32 mode. */
5853 bfd_putl32 (movk_R0, contents + rel->r_offset);
5854 return bfd_reloc_continue;
5855 }
5856 else
5857 {
5858 /* GD->IE relaxation:
5859 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
5860
5861 Where R is x for lp64 mode, and w for ILP32 mode. */
5862 insn = bfd_getl32 (contents + rel->r_offset);
5863 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
5864 return bfd_reloc_continue;
5865 }
5866
5867 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5868 if (is_local)
5869 {
5870 /* GD->LE relaxation
5871 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
5872 bl __tls_get_addr => mrs x1, tpidr_el0
5873 nop => add R0, R1, R0
5874
5875 Where R is x for lp64 mode, and w for ILP32 mode. */
5876
5877 /* First kill the tls_get_addr reloc on the bl instruction. */
5878 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5879 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5880
5881 bfd_putl32 (movk_R0, contents + rel->r_offset);
5882 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5883 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
5884 return bfd_reloc_continue;
5885 }
5886 else
5887 {
5888 /* GD->IE relaxation
5889 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
5890 BL __tls_get_addr => mrs x1, tpidr_el0
5891 R_AARCH64_CALL26
5892 NOP => add R0, R1, R0
5893
5894 Where R is x for lp64 mode, and w for ilp32 mode. */
5895
5896 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5897
5898 /* Remove the relocation on the BL instruction. */
5899 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5900
5901 /* We choose to fixup the BL and NOP instructions using the
5902 offset from the second relocation to allow flexibility in
5903 scheduling instructions between the ADD and BL. */
5904 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
5905 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5906 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
5907 return bfd_reloc_continue;
5908 }
5909
5910 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5911 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5912 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5913 /* GD->IE/LE relaxation:
5914 add x0, x0, #:tlsdesc_lo12:var => nop
5915 blr xd => nop
5916 */
5917 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5918 return bfd_reloc_ok;
5919
5920 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5921 if (is_local)
5922 {
5923 /* GD->LE relaxation:
5924 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
5925
5926 Where R is x for lp64 mode, and w for ILP32 mode. */
5927 bfd_putl32 (movk_R0, contents + rel->r_offset);
5928 return bfd_reloc_continue;
5929 }
5930 else
5931 {
5932 /* GD->IE relaxation:
5933 ldr xd, [gp, xn] => ldr R0, [gp, xn]
5934
5935 Where R is x for lp64 mode, and w for ILP32 mode. */
5936 insn = bfd_getl32 (contents + rel->r_offset);
5937 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
5938 return bfd_reloc_ok;
5939 }
5940
5941 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5942 /* GD->LE relaxation:
5943 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
5944 GD->IE relaxation:
5945 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
5946
5947 Where R is x for lp64 mode, and w for ILP32 mode. */
5948 if (is_local)
5949 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
5950 return bfd_reloc_continue;
5951
5952 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5953 if (is_local)
5954 {
5955 /* GD->LE relaxation:
5956 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
5957
5958 Where R is x for lp64 mode, and w for ILP32 mode. */
5959 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
5960 return bfd_reloc_continue;
5961 }
5962 else
5963 {
5964 /* GD->IE relaxation:
5965 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
5966
5967 Where R is x for lp64 mode, and w for ILP32 mode. */
5968 insn = bfd_getl32 (contents + rel->r_offset);
5969 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
5970 return bfd_reloc_continue;
5971 }
5972
5973 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5974 /* IE->LE relaxation:
5975 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
5976
5977 Where R is x for lp64 mode, and w for ILP32 mode. */
5978 if (is_local)
5979 {
5980 insn = bfd_getl32 (contents + rel->r_offset);
5981 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
5982 }
5983 return bfd_reloc_continue;
5984
5985 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5986 /* IE->LE relaxation:
5987 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
5988
5989 Where R is x for lp64 mode, and w for ILP32 mode. */
5990 if (is_local)
5991 {
5992 insn = bfd_getl32 (contents + rel->r_offset);
5993 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
5994 }
5995 return bfd_reloc_continue;
5996
5997 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5998 /* LD->LE relaxation (tiny):
5999 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6000 bl __tls_get_addr => add R0, R0, TCB_SIZE
6001
6002 Where R is x for lp64 mode, and w for ilp32 mode. */
6003 if (is_local)
6004 {
6005 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6006 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6007 /* The CALL26 relocation against tls_get_addr is no longer needed. */
6008 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6009 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6010 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6011 contents + rel->r_offset + 4);
6012 return bfd_reloc_ok;
6013 }
6014 return bfd_reloc_continue;
6015
6016 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6017 /* LD->LE relaxation (small):
6018 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6019 */
6020 if (is_local)
6021 {
6022 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6023 return bfd_reloc_ok;
6024 }
6025 return bfd_reloc_continue;
6026
6027 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6028 /* LD->LE relaxation (small):
6029 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6030 bl __tls_get_addr => nop
6031
6032 Where R is x for lp64 mode, and w for ilp32 mode. */
6033 if (is_local)
6034 {
6035 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6036 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6037 /* The CALL26 relocation against tls_get_addr is no longer needed. */
6038 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6039 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6040 contents + rel->r_offset + 0);
6041 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6042 return bfd_reloc_ok;
6043 }
6044 return bfd_reloc_continue;
6045
6046 default:
6047 return bfd_reloc_continue;
6048 }
6049
6050 return bfd_reloc_ok;
6051 }
6052
6053 /* Relocate an AArch64 ELF section. */
6054
6055 static bfd_boolean
6056 elfNN_aarch64_relocate_section (bfd *output_bfd,
6057 struct bfd_link_info *info,
6058 bfd *input_bfd,
6059 asection *input_section,
6060 bfd_byte *contents,
6061 Elf_Internal_Rela *relocs,
6062 Elf_Internal_Sym *local_syms,
6063 asection **local_sections)
6064 {
6065 Elf_Internal_Shdr *symtab_hdr;
6066 struct elf_link_hash_entry **sym_hashes;
6067 Elf_Internal_Rela *rel;
6068 Elf_Internal_Rela *relend;
6069 const char *name;
6070 struct elf_aarch64_link_hash_table *globals;
6071 bfd_boolean save_addend = FALSE;
6072 bfd_vma addend = 0;
6073
6074 globals = elf_aarch64_hash_table (info);
6075
6076 symtab_hdr = &elf_symtab_hdr (input_bfd);
6077 sym_hashes = elf_sym_hashes (input_bfd);
6078
6079 rel = relocs;
6080 relend = relocs + input_section->reloc_count;
6081 for (; rel < relend; rel++)
6082 {
6083 unsigned int r_type;
6084 bfd_reloc_code_real_type bfd_r_type;
6085 bfd_reloc_code_real_type relaxed_bfd_r_type;
6086 reloc_howto_type *howto;
6087 unsigned long r_symndx;
6088 Elf_Internal_Sym *sym;
6089 asection *sec;
6090 struct elf_link_hash_entry *h;
6091 bfd_vma relocation;
6092 bfd_reloc_status_type r;
6093 arelent bfd_reloc;
6094 char sym_type;
6095 bfd_boolean unresolved_reloc = FALSE;
6096 char *error_message = NULL;
6097
6098 r_symndx = ELFNN_R_SYM (rel->r_info);
6099 r_type = ELFNN_R_TYPE (rel->r_info);
6100
6101 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
6102 howto = bfd_reloc.howto;
6103
6104 if (howto == NULL)
6105 {
6106 /* xgettext:c-format */
6107 _bfd_error_handler
6108 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
6109 input_bfd, input_section, r_type);
6110 return FALSE;
6111 }
6112 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6113
6114 h = NULL;
6115 sym = NULL;
6116 sec = NULL;
6117
6118 if (r_symndx < symtab_hdr->sh_info)
6119 {
6120 sym = local_syms + r_symndx;
6121 sym_type = ELFNN_ST_TYPE (sym->st_info);
6122 sec = local_sections[r_symndx];
6123
6124 /* An object file might have a reference to a local
6125 undefined symbol. This is a daft object file, but we
6126 should at least do something about it. */
6127 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6128 && bfd_is_und_section (sec)
6129 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6130 (*info->callbacks->undefined_symbol)
6131 (info, bfd_elf_string_from_elf_section
6132 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6133 input_bfd, input_section, rel->r_offset, TRUE);
6134
6135 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6136
6137 /* Relocate against local STT_GNU_IFUNC symbol. */
6138 if (!bfd_link_relocatable (info)
6139 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6140 {
6141 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6142 rel, FALSE);
6143 if (h == NULL)
6144 abort ();
6145
6146 /* Set STT_GNU_IFUNC symbol value. */
6147 h->root.u.def.value = sym->st_value;
6148 h->root.u.def.section = sec;
6149 }
6150 }
6151 else
6152 {
6153 bfd_boolean warned, ignored;
6154
6155 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6156 r_symndx, symtab_hdr, sym_hashes,
6157 h, sec, relocation,
6158 unresolved_reloc, warned, ignored);
6159
6160 sym_type = h->type;
6161 }
6162
6163 if (sec != NULL && discarded_section (sec))
6164 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6165 rel, 1, relend, howto, 0, contents);
6166
6167 if (bfd_link_relocatable (info))
6168 continue;
6169
6170 if (h != NULL)
6171 name = h->root.root.string;
6172 else
6173 {
6174 name = (bfd_elf_string_from_elf_section
6175 (input_bfd, symtab_hdr->sh_link, sym->st_name));
6176 if (name == NULL || *name == '\0')
6177 name = bfd_section_name (input_bfd, sec);
6178 }
6179
6180 if (r_symndx != 0
6181 && r_type != R_AARCH64_NONE
6182 && r_type != R_AARCH64_NULL
6183 && (h == NULL
6184 || h->root.type == bfd_link_hash_defined
6185 || h->root.type == bfd_link_hash_defweak)
6186 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
6187 {
6188 _bfd_error_handler
6189 ((sym_type == STT_TLS
6190 /* xgettext:c-format */
6191 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
6192 /* xgettext:c-format */
6193 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
6194 input_bfd,
6195 input_section, (long) rel->r_offset, howto->name, name);
6196 }
6197
6198 /* We relax only if we can see that there can be a valid transition
6199 from one reloc type to another.
6200 We call elfNN_aarch64_final_link_relocate unless we're completely
6201 done, i.e., the relaxation produced the final output we want. */
6202
6203 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
6204 h, r_symndx);
6205 if (relaxed_bfd_r_type != bfd_r_type)
6206 {
6207 bfd_r_type = relaxed_bfd_r_type;
6208 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
6209 BFD_ASSERT (howto != NULL);
6210 r_type = howto->type;
6211 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
6212 unresolved_reloc = 0;
6213 }
6214 else
6215 r = bfd_reloc_continue;
6216
6217 /* There may be multiple consecutive relocations for the
6218 same offset. In that case we are supposed to treat the
6219 output of each relocation as the addend for the next. */
6220 if (rel + 1 < relend
6221 && rel->r_offset == rel[1].r_offset
6222 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
6223 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
6224 save_addend = TRUE;
6225 else
6226 save_addend = FALSE;
6227
6228 if (r == bfd_reloc_continue)
6229 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
6230 input_section, contents, rel,
6231 relocation, info, sec,
6232 h, &unresolved_reloc,
6233 save_addend, &addend, sym);
6234
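/* For TLS relocations that use a GOT slot, fill in the slot contents
   and emit any required dynamic relocations the first time the slot is
   encountered; the *_mark calls below record that this has been done.  */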
6235 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
6236 {
6237 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6238 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6239 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6240 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6241 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6242 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6243 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6244 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6245 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6246 {
6247 bfd_boolean need_relocs = FALSE;
6248 bfd_byte *loc;
6249 int indx;
6250 bfd_vma off;
6251
6252 off = symbol_got_offset (input_bfd, h, r_symndx);
6253 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6254
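/* Dynamic relocations are required when producing PIC output or when
   the symbol is dynamic; they are omitted for undefined weak symbols
   with non-default visibility.  */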
6255 need_relocs =
6256 (bfd_link_pic (info) || indx != 0) &&
6257 (h == NULL
6258 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6259 || h->root.type != bfd_link_hash_undefweak);
6260
6261 BFD_ASSERT (globals->root.srelgot != NULL);
6262
6263 if (need_relocs)
6264 {
6265 Elf_Internal_Rela rela;
6266 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
6267 rela.r_addend = 0;
6268 rela.r_offset = globals->root.sgot->output_section->vma +
6269 globals->root.sgot->output_offset + off;
6270
6271
6272 loc = globals->root.srelgot->contents;
6273 loc += globals->root.srelgot->reloc_count++
6274 * RELOC_SIZE (globals);
6275 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6276
6277 bfd_reloc_code_real_type real_type =
6278 elfNN_aarch64_bfd_reloc_from_type (r_type);
6279
6280 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
6281 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
6282 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
6283 {
6284 /* For local dynamic, don't generate a DTPREL reloc in any case.
6285 Initialize the DTPREL slot to zero, so that we get the module
6286 base address when the runtime TLS resolver is invoked. */
6287 bfd_put_NN (output_bfd, 0,
6288 globals->root.sgot->contents + off
6289 + GOT_ENTRY_SIZE);
6290 }
6291 else if (indx == 0)
6292 {
6293 bfd_put_NN (output_bfd,
6294 relocation - dtpoff_base (info),
6295 globals->root.sgot->contents + off
6296 + GOT_ENTRY_SIZE);
6297 }
6298 else
6299 {
6300 /* This TLS symbol is global. We emit a
6301 relocation to fixup the tls offset at load
6302 time. */
6303 rela.r_info =
6304 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
6305 rela.r_addend = 0;
6306 rela.r_offset =
6307 (globals->root.sgot->output_section->vma
6308 + globals->root.sgot->output_offset + off
6309 + GOT_ENTRY_SIZE);
6310
6311 loc = globals->root.srelgot->contents;
6312 loc += globals->root.srelgot->reloc_count++
6313 * RELOC_SIZE (globals);
6314 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6315 bfd_put_NN (output_bfd, (bfd_vma) 0,
6316 globals->root.sgot->contents + off
6317 + GOT_ENTRY_SIZE);
6318 }
6319 }
6320 else
6321 {
6322 bfd_put_NN (output_bfd, (bfd_vma) 1,
6323 globals->root.sgot->contents + off);
6324 bfd_put_NN (output_bfd,
6325 relocation - dtpoff_base (info),
6326 globals->root.sgot->contents + off
6327 + GOT_ENTRY_SIZE);
6328 }
6329
6330 symbol_got_offset_mark (input_bfd, h, r_symndx);
6331 }
6332 break;
6333
6334 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6335 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6336 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6337 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6338 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6339 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6340 {
6341 bfd_boolean need_relocs = FALSE;
6342 bfd_byte *loc;
6343 int indx;
6344 bfd_vma off;
6345
6346 off = symbol_got_offset (input_bfd, h, r_symndx);
6347
6348 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6349
6350 need_relocs =
6351 (bfd_link_pic (info) || indx != 0) &&
6352 (h == NULL
6353 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6354 || h->root.type != bfd_link_hash_undefweak);
6355
6356 BFD_ASSERT (globals->root.srelgot != NULL);
6357
6358 if (need_relocs)
6359 {
6360 Elf_Internal_Rela rela;
6361
6362 if (indx == 0)
6363 rela.r_addend = relocation - dtpoff_base (info);
6364 else
6365 rela.r_addend = 0;
6366
6367 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
6368 rela.r_offset = globals->root.sgot->output_section->vma +
6369 globals->root.sgot->output_offset + off;
6370
6371 loc = globals->root.srelgot->contents;
6372 loc += globals->root.srelgot->reloc_count++
6373 * RELOC_SIZE (globals);
6374
6375 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6376
6377 bfd_put_NN (output_bfd, rela.r_addend,
6378 globals->root.sgot->contents + off);
6379 }
6380 else
6381 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
6382 globals->root.sgot->contents + off);
6383
6384 symbol_got_offset_mark (input_bfd, h, r_symndx);
6385 }
6386 break;
6387
6388 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6389 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6390 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6391 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6392 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6393 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6394 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6395 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
6396 {
6397 bfd_boolean need_relocs = FALSE;
6398 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
6399 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
6400
6401 need_relocs = (h == NULL
6402 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6403 || h->root.type != bfd_link_hash_undefweak);
6404
6405 BFD_ASSERT (globals->root.srelgot != NULL);
6406 BFD_ASSERT (globals->root.sgot != NULL);
6407
6408 if (need_relocs)
6409 {
6410 bfd_byte *loc;
6411 Elf_Internal_Rela rela;
6412 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
6413
6414 rela.r_addend = 0;
6415 rela.r_offset = (globals->root.sgotplt->output_section->vma
6416 + globals->root.sgotplt->output_offset
6417 + off + globals->sgotplt_jump_table_size);
6418
6419 if (indx == 0)
6420 rela.r_addend = relocation - dtpoff_base (info);
6421
6422 /* Allocate the next available slot in the PLT reloc
6423 section to hold our R_AARCH64_TLSDESC; the next
6424 available slot is determined from reloc_count,
6425 which we increment here.  Note that reloc_count was
6426 artificially moved down while allocating slots for
6427 real PLT relocs, so that all of the PLT relocs
6428 will fit above the initial reloc_count and the
6429 extra stuff will fit below. */
6430 loc = globals->root.srelplt->contents;
6431 loc += globals->root.srelplt->reloc_count++
6432 * RELOC_SIZE (globals);
6433
6434 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6435
6436 bfd_put_NN (output_bfd, (bfd_vma) 0,
6437 globals->root.sgotplt->contents + off +
6438 globals->sgotplt_jump_table_size);
6439 bfd_put_NN (output_bfd, (bfd_vma) 0,
6440 globals->root.sgotplt->contents + off +
6441 globals->sgotplt_jump_table_size +
6442 GOT_ENTRY_SIZE);
6443 }
6444
6445 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
6446 }
6447 break;
6448 default:
6449 break;
6450 }
6451
6452 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
6453 because such sections are not SEC_ALLOC and thus ld.so will
6454 not process them. */
6455 if (unresolved_reloc
6456 && !((input_section->flags & SEC_DEBUGGING) != 0
6457 && h->def_dynamic)
6458 && _bfd_elf_section_offset (output_bfd, info, input_section,
6459 rel->r_offset) != (bfd_vma) - 1)
6460 {
6461 _bfd_error_handler
6462 /* xgettext:c-format */
6463 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
6464 input_bfd, input_section, (long) rel->r_offset, howto->name,
6465 h->root.root.string);
6466 return FALSE;
6467 }
6468
6469 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
6470 {
6471 bfd_reloc_code_real_type real_r_type
6472 = elfNN_aarch64_bfd_reloc_from_type (r_type);
6473
6474 switch (r)
6475 {
6476 case bfd_reloc_overflow:
6477 (*info->callbacks->reloc_overflow)
6478 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
6479 input_bfd, input_section, rel->r_offset);
6480 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
6481 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
6482 {
6483 (*info->callbacks->warning)
6484 (info,
6485 _("Too many GOT entries for -fpic, "
6486 "please recompile with -fPIC"),
6487 name, input_bfd, input_section, rel->r_offset);
6488 return FALSE;
6489 }
6490 /* Overflow can occur when a variable is referenced with a type
6491 that has a larger alignment than the type with which it was
6492 declared. eg:
6493 file1.c: extern int foo; int a (void) { return foo; }
6494 file2.c: char bar, foo, baz;
6495 If the variable is placed into a data section at an offset
6496 that is incompatible with the larger alignment requirement
6497 overflow will occur. (Strictly speaking this is not overflow
6498 but rather an alignment problem, but the bfd_reloc_ error
6499 enum does not have a value to cover that situation).
6500
6501 Try to catch this situation here and provide a more helpful
6502 error message to the user. */
6503 if (addend & ((1 << howto->rightshift) - 1)
6504 /* FIXME: Are we testing all of the appropriate reloc
6505 types here ? */
6506 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
6507 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
6508 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
6509 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
6510 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
6511 {
6512 info->callbacks->warning
6513 (info, _("One possible cause of this error is that the \
6514 symbol is being referenced in the indicated code as if it had a larger \
6515 alignment than was declared where it was defined."),
6516 name, input_bfd, input_section, rel->r_offset);
6517 }
6518 break;
6519
6520 case bfd_reloc_undefined:
6521 (*info->callbacks->undefined_symbol)
6522 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
6523 break;
6524
6525 case bfd_reloc_outofrange:
6526 error_message = _("out of range");
6527 goto common_error;
6528
6529 case bfd_reloc_notsupported:
6530 error_message = _("unsupported relocation");
6531 goto common_error;
6532
6533 case bfd_reloc_dangerous:
6534 /* error_message should already be set. */
6535 goto common_error;
6536
6537 default:
6538 error_message = _("unknown error");
6539 /* Fall through. */
6540
6541 common_error:
6542 BFD_ASSERT (error_message != NULL);
6543 (*info->callbacks->reloc_dangerous)
6544 (info, error_message, input_bfd, input_section, rel->r_offset);
6545 break;
6546 }
6547 }
6548
6549 if (!save_addend)
6550 addend = 0;
6551 }
6552
6553 return TRUE;
6554 }
6555
6556 /* Set the right machine number. */
6557
6558 static bfd_boolean
6559 elfNN_aarch64_object_p (bfd *abfd)
6560 {
6561 #if ARCH_SIZE == 32
6562 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
6563 #else
6564 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
6565 #endif
6566 return TRUE;
6567 }
6568
6569 /* Function to keep AArch64 specific flags in the ELF header. */
6570
6571 static bfd_boolean
6572 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
6573 {
6574 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
6575 {
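/* The flags have already been initialised to a different value;
   leave them untouched.  */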
6576 }
6577 else
6578 {
6579 elf_elfheader (abfd)->e_flags = flags;
6580 elf_flags_init (abfd) = TRUE;
6581 }
6582
6583 return TRUE;
6584 }
6585
6586 /* Merge backend specific data from an object file to the output
6587 object file when linking. */
6588
6589 static bfd_boolean
6590 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
6591 {
6592 bfd *obfd = info->output_bfd;
6593 flagword out_flags;
6594 flagword in_flags;
6595 bfd_boolean flags_compatible = TRUE;
6596 asection *sec;
6597
6598 /* Check if we have the same endianness. */
6599 if (!_bfd_generic_verify_endian_match (ibfd, info))
6600 return FALSE;
6601
6602 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
6603 return TRUE;
6604
6605 /* The input BFD must have had its flags initialised. */
6606 /* The following seems bogus to me -- The flags are initialized in
6607 the assembler but I don't think an elf_flags_init field is
6608 written into the object. */
6609 /* BFD_ASSERT (elf_flags_init (ibfd)); */
6610
6611 in_flags = elf_elfheader (ibfd)->e_flags;
6612 out_flags = elf_elfheader (obfd)->e_flags;
6613
6614 if (!elf_flags_init (obfd))
6615 {
6616 /* If the input is the default architecture and had the default
6617 flags then do not bother setting the flags for the output
6618 architecture, instead allow future merges to do this. If no
6619 future merges ever set these flags then they will retain their
6620 uninitialised values which, surprise surprise, correspond
6621 to the default values. */
6622 if (bfd_get_arch_info (ibfd)->the_default
6623 && elf_elfheader (ibfd)->e_flags == 0)
6624 return TRUE;
6625
6626 elf_flags_init (obfd) = TRUE;
6627 elf_elfheader (obfd)->e_flags = in_flags;
6628
6629 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
6630 && bfd_get_arch_info (obfd)->the_default)
6631 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
6632 bfd_get_mach (ibfd));
6633
6634 return TRUE;
6635 }
6636
6637 /* Identical flags must be compatible. */
6638 if (in_flags == out_flags)
6639 return TRUE;
6640
6641 /* Check to see if the input BFD actually contains any sections. If
6642 not, its flags may not have been initialised either, but it
6643 cannot actually cause any incompatibility. Do not short-circuit
6644 dynamic objects; their section list may be emptied by
6645 elf_link_add_object_symbols.
6646
6647 Also check to see if there are no code sections in the input.
6648 In this case there is no need to check for code specific flags.
6649 XXX - do we need to worry about floating-point format compatibility
6650 in data sections ? */
6651 if (!(ibfd->flags & DYNAMIC))
6652 {
6653 bfd_boolean null_input_bfd = TRUE;
6654 bfd_boolean only_data_sections = TRUE;
6655
6656 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
6657 {
6658 if ((bfd_get_section_flags (ibfd, sec)
6659 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
6660 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
6661 only_data_sections = FALSE;
6662
6663 null_input_bfd = FALSE;
6664 break;
6665 }
6666
6667 if (null_input_bfd || only_data_sections)
6668 return TRUE;
6669 }
6670
6671 return flags_compatible;
6672 }
6673
6674 /* Display the flags field. */
6675
6676 static bfd_boolean
6677 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
6678 {
6679 FILE *file = (FILE *) ptr;
6680 unsigned long flags;
6681
6682 BFD_ASSERT (abfd != NULL && ptr != NULL);
6683
6684 /* Print normal ELF private data. */
6685 _bfd_elf_print_private_bfd_data (abfd, ptr);
6686
6687 flags = elf_elfheader (abfd)->e_flags;
6688 /* Ignore init flag - it may not be set, despite the flags field
6689 containing valid data. */
6690
6691 /* xgettext:c-format */
6692 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
6693
6694 if (flags)
6695 fprintf (file, _("<Unrecognised flag bits set>"));
6696
6697 fputc ('\n', file);
6698
6699 return TRUE;
6700 }
6701
6702 /* Update the got entry reference counts for the section being removed. */
6703
6704 static bfd_boolean
6705 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
6706 struct bfd_link_info *info,
6707 asection *sec,
6708 const Elf_Internal_Rela * relocs)
6709 {
6710 struct elf_aarch64_link_hash_table *htab;
6711 Elf_Internal_Shdr *symtab_hdr;
6712 struct elf_link_hash_entry **sym_hashes;
6713 struct elf_aarch64_local_symbol *locals;
6714 const Elf_Internal_Rela *rel, *relend;
6715
6716 if (bfd_link_relocatable (info))
6717 return TRUE;
6718
6719 htab = elf_aarch64_hash_table (info);
6720
6721 if (htab == NULL)
6722 return FALSE;
6723
6724 elf_section_data (sec)->local_dynrel = NULL;
6725
6726 symtab_hdr = &elf_symtab_hdr (abfd);
6727 sym_hashes = elf_sym_hashes (abfd);
6728
6729 locals = elf_aarch64_locals (abfd);
6730
6731 relend = relocs + sec->reloc_count;
6732 for (rel = relocs; rel < relend; rel++)
6733 {
6734 unsigned long r_symndx;
6735 unsigned int r_type;
6736 struct elf_link_hash_entry *h = NULL;
6737
6738 r_symndx = ELFNN_R_SYM (rel->r_info);
6739
6740 if (r_symndx >= symtab_hdr->sh_info)
6741 {
6742
6743 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6744 while (h->root.type == bfd_link_hash_indirect
6745 || h->root.type == bfd_link_hash_warning)
6746 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6747 }
6748 else
6749 {
6750 Elf_Internal_Sym *isym;
6751
6752 /* A local symbol. */
6753 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6754 abfd, r_symndx);
6755
6756 /* Check relocation against local STT_GNU_IFUNC symbol. */
6757 if (isym != NULL
6758 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6759 {
6760 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6761 if (h == NULL)
6762 abort ();
6763 }
6764 }
6765
6766 if (h)
6767 {
6768 struct elf_aarch64_link_hash_entry *eh;
6769 struct elf_dyn_relocs **pp;
6770 struct elf_dyn_relocs *p;
6771
6772 eh = (struct elf_aarch64_link_hash_entry *) h;
6773
6774 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6775 if (p->sec == sec)
6776 {
6777 /* Everything must go for SEC. */
6778 *pp = p->next;
6779 break;
6780 }
6781 }
6782
6783 r_type = ELFNN_R_TYPE (rel->r_info);
6784 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6785 {
6786 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6787 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6788 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6789 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6790 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6791 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6792 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6793 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6794 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6795 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6796 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6797 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6798 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6799 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6800 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6801 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6802 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6803 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6804 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6805 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6806 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6807 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6808 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6809 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6810 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6811 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6812 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6813 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6814 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6815 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6816 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6817 if (h != NULL)
6818 {
6819 if (h->got.refcount > 0)
6820 h->got.refcount -= 1;
6821
6822 if (h->type == STT_GNU_IFUNC)
6823 {
6824 if (h->plt.refcount > 0)
6825 h->plt.refcount -= 1;
6826 }
6827 }
6828 else if (locals != NULL)
6829 {
6830 if (locals[r_symndx].got_refcount > 0)
6831 locals[r_symndx].got_refcount -= 1;
6832 }
6833 break;
6834
6835 case BFD_RELOC_AARCH64_CALL26:
6836 case BFD_RELOC_AARCH64_JUMP26:
6837 /* If this is a local symbol then we resolve it
6838 directly without creating a PLT entry. */
6839 if (h == NULL)
6840 continue;
6841
6842 if (h->plt.refcount > 0)
6843 h->plt.refcount -= 1;
6844 break;
6845
6846 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6847 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6848 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6849 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6850 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6851 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6852 case BFD_RELOC_AARCH64_MOVW_G3:
6853 case BFD_RELOC_AARCH64_NN:
6854 if (h != NULL && bfd_link_executable (info))
6855 {
6856 if (h->plt.refcount > 0)
6857 h->plt.refcount -= 1;
6858 }
6859 break;
6860
6861 default:
6862 break;
6863 }
6864 }
6865
6866 return TRUE;
6867 }
6868
6869 /* Adjust a symbol defined by a dynamic object and referenced by a
6870 regular object. The current definition is in some section of the
6871 dynamic object, but we're not including those sections. We have to
6872 change the definition to something the rest of the link can
6873 understand. */
6874
6875 static bfd_boolean
6876 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6877 struct elf_link_hash_entry *h)
6878 {
6879 struct elf_aarch64_link_hash_table *htab;
6880 asection *s, *srel;
6881
6882 /* If this is a function, put it in the procedure linkage table. We
6883 will fill in the contents of the procedure linkage table later,
6884 when we know the address of the .got section. */
6885 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6886 {
6887 if (h->plt.refcount <= 0
6888 || (h->type != STT_GNU_IFUNC
6889 && (SYMBOL_CALLS_LOCAL (info, h)
6890 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6891 && h->root.type == bfd_link_hash_undefweak))))
6892 {
6893 /* This case can occur if we saw a CALL26 reloc in
6894 an input file, but the symbol wasn't referred to
6895 by a dynamic object or all references were
6896 garbage collected.  In that case no PLT entry is
6897 needed and the branch can be resolved directly. */
6898 h->plt.offset = (bfd_vma) - 1;
6899 h->needs_plt = 0;
6900 }
6901
6902 return TRUE;
6903 }
6904 else
6905 /* Otherwise, reset to -1. */
6906 h->plt.offset = (bfd_vma) - 1;
6907
6908
6909 /* If this is a weak symbol, and there is a real definition, the
6910 processor independent code will have arranged for us to see the
6911 real definition first, and we can just use the same value. */
6912 if (h->u.weakdef != NULL)
6913 {
6914 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6915 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6916 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6917 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6918 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6919 h->non_got_ref = h->u.weakdef->non_got_ref;
6920 return TRUE;
6921 }
6922
6923 /* If we are creating a shared library, we must presume that the
6924 only references to the symbol are via the global offset table.
6925 For such cases we need not do anything here; the relocations will
6926 be handled correctly by relocate_section. */
6927 if (bfd_link_pic (info))
6928 return TRUE;
6929
6930 /* If there are no references to this symbol that do not use the
6931 GOT, we don't need to generate a copy reloc. */
6932 if (!h->non_got_ref)
6933 return TRUE;
6934
6935 /* If -z nocopyreloc was given, we won't generate them either. */
6936 if (info->nocopyreloc)
6937 {
6938 h->non_got_ref = 0;
6939 return TRUE;
6940 }
6941
6942 /* We must allocate the symbol in our .dynbss section, which will
6943 become part of the .bss section of the executable. There will be
6944 an entry for this symbol in the .dynsym section. The dynamic
6945 object will contain position independent code, so all references
6946 from the dynamic object to this symbol will go through the global
6947 offset table. The dynamic linker will use the .dynsym entry to
6948 determine the address it must put in the global offset table, so
6949 both the dynamic object and the regular object will refer to the
6950 same memory location for the variable. */
6951
6952 htab = elf_aarch64_hash_table (info);
6953
6954 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6955 to copy the initial value out of the dynamic object and into the
6956 runtime process image. */
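  /* A symbol defined in a read-only section is copied into the relro
     copy section (sdynrelro) so the copy can be made read-only after
     relocation; writable symbols are copied into .dynbss.  */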
6957 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
6958 {
6959 s = htab->root.sdynrelro;
6960 srel = htab->root.sreldynrelro;
6961 }
6962 else
6963 {
6964 s = htab->root.sdynbss;
6965 srel = htab->root.srelbss;
6966 }
6967 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6968 {
6969 srel->size += RELOC_SIZE (htab);
6970 h->needs_copy = 1;
6971 }
6972
6973 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6974
6975 }
6976
6977 static bfd_boolean
6978 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6979 {
6980 struct elf_aarch64_local_symbol *locals;
6981 locals = elf_aarch64_locals (abfd);
6982 if (locals == NULL)
6983 {
6984 locals = (struct elf_aarch64_local_symbol *)
6985 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6986 if (locals == NULL)
6987 return FALSE;
6988 elf_aarch64_locals (abfd) = locals;
6989 }
6990 return TRUE;
6991 }
6992
6993 /* Create the .got section to hold the global offset table. */
6994
6995 static bfd_boolean
6996 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6997 {
6998 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6999 flagword flags;
7000 asection *s;
7001 struct elf_link_hash_entry *h;
7002 struct elf_link_hash_table *htab = elf_hash_table (info);
7003
7004 /* This function may be called more than once. */
7005 if (htab->sgot != NULL)
7006 return TRUE;
7007
7008 flags = bed->dynamic_sec_flags;
7009
7010 s = bfd_make_section_anyway_with_flags (abfd,
7011 (bed->rela_plts_and_copies_p
7012 ? ".rela.got" : ".rel.got"),
7013 (bed->dynamic_sec_flags
7014 | SEC_READONLY));
7015 if (s == NULL
7016 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
7017 return FALSE;
7018 htab->srelgot = s;
7019
7020 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7021 if (s == NULL
7022 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
7023 return FALSE;
7024 htab->sgot = s;
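  /* Reserve space for the first entry of the .got proper.  */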
7025 htab->sgot->size += GOT_ENTRY_SIZE;
7026
7027 if (bed->want_got_sym)
7028 {
7029 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7030 (or .got.plt) section. We don't do this in the linker script
7031 because we don't want to define the symbol if we are not creating
7032 a global offset table. */
7033 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7034 "_GLOBAL_OFFSET_TABLE_");
7035 elf_hash_table (info)->hgot = h;
7036 if (h == NULL)
7037 return FALSE;
7038 }
7039
7040 if (bed->want_got_plt)
7041 {
7042 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7043 if (s == NULL
7044 || !bfd_set_section_alignment (abfd, s,
7045 bed->s->log_file_align))
7046 return FALSE;
7047 htab->sgotplt = s;
7048 }
7049
7050 /* The first bit of the global offset table is the header. */
7051 s->size += bed->got_header_size;
7052
7053 return TRUE;
7054 }
7055
7056 /* Look through the relocs for a section during the first phase. */
7057
7058 static bfd_boolean
7059 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7060 asection *sec, const Elf_Internal_Rela *relocs)
7061 {
7062 Elf_Internal_Shdr *symtab_hdr;
7063 struct elf_link_hash_entry **sym_hashes;
7064 const Elf_Internal_Rela *rel;
7065 const Elf_Internal_Rela *rel_end;
7066 asection *sreloc;
7067
7068 struct elf_aarch64_link_hash_table *htab;
7069
7070 if (bfd_link_relocatable (info))
7071 return TRUE;
7072
7073 BFD_ASSERT (is_aarch64_elf (abfd));
7074
7075 htab = elf_aarch64_hash_table (info);
7076 sreloc = NULL;
7077
7078 symtab_hdr = &elf_symtab_hdr (abfd);
7079 sym_hashes = elf_sym_hashes (abfd);
7080
7081 rel_end = relocs + sec->reloc_count;
7082 for (rel = relocs; rel < rel_end; rel++)
7083 {
7084 struct elf_link_hash_entry *h;
7085 unsigned long r_symndx;
7086 unsigned int r_type;
7087 bfd_reloc_code_real_type bfd_r_type;
7088 Elf_Internal_Sym *isym;
7089
7090 r_symndx = ELFNN_R_SYM (rel->r_info);
7091 r_type = ELFNN_R_TYPE (rel->r_info);
7092
7093 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7094 {
7095 /* xgettext:c-format */
7096 _bfd_error_handler (_("%B: bad symbol index: %d"), abfd, r_symndx);
7097 return FALSE;
7098 }
7099
7100 if (r_symndx < symtab_hdr->sh_info)
7101 {
7102 /* A local symbol. */
7103 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7104 abfd, r_symndx);
7105 if (isym == NULL)
7106 return FALSE;
7107
7108 /* Check relocation against local STT_GNU_IFUNC symbol. */
7109 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7110 {
7111 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7112 TRUE);
7113 if (h == NULL)
7114 return FALSE;
7115
7116 /* Fake a STT_GNU_IFUNC symbol. */
7117 h->type = STT_GNU_IFUNC;
7118 h->def_regular = 1;
7119 h->ref_regular = 1;
7120 h->forced_local = 1;
7121 h->root.type = bfd_link_hash_defined;
7122 }
7123 else
7124 h = NULL;
7125 }
7126 else
7127 {
7128 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7129 while (h->root.type == bfd_link_hash_indirect
7130 || h->root.type == bfd_link_hash_warning)
7131 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7132
7133 /* PR15323, ref flags aren't set for references in the same
7134 object. */
7135 h->root.non_ir_ref = 1;
7136 }
7137
7138 /* Could be done earlier, if h were already available. */
7139 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7140
7141 if (h != NULL)
7142 {
7143 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7144 This shows up in particular with an R_AARCH64_PREL64 in the large model
7145 when calculating the pc-relative address of the .got section, which is
7146 used to initialize the gp register. */
7147 if (h->root.root.string
7148 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7149 {
7150 if (htab->root.dynobj == NULL)
7151 htab->root.dynobj = abfd;
7152
7153 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7154 return FALSE;
7155
7156 BFD_ASSERT (h == htab->root.hgot);
7157 }
7158
7159 /* Create the ifunc sections for static executables. If we
7160 never see an indirect function symbol and we are not building
7161 a static executable, those sections will be empty and
7162 won't appear in the output. */
7163 switch (bfd_r_type)
7164 {
7165 default:
7166 break;
7167
7168 case BFD_RELOC_AARCH64_ADD_LO12:
7169 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7170 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7171 case BFD_RELOC_AARCH64_CALL26:
7172 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7173 case BFD_RELOC_AARCH64_JUMP26:
7174 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7175 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7176 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7177 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7178 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7179 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7180 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7181 case BFD_RELOC_AARCH64_NN:
7182 if (htab->root.dynobj == NULL)
7183 htab->root.dynobj = abfd;
7184 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7185 return FALSE;
7186 break;
7187 }
7188
7189 /* It is referenced by a non-shared object. */
7190 h->ref_regular = 1;
7191 h->root.non_ir_ref = 1;
7192 }
7193
7194 switch (bfd_r_type)
7195 {
7196 case BFD_RELOC_AARCH64_NN:
7197
7198 /* We don't need to handle relocs into sections not going into
7199 the "real" output. */
7200 if ((sec->flags & SEC_ALLOC) == 0)
7201 break;
7202
7203 if (h != NULL)
7204 {
7205 if (!bfd_link_pic (info))
7206 h->non_got_ref = 1;
7207
7208 h->plt.refcount += 1;
7209 h->pointer_equality_needed = 1;
7210 }
7211
7212 /* No need to do anything if we're not creating a shared
7213 object. */
7214 if (! bfd_link_pic (info))
7215 break;
7216
7217 {
7218 struct elf_dyn_relocs *p;
7219 struct elf_dyn_relocs **head;
7220
7221 /* We must copy these reloc types into the output file.
7222 Create a reloc section in dynobj and make room for
7223 this reloc. */
7224 if (sreloc == NULL)
7225 {
7226 if (htab->root.dynobj == NULL)
7227 htab->root.dynobj = abfd;
7228
7229 sreloc = _bfd_elf_make_dynamic_reloc_section
7230 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
7231
7232 if (sreloc == NULL)
7233 return FALSE;
7234 }
7235
7236 /* If this is a global symbol, we count the number of
7237 relocations we need for this symbol. */
7238 if (h != NULL)
7239 {
7240 struct elf_aarch64_link_hash_entry *eh;
7241 eh = (struct elf_aarch64_link_hash_entry *) h;
7242 head = &eh->dyn_relocs;
7243 }
7244 else
7245 {
7246 /* Track dynamic relocs needed for local syms too.
7247 We really need local syms available to do this
7248 easily. Oh well. */
7249
7250 asection *s;
7251 void **vpp;
7252
7253 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7254 abfd, r_symndx);
7255 if (isym == NULL)
7256 return FALSE;
7257
7258 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
7259 if (s == NULL)
7260 s = sec;
7261
7262 /* Beware of type punned pointers vs strict aliasing
7263 rules. */
7264 vpp = &(elf_section_data (s)->local_dynrel);
7265 head = (struct elf_dyn_relocs **) vpp;
7266 }
7267
7268 p = *head;
7269 if (p == NULL || p->sec != sec)
7270 {
7271 bfd_size_type amt = sizeof *p;
7272 p = ((struct elf_dyn_relocs *)
7273 bfd_zalloc (htab->root.dynobj, amt));
7274 if (p == NULL)
7275 return FALSE;
7276 p->next = *head;
7277 *head = p;
7278 p->sec = sec;
7279 }
7280
7281 p->count += 1;
7282
7283 }
7284 break;
7285
7286 /* RR: We probably want to keep a consistency check that
7287 there are no dangling GOT_PAGE relocs. */
7288 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7289 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7290 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7291 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7292 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7293 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7294 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7295 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7296 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7297 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7298 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7299 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7300 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7301 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7302 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7303 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7304 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7305 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7306 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7307 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7308 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7309 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7310 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7311 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7312 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7313 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7314 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7315 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7316 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7317 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7318 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7319 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7320 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7321 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7322 {
7323 unsigned got_type;
7324 unsigned old_got_type;
7325
7326 got_type = aarch64_reloc_got_type (bfd_r_type);
7327
7328 if (h)
7329 {
7330 h->got.refcount += 1;
7331 old_got_type = elf_aarch64_hash_entry (h)->got_type;
7332 }
7333 else
7334 {
7335 struct elf_aarch64_local_symbol *locals;
7336
7337 if (!elfNN_aarch64_allocate_local_symbols
7338 (abfd, symtab_hdr->sh_info))
7339 return FALSE;
7340
7341 locals = elf_aarch64_locals (abfd);
7342 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7343 locals[r_symndx].got_refcount += 1;
7344 old_got_type = locals[r_symndx].got_type;
7345 }
7346
7347 /* If a variable is accessed with both general dynamic TLS
7348 methods, two slots may be created. */
7349 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
7350 got_type |= old_got_type;
7351
7352 /* We will already have issued an error message if there
7353 is a TLS/non-TLS mismatch, based on the symbol type.
7354 So just combine any TLS types needed. */
7355 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
7356 && got_type != GOT_NORMAL)
7357 got_type |= old_got_type;
7358
7359 /* If the symbol is accessed by both IE and GD methods, we
7360 are able to relax. Turn off the GD flag, without
7361 disturbing any other TLS types that may be
7362 involved. */
7363 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
7364 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
7365
7366 if (old_got_type != got_type)
7367 {
7368 if (h != NULL)
7369 elf_aarch64_hash_entry (h)->got_type = got_type;
7370 else
7371 {
7372 struct elf_aarch64_local_symbol *locals;
7373 locals = elf_aarch64_locals (abfd);
7374 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7375 locals[r_symndx].got_type = got_type;
7376 }
7377 }
7378
7379 if (htab->root.dynobj == NULL)
7380 htab->root.dynobj = abfd;
7381 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7382 return FALSE;
7383 break;
7384 }
7385
7386 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7387 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7388 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7389 case BFD_RELOC_AARCH64_MOVW_G3:
7390 if (bfd_link_pic (info))
7391 {
7392 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7393 _bfd_error_handler
7394 /* xgettext:c-format */
7395 (_("%B: relocation %s against `%s' can not be used when making "
7396 "a shared object; recompile with -fPIC"),
7397 abfd, elfNN_aarch64_howto_table[howto_index].name,
7398 (h) ? h->root.root.string : "a local symbol");
7399 bfd_set_error (bfd_error_bad_value);
7400 return FALSE;
7401 }
7402 /* Fall through. */
7403
7404 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7405 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7406 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7407 if (h != NULL && bfd_link_executable (info))
7408 {
7409 /* If this reloc is in a read-only section, we might
7410 need a copy reloc. We can't check reliably at this
7411 stage whether the section is read-only, as input
7412 sections have not yet been mapped to output sections.
7413 Tentatively set the flag for now, and correct in
7414 adjust_dynamic_symbol. */
7415 h->non_got_ref = 1;
7416 h->plt.refcount += 1;
7417 h->pointer_equality_needed = 1;
7418 }
7419 /* FIXME: RR need to handle these in shared libraries,
7420 and essentially bomb out, as these are non-PIC
7421 relocations in shared libraries. */
7422 break;
7423
7424 case BFD_RELOC_AARCH64_CALL26:
7425 case BFD_RELOC_AARCH64_JUMP26:
7426 /* If this is a local symbol then we resolve it
7427 directly without creating a PLT entry. */
7428 if (h == NULL)
7429 continue;
7430
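	  /* A direct call to a global symbol may need a PLT entry (or a
	     branch stub); count the reference here and let
	     adjust_dynamic_symbol decide later whether a PLT entry is
	     really required.  */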
7431 h->needs_plt = 1;
7432 if (h->plt.refcount <= 0)
7433 h->plt.refcount = 1;
7434 else
7435 h->plt.refcount += 1;
7436 break;
7437
7438 default:
7439 break;
7440 }
7441 }
7442
7443 return TRUE;
7444 }
7445
7446 /* Treat mapping symbols as special target symbols. */
7447
7448 static bfd_boolean
7449 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
7450 asymbol *sym)
7451 {
7452 return bfd_is_aarch64_special_symbol_name (sym->name,
7453 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
7454 }
7455
7456 /* This is a copy of elf_find_function () from elf.c except that
7457 AArch64 mapping symbols are ignored when looking for function names. */
7458
7459 static bfd_boolean
7460 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
7461 asymbol **symbols,
7462 asection *section,
7463 bfd_vma offset,
7464 const char **filename_ptr,
7465 const char **functionname_ptr)
7466 {
7467 const char *filename = NULL;
7468 asymbol *func = NULL;
7469 bfd_vma low_func = 0;
7470 asymbol **p;
7471
7472 for (p = symbols; *p != NULL; p++)
7473 {
7474 elf_symbol_type *q;
7475
7476 q = (elf_symbol_type *) * p;
7477
7478 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
7479 {
7480 default:
7481 break;
7482 case STT_FILE:
7483 filename = bfd_asymbol_name (&q->symbol);
7484 break;
7485 case STT_FUNC:
7486 case STT_NOTYPE:
7487 /* Skip mapping symbols. */
7488 if ((q->symbol.flags & BSF_LOCAL)
7489 && (bfd_is_aarch64_special_symbol_name
7490 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
7491 continue;
7492 /* Fall through. */
7493 if (bfd_get_section (&q->symbol) == section
7494 && q->symbol.value >= low_func && q->symbol.value <= offset)
7495 {
7496 func = (asymbol *) q;
7497 low_func = q->symbol.value;
7498 }
7499 break;
7500 }
7501 }
7502
7503 if (func == NULL)
7504 return FALSE;
7505
7506 if (filename_ptr)
7507 *filename_ptr = filename;
7508 if (functionname_ptr)
7509 *functionname_ptr = bfd_asymbol_name (func);
7510
7511 return TRUE;
7512 }
7513
7514
7515 /* Find the nearest line to a particular section and offset, for error
7516 reporting. This code is a duplicate of the code in elf.c, except
7517 that it uses aarch64_elf_find_function. */
7518
7519 static bfd_boolean
7520 elfNN_aarch64_find_nearest_line (bfd *abfd,
7521 asymbol **symbols,
7522 asection *section,
7523 bfd_vma offset,
7524 const char **filename_ptr,
7525 const char **functionname_ptr,
7526 unsigned int *line_ptr,
7527 unsigned int *discriminator_ptr)
7528 {
7529 bfd_boolean found = FALSE;
7530
7531 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
7532 filename_ptr, functionname_ptr,
7533 line_ptr, discriminator_ptr,
7534 dwarf_debug_sections, 0,
7535 &elf_tdata (abfd)->dwarf2_find_line_info))
7536 {
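      /* DWARF gave us line information but no enclosing function name;
	 fall back to the symbol table, ignoring AArch64 mapping
	 symbols.  */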
7537 if (!*functionname_ptr)
7538 aarch64_elf_find_function (abfd, symbols, section, offset,
7539 *filename_ptr ? NULL : filename_ptr,
7540 functionname_ptr);
7541
7542 return TRUE;
7543 }
7544
7545 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
7546 toolchain uses DWARF1. */
7547
7548 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
7549 &found, filename_ptr,
7550 functionname_ptr, line_ptr,
7551 &elf_tdata (abfd)->line_info))
7552 return FALSE;
7553
7554 if (found && (*functionname_ptr || *line_ptr))
7555 return TRUE;
7556
7557 if (symbols == NULL)
7558 return FALSE;
7559
7560 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
7561 filename_ptr, functionname_ptr))
7562 return FALSE;
7563
7564 *line_ptr = 0;
7565 return TRUE;
7566 }
7567
7568 static bfd_boolean
7569 elfNN_aarch64_find_inliner_info (bfd *abfd,
7570 const char **filename_ptr,
7571 const char **functionname_ptr,
7572 unsigned int *line_ptr)
7573 {
7574 bfd_boolean found;
7575 found = _bfd_dwarf2_find_inliner_info
7576 (abfd, filename_ptr,
7577 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
7578 return found;
7579 }
7580
7581
7582 static void
7583 elfNN_aarch64_post_process_headers (bfd *abfd,
7584 struct bfd_link_info *link_info)
7585 {
7586 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
7587
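  /* Stamp the AArch64 ABI version into the ELF header before running
     the generic post-processing.  */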
7588 i_ehdrp = elf_elfheader (abfd);
7589 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
7590
7591 _bfd_elf_post_process_headers (abfd, link_info);
7592 }
7593
7594 static enum elf_reloc_type_class
7595 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
7596 const asection *rel_sec ATTRIBUTE_UNUSED,
7597 const Elf_Internal_Rela *rela)
7598 {
7599 switch ((int) ELFNN_R_TYPE (rela->r_info))
7600 {
7601 case AARCH64_R (RELATIVE):
7602 return reloc_class_relative;
7603 case AARCH64_R (JUMP_SLOT):
7604 return reloc_class_plt;
7605 case AARCH64_R (COPY):
7606 return reloc_class_copy;
7607 default:
7608 return reloc_class_normal;
7609 }
7610 }
7611
7612 /* Handle an AArch64 specific section when reading an object file. This is
7613 called when bfd_section_from_shdr finds a section with an unknown
7614 type. */
7615
7616 static bfd_boolean
7617 elfNN_aarch64_section_from_shdr (bfd *abfd,
7618 Elf_Internal_Shdr *hdr,
7619 const char *name, int shindex)
7620 {
7621 /* There ought to be a place to keep ELF backend specific flags, but
7622 at the moment there isn't one. We just keep track of the
7623 sections by their name, instead. Fortunately, the ABI gives
7624 names for all the AArch64 specific sections, so we will probably get
7625 away with this. */
7626 switch (hdr->sh_type)
7627 {
7628 case SHT_AARCH64_ATTRIBUTES:
7629 break;
7630
7631 default:
7632 return FALSE;
7633 }
7634
7635 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
7636 return FALSE;
7637
7638 return TRUE;
7639 }
7640
7641 /* A structure used to record a list of sections, independently
7642 of the next and prev fields in the asection structure. */
7643 typedef struct section_list
7644 {
7645 asection *sec;
7646 struct section_list *next;
7647 struct section_list *prev;
7648 }
7649 section_list;
7650
7651 /* Unfortunately we need to keep a list of sections for which
7652 an _aarch64_elf_section_data structure has been allocated. This
7653 is because it is possible for functions like elfNN_aarch64_write_section
7654 to be called on a section which has had an elf_data_structure
7655 allocated for it (and so the used_by_bfd field is valid) but
7656 for which the AArch64 extended version of this structure - the
7657 _aarch64_elf_section_data structure - has not been allocated. */
7658 static section_list *sections_with_aarch64_elf_section_data = NULL;
7659
7660 static void
7661 record_section_with_aarch64_elf_section_data (asection *sec)
7662 {
7663 struct section_list *entry;
7664
7665 entry = bfd_malloc (sizeof (*entry));
7666 if (entry == NULL)
7667 return;
7668 entry->sec = sec;
7669 entry->next = sections_with_aarch64_elf_section_data;
7670 entry->prev = NULL;
7671 if (entry->next != NULL)
7672 entry->next->prev = entry;
7673 sections_with_aarch64_elf_section_data = entry;
7674 }
7675
7676 static struct section_list *
7677 find_aarch64_elf_section_entry (asection *sec)
7678 {
7679 struct section_list *entry;
7680 static struct section_list *last_entry = NULL;
7681
7682 /* This is a short cut for the typical case where the sections are added
7683 to the sections_with_aarch64_elf_section_data list in forward order and
7684 then looked up here in backwards order. This makes a real difference
7685 to the ld-srec/sec64k.exp linker test. */
7686 entry = sections_with_aarch64_elf_section_data;
7687 if (last_entry != NULL)
7688 {
7689 if (last_entry->sec == sec)
7690 entry = last_entry;
7691 else if (last_entry->next != NULL && last_entry->next->sec == sec)
7692 entry = last_entry->next;
7693 }
7694
7695 for (; entry; entry = entry->next)
7696 if (entry->sec == sec)
7697 break;
7698
7699 if (entry)
7700 /* Record the entry prior to this one - it is the entry we are
7701 most likely to want to locate next time. Also this way if we
7702 have been called from
7703 unrecord_section_with_aarch64_elf_section_data () we will not
7704 be caching a pointer that is about to be freed. */
7705 last_entry = entry->prev;
7706
7707 return entry;
7708 }
7709
7710 static void
7711 unrecord_section_with_aarch64_elf_section_data (asection *sec)
7712 {
7713 struct section_list *entry;
7714
7715 entry = find_aarch64_elf_section_entry (sec);
7716
7717 if (entry)
7718 {
7719 if (entry->prev != NULL)
7720 entry->prev->next = entry->next;
7721 if (entry->next != NULL)
7722 entry->next->prev = entry->prev;
7723 if (entry == sections_with_aarch64_elf_section_data)
7724 sections_with_aarch64_elf_section_data = entry->next;
7725 free (entry);
7726 }
7727 }
7728
7729
7730 typedef struct
7731 {
7732 void *finfo;
7733 struct bfd_link_info *info;
7734 asection *sec;
7735 int sec_shndx;
7736 int (*func) (void *, const char *, Elf_Internal_Sym *,
7737 asection *, struct elf_link_hash_entry *);
7738 } output_arch_syminfo;
7739
7740 enum map_symbol_type
7741 {
7742 AARCH64_MAP_INSN,
7743 AARCH64_MAP_DATA
7744 };
7745
7746
7747 /* Output a single mapping symbol. */
7748
7749 static bfd_boolean
7750 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
7751 enum map_symbol_type type, bfd_vma offset)
7752 {
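  /* Mapping symbol names defined by the AArch64 ELF ABI: $x marks the
     start of a sequence of A64 instructions, $d the start of data.  */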
7753 static const char *names[2] = { "$x", "$d" };
7754 Elf_Internal_Sym sym;
7755
7756 sym.st_value = (osi->sec->output_section->vma
7757 + osi->sec->output_offset + offset);
7758 sym.st_size = 0;
7759 sym.st_other = 0;
7760 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7761 sym.st_shndx = osi->sec_shndx;
7762 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7763 }
7764
7765 /* Output a single local symbol for a generated stub. */
7766
7767 static bfd_boolean
7768 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7769 bfd_vma offset, bfd_vma size)
7770 {
7771 Elf_Internal_Sym sym;
7772
7773 sym.st_value = (osi->sec->output_section->vma
7774 + osi->sec->output_offset + offset);
7775 sym.st_size = size;
7776 sym.st_other = 0;
7777 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7778 sym.st_shndx = osi->sec_shndx;
7779 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7780 }
7781
7782 static bfd_boolean
7783 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7784 {
7785 struct elf_aarch64_stub_hash_entry *stub_entry;
7786 asection *stub_sec;
7787 bfd_vma addr;
7788 char *stub_name;
7789 output_arch_syminfo *osi;
7790
7791 /* Massage our args to the form they really have. */
7792 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7793 osi = (output_arch_syminfo *) in_arg;
7794
7795 stub_sec = stub_entry->stub_sec;
7796
7797 /* Ensure this stub is attached to the current section being
7798 processed. */
7799 if (stub_sec != osi->sec)
7800 return TRUE;
7801
7802 addr = (bfd_vma) stub_entry->stub_offset;
7803
7804 stub_name = stub_entry->output_name;
7805
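  /* Emit a local symbol naming the stub, followed by mapping symbols
     describing its contents.  */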
7806 switch (stub_entry->stub_type)
7807 {
7808 case aarch64_stub_adrp_branch:
7809 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7810 sizeof (aarch64_adrp_branch_stub)))
7811 return FALSE;
7812 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7813 return FALSE;
7814 break;
7815 case aarch64_stub_long_branch:
7816 if (!elfNN_aarch64_output_stub_sym
7817 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7818 return FALSE;
7819 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7820 return FALSE;
7821 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7822 return FALSE;
7823 break;
7824 case aarch64_stub_erratum_835769_veneer:
7825 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7826 sizeof (aarch64_erratum_835769_stub)))
7827 return FALSE;
7828 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7829 return FALSE;
7830 break;
7831 case aarch64_stub_erratum_843419_veneer:
7832 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7833 sizeof (aarch64_erratum_843419_stub)))
7834 return FALSE;
7835 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7836 return FALSE;
7837 break;
7838
7839 default:
7840 abort ();
7841 }
7842
7843 return TRUE;
7844 }
7845
7846 /* Output mapping symbols for linker generated sections. */
7847
7848 static bfd_boolean
7849 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7850 struct bfd_link_info *info,
7851 void *finfo,
7852 int (*func) (void *, const char *,
7853 Elf_Internal_Sym *,
7854 asection *,
7855 struct elf_link_hash_entry
7856 *))
7857 {
7858 output_arch_syminfo osi;
7859 struct elf_aarch64_link_hash_table *htab;
7860
7861 htab = elf_aarch64_hash_table (info);
7862
7863 osi.finfo = finfo;
7864 osi.info = info;
7865 osi.func = func;
7866
7867 /* Long calls stubs. */
7868 if (htab->stub_bfd && htab->stub_bfd->sections)
7869 {
7870 asection *stub_sec;
7871
7872 for (stub_sec = htab->stub_bfd->sections;
7873 stub_sec != NULL; stub_sec = stub_sec->next)
7874 {
7875 /* Ignore non-stub sections. */
7876 if (!strstr (stub_sec->name, STUB_SUFFIX))
7877 continue;
7878
7879 osi.sec = stub_sec;
7880
7881 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7882 (output_bfd, osi.sec->output_section);
7883
7884 /* The first instruction in a stub is always a branch. */
7885 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7886 return FALSE;
7887
7888 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7889 &osi);
7890 }
7891 }
7892
7893 /* Finally, output mapping symbols for the PLT. */
7894 if (!htab->root.splt || htab->root.splt->size == 0)
7895 return TRUE;
7896
7897 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7898 (output_bfd, htab->root.splt->output_section);
7899 osi.sec = htab->root.splt;
7900
7901 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
7902
7903 return TRUE;
7904
7905 }
7906
7907 /* Allocate target specific section data. */
7908
7909 static bfd_boolean
7910 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7911 {
7912 if (!sec->used_by_bfd)
7913 {
7914 _aarch64_elf_section_data *sdata;
7915 bfd_size_type amt = sizeof (*sdata);
7916
7917 sdata = bfd_zalloc (abfd, amt);
7918 if (sdata == NULL)
7919 return FALSE;
7920 sec->used_by_bfd = sdata;
7921 }
7922
7923 record_section_with_aarch64_elf_section_data (sec);
7924
7925 return _bfd_elf_new_section_hook (abfd, sec);
7926 }
7927
7928
7929 static void
7930 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7931 asection *sec,
7932 void *ignore ATTRIBUTE_UNUSED)
7933 {
7934 unrecord_section_with_aarch64_elf_section_data (sec);
7935 }
7936
7937 static bfd_boolean
7938 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7939 {
7940 if (abfd->sections)
7941 bfd_map_over_sections (abfd,
7942 unrecord_section_via_map_over_sections, NULL);
7943
7944 return _bfd_elf_close_and_cleanup (abfd);
7945 }
7946
7947 static bfd_boolean
7948 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7949 {
7950 if (abfd->sections)
7951 bfd_map_over_sections (abfd,
7952 unrecord_section_via_map_over_sections, NULL);
7953
7954 return _bfd_free_cached_info (abfd);
7955 }
7956
7957 /* Create dynamic sections. This is different from the ARM backend in that
7958 the got, plt, gotplt and their relocation sections are all created in the
7959 standard part of the bfd elf backend. */
7960
7961 static bfd_boolean
7962 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7963 struct bfd_link_info *info)
7964 {
7965 /* We need to create .got section. */
7966 if (!aarch64_elf_create_got_section (dynobj, info))
7967 return FALSE;
7968
7969 return _bfd_elf_create_dynamic_sections (dynobj, info);
7970 }
7971
7972
7973 /* Allocate space in .plt, .got and associated reloc sections for
7974 dynamic relocs. */
7975
7976 static bfd_boolean
7977 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7978 {
7979 struct bfd_link_info *info;
7980 struct elf_aarch64_link_hash_table *htab;
7981 struct elf_aarch64_link_hash_entry *eh;
7982 struct elf_dyn_relocs *p;
7983
7984 /* An example of a bfd_link_hash_indirect symbol is a versioned
7985 symbol, e.g. __gxx_personality_v0(bfd_link_hash_indirect)
7986 -> __gxx_personality_v0(bfd_link_hash_defined)
7987
7988 There is no need to process bfd_link_hash_indirect symbols here
7989 because we will also be presented with the concrete instance of
7990 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7991 called to copy all relevant data from the generic to the concrete
7992 symbol instance. */
7993 if (h->root.type == bfd_link_hash_indirect)
7994 return TRUE;
7995
7996 if (h->root.type == bfd_link_hash_warning)
7997 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7998
7999 info = (struct bfd_link_info *) inf;
8000 htab = elf_aarch64_hash_table (info);
8001
8002 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
8003 here if it is defined and referenced in a non-shared object. */
8004 if (h->type == STT_GNU_IFUNC
8005 && h->def_regular)
8006 return TRUE;
8007 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8008 {
8009 /* Make sure this symbol is output as a dynamic symbol.
8010 Undefined weak syms won't yet be marked as dynamic. */
8011 if (h->dynindx == -1 && !h->forced_local)
8012 {
8013 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8014 return FALSE;
8015 }
8016
8017 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8018 {
8019 asection *s = htab->root.splt;
8020
8021 /* If this is the first .plt entry, make room for the special
8022 first entry. */
8023 if (s->size == 0)
8024 s->size += htab->plt_header_size;
8025
8026 h->plt.offset = s->size;
8027
8028 /* If this symbol is not defined in a regular file, and we are
8029 not generating a shared library, then set the symbol to this
8030 location in the .plt. This is required to make function
8031 pointers compare as equal between the normal executable and
8032 the shared library. */
8033 if (!bfd_link_pic (info) && !h->def_regular)
8034 {
8035 h->root.u.def.section = s;
8036 h->root.u.def.value = h->plt.offset;
8037 }
8038
8039 /* Make room for this entry. For now we only create the
8040 small model PLT entries. We later need to find a way
8041 of relaxing into these from the large model PLT entries. */
8042 s->size += PLT_SMALL_ENTRY_SIZE;
8043
8044 /* We also need to make an entry in the .got.plt section, which
8045 will be placed in the .got section by the linker script. */
8046 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8047
8048 /* We also need to make an entry in the .rela.plt section. */
8049 htab->root.srelplt->size += RELOC_SIZE (htab);
8050
8051 /* We need to ensure that all GOT entries that serve the PLT
8052 are consecutive with the special GOT slots [0] [1] and
8053 [2]. Any additional relocations, such as
8054 R_AARCH64_TLSDESC, must be placed after the PLT related
8055 entries. We abuse the reloc_count such that during
8056 sizing we adjust reloc_count to indicate the number of
8057 PLT related reserved entries. In subsequent phases, when
8058 filling in the contents of the reloc entries, PLT related
8059 entries are placed by computing their PLT index (0
8060 .. reloc_count), while other, non-PLT relocs are placed
8061 at the slot indicated by reloc_count, and reloc_count is
8062 updated. */
8063
8064 htab->root.srelplt->reloc_count++;
8065 }
8066 else
8067 {
8068 h->plt.offset = (bfd_vma) - 1;
8069 h->needs_plt = 0;
8070 }
8071 }
8072 else
8073 {
8074 h->plt.offset = (bfd_vma) - 1;
8075 h->needs_plt = 0;
8076 }
8077
8078 eh = (struct elf_aarch64_link_hash_entry *) h;
8079 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8080
8081 if (h->got.refcount > 0)
8082 {
8083 bfd_boolean dyn;
8084 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8085
8086 h->got.offset = (bfd_vma) - 1;
8087
8088 dyn = htab->root.dynamic_sections_created;
8089
8090 /* Make sure this symbol is output as a dynamic symbol.
8091 Undefined weak syms won't yet be marked as dynamic. */
8092 if (dyn && h->dynindx == -1 && !h->forced_local)
8093 {
8094 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8095 return FALSE;
8096 }
8097
8098 if (got_type == GOT_UNKNOWN)
8099 {
8100 }
8101 else if (got_type == GOT_NORMAL)
8102 {
8103 h->got.offset = htab->root.sgot->size;
8104 htab->root.sgot->size += GOT_ENTRY_SIZE;
8105 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8106 || h->root.type != bfd_link_hash_undefweak)
8107 && (bfd_link_pic (info)
8108 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8109 {
8110 htab->root.srelgot->size += RELOC_SIZE (htab);
8111 }
8112 }
8113 else
8114 {
8115 int indx;
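	  /* A TLS descriptor needs a pair of GOT slots placed in
	     .got.plt after the jump-table slots; remember its offset
	     relative to the jump table.  */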
8116 if (got_type & GOT_TLSDESC_GD)
8117 {
8118 eh->tlsdesc_got_jump_table_offset =
8119 (htab->root.sgotplt->size
8120 - aarch64_compute_jump_table_size (htab));
8121 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8122 h->got.offset = (bfd_vma) - 2;
8123 }
8124
8125 if (got_type & GOT_TLS_GD)
8126 {
8127 h->got.offset = htab->root.sgot->size;
8128 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8129 }
8130
8131 if (got_type & GOT_TLS_IE)
8132 {
8133 h->got.offset = htab->root.sgot->size;
8134 htab->root.sgot->size += GOT_ENTRY_SIZE;
8135 }
8136
8137 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8138 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8139 || h->root.type != bfd_link_hash_undefweak)
8140 && (bfd_link_pic (info)
8141 || indx != 0
8142 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8143 {
8144 if (got_type & GOT_TLSDESC_GD)
8145 {
8146 htab->root.srelplt->size += RELOC_SIZE (htab);
8147 /* Note reloc_count not incremented here! We have
8148 already adjusted reloc_count for this relocation
8149 type. */
8150
8151 /* TLSDESC PLT is now needed, but not yet determined. */
8152 htab->tlsdesc_plt = (bfd_vma) - 1;
8153 }
8154
8155 if (got_type & GOT_TLS_GD)
8156 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8157
8158 if (got_type & GOT_TLS_IE)
8159 htab->root.srelgot->size += RELOC_SIZE (htab);
8160 }
8161 }
8162 }
8163 else
8164 {
8165 h->got.offset = (bfd_vma) - 1;
8166 }
8167
8168 if (eh->dyn_relocs == NULL)
8169 return TRUE;
8170
8171 /* In the shared -Bsymbolic case, discard space allocated for
8172 dynamic pc-relative relocs against symbols which turn out to be
8173 defined in regular objects. For the normal shared case, discard
8174 space for pc-relative relocs that have become local due to symbol
8175 visibility changes. */
8176
8177 if (bfd_link_pic (info))
8178 {
8179 /* Relocs that use pc_count are those that appear on a call
8180 insn, or certain REL relocs that can be generated via assembly.
8181 We want calls to protected symbols to resolve directly to the
8182 function rather than going via the plt. If people want
8183 function pointer comparisons to work as expected then they
8184 should avoid writing weird assembly. */
8185 if (SYMBOL_CALLS_LOCAL (info, h))
8186 {
8187 struct elf_dyn_relocs **pp;
8188
8189 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
8190 {
8191 p->count -= p->pc_count;
8192 p->pc_count = 0;
8193 if (p->count == 0)
8194 *pp = p->next;
8195 else
8196 pp = &p->next;
8197 }
8198 }
8199
8200 /* Also discard relocs on undefined weak syms with non-default
8201 visibility. */
8202 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
8203 {
8204 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8205 eh->dyn_relocs = NULL;
8206
8207 /* Make sure undefined weak symbols are output as a dynamic
8208 symbol in PIEs. */
8209 else if (h->dynindx == -1
8210 && !h->forced_local
8211 && !bfd_elf_link_record_dynamic_symbol (info, h))
8212 return FALSE;
8213 }
8214
8215 }
8216 else if (ELIMINATE_COPY_RELOCS)
8217 {
8218 /* For the non-shared case, discard space for relocs against
8219 symbols which turn out to need copy relocs or are not
8220 dynamic. */
8221
8222 if (!h->non_got_ref
8223 && ((h->def_dynamic
8224 && !h->def_regular)
8225 || (htab->root.dynamic_sections_created
8226 && (h->root.type == bfd_link_hash_undefweak
8227 || h->root.type == bfd_link_hash_undefined))))
8228 {
8229 /* Make sure this symbol is output as a dynamic symbol.
8230 Undefined weak syms won't yet be marked as dynamic. */
8231 if (h->dynindx == -1
8232 && !h->forced_local
8233 && !bfd_elf_link_record_dynamic_symbol (info, h))
8234 return FALSE;
8235
8236 /* If that succeeded, we know we'll be keeping all the
8237 relocs. */
8238 if (h->dynindx != -1)
8239 goto keep;
8240 }
8241
8242 eh->dyn_relocs = NULL;
8243
8244 keep:;
8245 }
8246
8247 /* Finally, allocate space. */
8248 for (p = eh->dyn_relocs; p != NULL; p = p->next)
8249 {
8250 asection *sreloc;
8251
8252 sreloc = elf_section_data (p->sec)->sreloc;
8253
8254 BFD_ASSERT (sreloc != NULL);
8255
8256 sreloc->size += p->count * RELOC_SIZE (htab);
8257 }
8258
8259 return TRUE;
8260 }
8261
8262 /* Allocate space in .plt, .got and associated reloc sections for
8263 ifunc dynamic relocs. */
8264
8265 static bfd_boolean
8266 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
8267 void *inf)
8268 {
8269 struct bfd_link_info *info;
8270 struct elf_aarch64_link_hash_table *htab;
8271 struct elf_aarch64_link_hash_entry *eh;
8272
8273 /* An example of a bfd_link_hash_indirect symbol is a versioned
8274 symbol, e.g. __gxx_personality_v0(bfd_link_hash_indirect)
8275 -> __gxx_personality_v0(bfd_link_hash_defined)
8276
8277 There is no need to process bfd_link_hash_indirect symbols here
8278 because we will also be presented with the concrete instance of
8279 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8280 called to copy all relevant data from the generic to the concrete
8281 symbol instance. */
8282 if (h->root.type == bfd_link_hash_indirect)
8283 return TRUE;
8284
8285 if (h->root.type == bfd_link_hash_warning)
8286 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8287
8288 info = (struct bfd_link_info *) inf;
8289 htab = elf_aarch64_hash_table (info);
8290
8291 eh = (struct elf_aarch64_link_hash_entry *) h;
8292
8293 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
8294 here if it is defined and referenced in a non-shared object. */
8295 if (h->type == STT_GNU_IFUNC
8296 && h->def_regular)
8297 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
8298 &eh->dyn_relocs,
8299 NULL,
8300 htab->plt_entry_size,
8301 htab->plt_header_size,
8302 GOT_ENTRY_SIZE,
8303 FALSE);
8304 return TRUE;
8305 }
8306
8307 /* Allocate space in .plt, .got and associated reloc sections for
8308 local dynamic relocs. */
8309
8310 static bfd_boolean
8311 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
8312 {
8313 struct elf_link_hash_entry *h
8314 = (struct elf_link_hash_entry *) *slot;
8315
8316 if (h->type != STT_GNU_IFUNC
8317 || !h->def_regular
8318 || !h->ref_regular
8319 || !h->forced_local
8320 || h->root.type != bfd_link_hash_defined)
8321 abort ();
8322
8323 return elfNN_aarch64_allocate_dynrelocs (h, inf);
8324 }
8325
8326 /* Allocate space in .plt, .got and associated reloc sections for
8327 local ifunc dynamic relocs. */
8328
8329 static bfd_boolean
8330 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
8331 {
8332 struct elf_link_hash_entry *h
8333 = (struct elf_link_hash_entry *) *slot;
8334
8335 if (h->type != STT_GNU_IFUNC
8336 || !h->def_regular
8337 || !h->ref_regular
8338 || !h->forced_local
8339 || h->root.type != bfd_link_hash_defined)
8340 abort ();
8341
8342 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
8343 }
8344
8345 /* Find any dynamic relocs that apply to read-only sections. */
8346
8347 static bfd_boolean
8348 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
8349 {
8350 struct elf_aarch64_link_hash_entry * eh;
8351 struct elf_dyn_relocs * p;
8352
8353 eh = (struct elf_aarch64_link_hash_entry *) h;
8354 for (p = eh->dyn_relocs; p != NULL; p = p->next)
8355 {
8356 asection *s = p->sec;
8357
8358 if (s != NULL && (s->flags & SEC_READONLY) != 0)
8359 {
8360 struct bfd_link_info *info = (struct bfd_link_info *) inf;
8361
8362 info->flags |= DF_TEXTREL;
8363
8364 /* Not an error, just cut short the traversal. */
8365 return FALSE;
8366 }
8367 }
8368 return TRUE;
8369 }
8370
8371 /* This is the most important function of all. Innocuously named
8372 though! */
8373
8374 static bfd_boolean
8375 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
8376 struct bfd_link_info *info)
8377 {
8378 struct elf_aarch64_link_hash_table *htab;
8379 bfd *dynobj;
8380 asection *s;
8381 bfd_boolean relocs;
8382 bfd *ibfd;
8383
8384 htab = elf_aarch64_hash_table ((info));
8385 dynobj = htab->root.dynobj;
8386
8387 BFD_ASSERT (dynobj != NULL);
8388
8389 if (htab->root.dynamic_sections_created)
8390 {
8391 if (bfd_link_executable (info) && !info->nointerp)
8392 {
8393 s = bfd_get_linker_section (dynobj, ".interp");
8394 if (s == NULL)
8395 abort ();
8396 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
8397 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
8398 }
8399 }
8400
8401 /* Set up .got offsets for local syms, and space for local dynamic
8402 relocs. */
8403 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
8404 {
8405 struct elf_aarch64_local_symbol *locals = NULL;
8406 Elf_Internal_Shdr *symtab_hdr;
8407 asection *srel;
8408 unsigned int i;
8409
8410 if (!is_aarch64_elf (ibfd))
8411 continue;
8412
8413 for (s = ibfd->sections; s != NULL; s = s->next)
8414 {
8415 struct elf_dyn_relocs *p;
8416
8417 for (p = (struct elf_dyn_relocs *)
8418 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
8419 {
8420 if (!bfd_is_abs_section (p->sec)
8421 && bfd_is_abs_section (p->sec->output_section))
8422 {
8423 /* Input section has been discarded, either because
8424 it is a copy of a linkonce section or due to
8425 linker script /DISCARD/, so we'll be discarding
8426 the relocs too. */
8427 }
8428 else if (p->count != 0)
8429 {
8430 srel = elf_section_data (p->sec)->sreloc;
8431 srel->size += p->count * RELOC_SIZE (htab);
8432 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
8433 info->flags |= DF_TEXTREL;
8434 }
8435 }
8436 }
8437
8438 locals = elf_aarch64_locals (ibfd);
8439 if (!locals)
8440 continue;
8441
8442 symtab_hdr = &elf_symtab_hdr (ibfd);
8443 srel = htab->root.srelgot;
8444 for (i = 0; i < symtab_hdr->sh_info; i++)
8445 {
8446 locals[i].got_offset = (bfd_vma) - 1;
8447 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
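	  /* Size the GOT (and, for TLS descriptors, .got.plt) slots
	     needed by this local symbol, mirroring the handling of
	     global symbols in elfNN_aarch64_allocate_dynrelocs
	     above.  */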
8448 if (locals[i].got_refcount > 0)
8449 {
8450 unsigned got_type = locals[i].got_type;
8451 if (got_type & GOT_TLSDESC_GD)
8452 {
8453 locals[i].tlsdesc_got_jump_table_offset =
8454 (htab->root.sgotplt->size
8455 - aarch64_compute_jump_table_size (htab));
8456 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8457 locals[i].got_offset = (bfd_vma) - 2;
8458 }
8459
8460 if (got_type & GOT_TLS_GD)
8461 {
8462 locals[i].got_offset = htab->root.sgot->size;
8463 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8464 }
8465
8466 if (got_type & GOT_TLS_IE
8467 || got_type & GOT_NORMAL)
8468 {
8469 locals[i].got_offset = htab->root.sgot->size;
8470 htab->root.sgot->size += GOT_ENTRY_SIZE;
8471 }
8472
8473 if (got_type == GOT_UNKNOWN)
8474 {
8475 }
8476
8477 if (bfd_link_pic (info))
8478 {
8479 if (got_type & GOT_TLSDESC_GD)
8480 {
8481 htab->root.srelplt->size += RELOC_SIZE (htab);
8482 /* Note RELOC_COUNT not incremented here! */
8483 htab->tlsdesc_plt = (bfd_vma) - 1;
8484 }
8485
8486 if (got_type & GOT_TLS_GD)
8487 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8488
8489 if (got_type & GOT_TLS_IE
8490 || got_type & GOT_NORMAL)
8491 htab->root.srelgot->size += RELOC_SIZE (htab);
8492 }
8493 }
8494 else
8495 {
8496 locals[i].got_refcount = (bfd_vma) - 1;
8497 }
8498 }
8499 }
8500
8501
8502 /* Allocate global sym .plt and .got entries, and space for global
8503 sym dynamic relocs. */
8504 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
8505 info);
8506
8507 /* Allocate global ifunc sym .plt and .got entries, and space for global
8508 ifunc sym dynamic relocs. */
8509 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
8510 info);
8511
8512 /* Allocate .plt and .got entries, and space for local symbols. */
8513 htab_traverse (htab->loc_hash_table,
8514 elfNN_aarch64_allocate_local_dynrelocs,
8515 info);
8516
8517 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
8518 htab_traverse (htab->loc_hash_table,
8519 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
8520 info);
8521
8522 /* For every jump slot reserved in the sgotplt, reloc_count is
8523 incremented. However, when we reserve space for TLS descriptors,
8524 it's not incremented, so in order to compute the space reserved
8525 for them, it suffices to multiply the reloc count by the jump
8526 slot size. */
8527
8528 if (htab->root.srelplt)
8529 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
8530
8531 if (htab->tlsdesc_plt)
8532 {
8533 if (htab->root.splt->size == 0)
8534 htab->root.splt->size += PLT_ENTRY_SIZE;
8535
8536 htab->tlsdesc_plt = htab->root.splt->size;
8537 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
8538
8539 /* If we're not using lazy TLS relocations, don't generate the
8540 GOT entry required. */
8541 if (!(info->flags & DF_BIND_NOW))
8542 {
8543 htab->dt_tlsdesc_got = htab->root.sgot->size;
8544 htab->root.sgot->size += GOT_ENTRY_SIZE;
8545 }
8546 }
8547
8548 /* Init mapping symbol information to use later to distinguish between
8549 code and data while scanning for errata. */
8550 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
8551 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
8552 {
8553 if (!is_aarch64_elf (ibfd))
8554 continue;
8555 bfd_elfNN_aarch64_init_maps (ibfd);
8556 }
8557
8558 /* We now have determined the sizes of the various dynamic sections.
8559 Allocate memory for them. */
8560 relocs = FALSE;
8561 for (s = dynobj->sections; s != NULL; s = s->next)
8562 {
8563 if ((s->flags & SEC_LINKER_CREATED) == 0)
8564 continue;
8565
8566 if (s == htab->root.splt
8567 || s == htab->root.sgot
8568 || s == htab->root.sgotplt
8569 || s == htab->root.iplt
8570 || s == htab->root.igotplt
8571 || s == htab->root.sdynbss
8572 || s == htab->root.sdynrelro)
8573 {
8574 /* Strip this section if we don't need it; see the
8575 comment below. */
8576 }
8577 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
8578 {
8579 if (s->size != 0 && s != htab->root.srelplt)
8580 relocs = TRUE;
8581
8582 /* We use the reloc_count field as a counter if we need
8583 to copy relocs into the output file. */
8584 if (s != htab->root.srelplt)
8585 s->reloc_count = 0;
8586 }
8587 else
8588 {
8589 /* It's not one of our sections, so don't allocate space. */
8590 continue;
8591 }
8592
8593 if (s->size == 0)
8594 {
8595 /* If we don't need this section, strip it from the
8596 output file. This is mostly to handle .rela.bss and
8597 .rela.plt. We must create both sections in
8598 create_dynamic_sections, because they must be created
8599 before the linker maps input sections to output
8600 sections. The linker does that before
8601 adjust_dynamic_symbol is called, and it is that
8602 function which decides whether anything needs to go
8603 into these sections. */
8604 s->flags |= SEC_EXCLUDE;
8605 continue;
8606 }
8607
8608 if ((s->flags & SEC_HAS_CONTENTS) == 0)
8609 continue;
8610
8611 /* Allocate memory for the section contents. We use bfd_zalloc
8612 here in case unused entries are not reclaimed before the
8613 section's contents are written out. This should not happen,
8614 but this way if it does, we get a R_AARCH64_NONE reloc instead
8615 of garbage. */
8616 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
8617 if (s->contents == NULL)
8618 return FALSE;
8619 }
8620
8621 if (htab->root.dynamic_sections_created)
8622 {
8623 /* Add some entries to the .dynamic section. We fill in the
8624 values later, in elfNN_aarch64_finish_dynamic_sections, but we
8625 must add the entries now so that we get the correct size for
8626 the .dynamic section. The DT_DEBUG entry is filled in by the
8627 dynamic linker and used by the debugger. */
8628 #define add_dynamic_entry(TAG, VAL) \
8629 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
8630
8631 if (bfd_link_executable (info))
8632 {
8633 if (!add_dynamic_entry (DT_DEBUG, 0))
8634 return FALSE;
8635 }
8636
8637 if (htab->root.splt->size != 0)
8638 {
8639 if (!add_dynamic_entry (DT_PLTGOT, 0)
8640 || !add_dynamic_entry (DT_PLTRELSZ, 0)
8641 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
8642 || !add_dynamic_entry (DT_JMPREL, 0))
8643 return FALSE;
8644
8645 if (htab->tlsdesc_plt
8646 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
8647 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
8648 return FALSE;
8649 }
8650
8651 if (relocs)
8652 {
8653 if (!add_dynamic_entry (DT_RELA, 0)
8654 || !add_dynamic_entry (DT_RELASZ, 0)
8655 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
8656 return FALSE;
8657
8658 /* If any dynamic relocs apply to a read-only section,
8659 then we need a DT_TEXTREL entry. */
8660 if ((info->flags & DF_TEXTREL) == 0)
8661 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
8662 info);
8663
8664 if ((info->flags & DF_TEXTREL) != 0)
8665 {
8666 if (!add_dynamic_entry (DT_TEXTREL, 0))
8667 return FALSE;
8668 }
8669 }
8670 }
8671 #undef add_dynamic_entry
8672
8673 return TRUE;
8674 }
8675
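/* Patch VALUE into the field of PLT_ENTRY selected by relocation type
   R_TYPE.  */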
8676 static inline void
8677 elf_aarch64_update_plt_entry (bfd *output_bfd,
8678 bfd_reloc_code_real_type r_type,
8679 bfd_byte *plt_entry, bfd_vma value)
8680 {
8681 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
8682
8683 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
8684 }
8685
8686 static void
8687 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
8688 struct elf_aarch64_link_hash_table
8689 *htab, bfd *output_bfd,
8690 struct bfd_link_info *info)
8691 {
8692 bfd_byte *plt_entry;
8693 bfd_vma plt_index;
8694 bfd_vma got_offset;
8695 bfd_vma gotplt_entry_address;
8696 bfd_vma plt_entry_address;
8697 Elf_Internal_Rela rela;
8698 bfd_byte *loc;
8699 asection *plt, *gotplt, *relplt;
8700
8701 /* When building a static executable, use .iplt, .igot.plt and
8702 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8703 if (htab->root.splt != NULL)
8704 {
8705 plt = htab->root.splt;
8706 gotplt = htab->root.sgotplt;
8707 relplt = htab->root.srelplt;
8708 }
8709 else
8710 {
8711 plt = htab->root.iplt;
8712 gotplt = htab->root.igotplt;
8713 relplt = htab->root.irelplt;
8714 }
8715
8716 /* Get the index in the procedure linkage table which
8717 corresponds to this symbol. This is the index of this symbol
8718 in all the symbols for which we are making plt entries. The
8719 first entry in the procedure linkage table is reserved.
8720
8721 Get the offset into the .got table of the entry that
8722 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8723 bytes. The first three are reserved for the dynamic linker.
8724
8725 For static executables, we don't reserve anything. */
8726
8727 if (plt == htab->root.splt)
8728 {
8729 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8730 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8731 }
8732 else
8733 {
8734 plt_index = h->plt.offset / htab->plt_entry_size;
8735 got_offset = plt_index * GOT_ENTRY_SIZE;
8736 }
8737
8738 plt_entry = plt->contents + h->plt.offset;
8739 plt_entry_address = plt->output_section->vma
8740 + plt->output_offset + h->plt.offset;
8741 gotplt_entry_address = gotplt->output_section->vma +
8742 gotplt->output_offset + got_offset;
8743
8744 /* Copy in the boiler-plate for the PLTn entry. */
8745 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8746
8747 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8748 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8749 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8750 plt_entry,
8751 PG (gotplt_entry_address) -
8752 PG (plt_entry_address));
8753
8754 /* Fill in the lo12 bits for the load from the pltgot. */
8755 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8756 plt_entry + 4,
8757 PG_OFFSET (gotplt_entry_address));
8758
8759 /* Fill in the lo12 bits for the add from the pltgot entry. */
8760 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8761 plt_entry + 8,
8762 PG_OFFSET (gotplt_entry_address));
8763
8764 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8765 bfd_put_NN (output_bfd,
8766 plt->output_section->vma + plt->output_offset,
8767 gotplt->contents + got_offset);
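  /* (Sketch of the intent, following the usual lazy-binding convention:
     until the dynamic linker resolves the symbol, the PLTn stub's
     indirect branch through this slot lands back on PLT0, which calls
     the resolver; the resolver then patches the slot with the real
     function address.)  */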
8768
8769 rela.r_offset = gotplt_entry_address;
8770
8771 if (h->dynindx == -1
8772 || ((bfd_link_executable (info)
8773 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8774 && h->def_regular
8775 && h->type == STT_GNU_IFUNC))
8776 {
8777 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8778 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8779 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8780 rela.r_addend = (h->root.u.def.value
8781 + h->root.u.def.section->output_section->vma
8782 + h->root.u.def.section->output_offset);
8783 }
8784 else
8785 {
8786 /* Fill in the entry in the .rela.plt section. */
8787 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8788 rela.r_addend = 0;
8789 }
8790
8791 /* Compute the relocation entry to use based on the PLT index; do
8792 not adjust reloc_count. The reloc_count has already been adjusted
8793 to account for this entry. */
8794 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8795 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8796 }
8797
8798 /* Size sections even though they're not dynamic. We use this to set up
8799 _TLS_MODULE_BASE_, if needed. */
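/* (Background note: _TLS_MODULE_BASE_ conventionally marks the start of
   the module's TLS block, so that TLS references of the local-dynamic
   flavour can be expressed as offsets from a single resolved base
   rather than needing one GOT entry per symbol.)  */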
8800
8801 static bfd_boolean
8802 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8803 struct bfd_link_info *info)
8804 {
8805 asection *tls_sec;
8806
8807 if (bfd_link_relocatable (info))
8808 return TRUE;
8809
8810 tls_sec = elf_hash_table (info)->tls_sec;
8811
8812 if (tls_sec)
8813 {
8814 struct elf_link_hash_entry *tlsbase;
8815
8816 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8817 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8818
8819 if (tlsbase)
8820 {
8821 struct bfd_link_hash_entry *h = NULL;
8822 const struct elf_backend_data *bed =
8823 get_elf_backend_data (output_bfd);
8824
8825 if (!(_bfd_generic_link_add_one_symbol
8826 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8827 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8828 return FALSE;
8829
8830 tlsbase->type = STT_TLS;
8831 tlsbase = (struct elf_link_hash_entry *) h;
8832 tlsbase->def_regular = 1;
8833 tlsbase->other = STV_HIDDEN;
8834 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8835 }
8836 }
8837
8838 return TRUE;
8839 }
8840
8841 /* Finish up dynamic symbol handling. We set the contents of various
8842 dynamic sections here. */
8843
8844 static bfd_boolean
8845 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8846 struct bfd_link_info *info,
8847 struct elf_link_hash_entry *h,
8848 Elf_Internal_Sym *sym)
8849 {
8850 struct elf_aarch64_link_hash_table *htab;
8851 htab = elf_aarch64_hash_table (info);
8852
8853 if (h->plt.offset != (bfd_vma) - 1)
8854 {
8855 asection *plt, *gotplt, *relplt;
8856
8857 /* This symbol has an entry in the procedure linkage table. Set
8858 it up. */
8859
8860 /* When building a static executable, use .iplt, .igot.plt and
8861 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8862 if (htab->root.splt != NULL)
8863 {
8864 plt = htab->root.splt;
8865 gotplt = htab->root.sgotplt;
8866 relplt = htab->root.srelplt;
8867 }
8868 else
8869 {
8870 plt = htab->root.iplt;
8871 gotplt = htab->root.igotplt;
8872 relplt = htab->root.irelplt;
8873 }
8874
8875 /* This symbol has an entry in the procedure linkage table. Set
8876 it up. */
8877 if ((h->dynindx == -1
8878 && !((h->forced_local || bfd_link_executable (info))
8879 && h->def_regular
8880 && h->type == STT_GNU_IFUNC))
8881 || plt == NULL
8882 || gotplt == NULL
8883 || relplt == NULL)
8884 abort ();
8885
8886 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8887 if (!h->def_regular)
8888 {
8889 /* Mark the symbol as undefined, rather than as defined in
8890 the .plt section. */
8891 sym->st_shndx = SHN_UNDEF;
8892 /* If the symbol is weak we need to clear the value.
8893 Otherwise, the PLT entry would provide a definition for
8894 the symbol even if the symbol wasn't defined anywhere,
8895 and so the symbol would never be NULL. Leave the value if
8896 there were any relocations where pointer equality matters
8897 (this is a clue for the dynamic linker, to make function
8898 pointer comparisons work between an application and shared
8899 library). */
8900 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8901 sym->st_value = 0;
8902 }
8903 }
8904
8905 if (h->got.offset != (bfd_vma) - 1
8906 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8907 {
8908 Elf_Internal_Rela rela;
8909 bfd_byte *loc;
8910
8911 /* This symbol has an entry in the global offset table. Set it
8912 up. */
8913 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8914 abort ();
8915
8916 rela.r_offset = (htab->root.sgot->output_section->vma
8917 + htab->root.sgot->output_offset
8918 + (h->got.offset & ~(bfd_vma) 1));
8919
8920 if (h->def_regular
8921 && h->type == STT_GNU_IFUNC)
8922 {
8923 if (bfd_link_pic (info))
8924 {
8925 /* Generate R_AARCH64_GLOB_DAT. */
8926 goto do_glob_dat;
8927 }
8928 else
8929 {
8930 asection *plt;
8931
8932 if (!h->pointer_equality_needed)
8933 abort ();
8934
8935 /* For non-shared object, we can't use .got.plt, which
8936 contains the real function address if we need pointer
8937 equality. We load the GOT entry with the PLT entry. */
8938 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8939 bfd_put_NN (output_bfd, (plt->output_section->vma
8940 + plt->output_offset
8941 + h->plt.offset),
8942 htab->root.sgot->contents
8943 + (h->got.offset & ~(bfd_vma) 1));
8944 return TRUE;
8945 }
8946 }
8947 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
8948 {
8949 if (!h->def_regular)
8950 return FALSE;
8951
8952 BFD_ASSERT ((h->got.offset & 1) != 0);
8953 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8954 rela.r_addend = (h->root.u.def.value
8955 + h->root.u.def.section->output_section->vma
8956 + h->root.u.def.section->output_offset);
8957 }
8958 else
8959 {
8960 do_glob_dat:
8961 BFD_ASSERT ((h->got.offset & 1) == 0);
8962 bfd_put_NN (output_bfd, (bfd_vma) 0,
8963 htab->root.sgot->contents + h->got.offset);
8964 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8965 rela.r_addend = 0;
8966 }
8967
8968 loc = htab->root.srelgot->contents;
8969 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8970 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8971 }
8972
8973 if (h->needs_copy)
8974 {
8975 Elf_Internal_Rela rela;
8976 asection *s;
8977 bfd_byte *loc;
8978
8979 /* This symbol needs a copy reloc. Set it up. */
8980 if (h->dynindx == -1
8981 || (h->root.type != bfd_link_hash_defined
8982 && h->root.type != bfd_link_hash_defweak)
8983 || htab->root.srelbss == NULL)
8984 abort ();
8985
8986 rela.r_offset = (h->root.u.def.value
8987 + h->root.u.def.section->output_section->vma
8988 + h->root.u.def.section->output_offset);
8989 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8990 rela.r_addend = 0;
8991 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
8992 s = htab->root.sreldynrelro;
8993 else
8994 s = htab->root.srelbss;
8995 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
8996 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8997 }
8998
8999 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9000 be NULL for local symbols. */
9001 if (sym != NULL
9002 && (h == elf_hash_table (info)->hdynamic
9003 || h == elf_hash_table (info)->hgot))
9004 sym->st_shndx = SHN_ABS;
9005
9006 return TRUE;
9007 }
9008
9009 /* Finish up local dynamic symbol handling. We set the contents of
9010 various dynamic sections here. */
9011
9012 static bfd_boolean
9013 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9014 {
9015 struct elf_link_hash_entry *h
9016 = (struct elf_link_hash_entry *) *slot;
9017 struct bfd_link_info *info
9018 = (struct bfd_link_info *) inf;
9019
9020 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9021 info, h, NULL);
9022 }
9023
9024 static void
9025 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9026 struct elf_aarch64_link_hash_table
9027 *htab)
9028 {
9029 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
9030 small and large PLTs and at the moment just generates
9031 the small PLT. */
9032
9033 /* PLT0 of the small PLT looks like this in ELF64 -
9034 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9035 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9036 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9037 // symbol resolver
9038 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9039 // GOTPLT entry for this.
9040 br x17
9041 PLT0 will be slightly different in ELF32 due to different got entry
9042 size. */
9043 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9044 bfd_vma plt_base;
9045
9046
9047 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
9048 PLT_ENTRY_SIZE);
9049 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
9050 PLT_ENTRY_SIZE;
9051
9052 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9053 + htab->root.sgotplt->output_offset
9054 + GOT_ENTRY_SIZE * 2);
9055
9056 plt_base = htab->root.splt->output_section->vma +
9057 htab->root.splt->output_offset;
9058
9059 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9060 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9061 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9062 htab->root.splt->contents + 4,
9063 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9064
9065 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9066 htab->root.splt->contents + 8,
9067 PG_OFFSET (plt_got_2nd_ent));
9068
9069 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9070 htab->root.splt->contents + 12,
9071 PG_OFFSET (plt_got_2nd_ent));
9072 }
9073
9074 static bfd_boolean
9075 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9076 struct bfd_link_info *info)
9077 {
9078 struct elf_aarch64_link_hash_table *htab;
9079 bfd *dynobj;
9080 asection *sdyn;
9081
9082 htab = elf_aarch64_hash_table (info);
9083 dynobj = htab->root.dynobj;
9084 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9085
9086 if (htab->root.dynamic_sections_created)
9087 {
9088 ElfNN_External_Dyn *dyncon, *dynconend;
9089
9090 if (sdyn == NULL || htab->root.sgot == NULL)
9091 abort ();
9092
9093 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9094 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9095 for (; dyncon < dynconend; dyncon++)
9096 {
9097 Elf_Internal_Dyn dyn;
9098 asection *s;
9099
9100 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9101
9102 switch (dyn.d_tag)
9103 {
9104 default:
9105 continue;
9106
9107 case DT_PLTGOT:
9108 s = htab->root.sgotplt;
9109 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9110 break;
9111
9112 case DT_JMPREL:
9113 s = htab->root.srelplt;
9114 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9115 break;
9116
9117 case DT_PLTRELSZ:
9118 s = htab->root.srelplt;
9119 dyn.d_un.d_val = s->size;
9120 break;
9121
9122 case DT_TLSDESC_PLT:
9123 s = htab->root.splt;
9124 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9125 + htab->tlsdesc_plt;
9126 break;
9127
9128 case DT_TLSDESC_GOT:
9129 s = htab->root.sgot;
9130 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9131 + htab->dt_tlsdesc_got;
9132 break;
9133 }
9134
9135 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9136 }
9137
9138 }
9139
9140 /* Fill in the special first entry in the procedure linkage table. */
9141 if (htab->root.splt && htab->root.splt->size > 0)
9142 {
9143 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9144
9145 elf_section_data (htab->root.splt->output_section)->
9146 this_hdr.sh_entsize = htab->plt_entry_size;
9147
9148
9149 if (htab->tlsdesc_plt)
9150 {
9151 bfd_put_NN (output_bfd, (bfd_vma) 0,
9152 htab->root.sgot->contents + htab->dt_tlsdesc_got);
9153
9154 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
9155 elfNN_aarch64_tlsdesc_small_plt_entry,
9156 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
9157
9158 {
9159 bfd_vma adrp1_addr =
9160 htab->root.splt->output_section->vma
9161 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
9162
9163 bfd_vma adrp2_addr = adrp1_addr + 4;
9164
9165 bfd_vma got_addr =
9166 htab->root.sgot->output_section->vma
9167 + htab->root.sgot->output_offset;
9168
9169 bfd_vma pltgot_addr =
9170 htab->root.sgotplt->output_section->vma
9171 + htab->root.sgotplt->output_offset;
9172
9173 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
9174
9175 bfd_byte *plt_entry =
9176 htab->root.splt->contents + htab->tlsdesc_plt;
9177
9178 /* adrp x2, DT_TLSDESC_GOT */
9179 elf_aarch64_update_plt_entry (output_bfd,
9180 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9181 plt_entry + 4,
9182 (PG (dt_tlsdesc_got)
9183 - PG (adrp1_addr)));
9184
9185 /* adrp x3, 0 */
9186 elf_aarch64_update_plt_entry (output_bfd,
9187 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9188 plt_entry + 8,
9189 (PG (pltgot_addr)
9190 - PG (adrp2_addr)));
9191
9192 /* ldr x2, [x2, #0] */
9193 elf_aarch64_update_plt_entry (output_bfd,
9194 BFD_RELOC_AARCH64_LDSTNN_LO12,
9195 plt_entry + 12,
9196 PG_OFFSET (dt_tlsdesc_got));
9197
9198 /* add x3, x3, 0 */
9199 elf_aarch64_update_plt_entry (output_bfd,
9200 BFD_RELOC_AARCH64_ADD_LO12,
9201 plt_entry + 16,
9202 PG_OFFSET (pltgot_addr));
9203 }
9204 }
9205 }
9206
9207 if (htab->root.sgotplt)
9208 {
9209 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
9210 {
9211 _bfd_error_handler
9212 (_("discarded output section: `%A'"), htab->root.sgotplt);
9213 return FALSE;
9214 }
9215
9216 /* Fill in the first three entries in the global offset table. */
9217 if (htab->root.sgotplt->size > 0)
9218 {
9219 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
9220
9221 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
9222 bfd_put_NN (output_bfd,
9223 (bfd_vma) 0,
9224 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
9225 bfd_put_NN (output_bfd,
9226 (bfd_vma) 0,
9227 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
9228 }
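      /* (A note on what happens at run time, per the usual lazy-binding
	 scheme rather than anything enforced here: the dynamic linker
	 typically overwrites GOT[1] with a pointer to its internal link
	 map and GOT[2] with the address of its resolver, which is what
	 PLT0 fetches via the PLT_GOT references shown in the PLT0
	 comment above.)  */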
9229
9230 if (htab->root.sgot)
9231 {
9232 if (htab->root.sgot->size > 0)
9233 {
9234 bfd_vma addr =
9235 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
9236 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
9237 }
9238 }
9239
9240 elf_section_data (htab->root.sgotplt->output_section)->
9241 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
9242 }
9243
9244 if (htab->root.sgot && htab->root.sgot->size > 0)
9245 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
9246 = GOT_ENTRY_SIZE;
9247
9248 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
9249 htab_traverse (htab->loc_hash_table,
9250 elfNN_aarch64_finish_local_dynamic_symbol,
9251 info);
9252
9253 return TRUE;
9254 }
9255
9256 /* Return address for Ith PLT stub in section PLT, for relocation REL
9257 or (bfd_vma) -1 if it should not be included. */
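/* A small sketch of the computation below, assuming the ELF64 sizes used
   in this file (32-byte PLT0 header followed by 16-byte stubs): stub 0
   would be reported at plt->vma + 32 and stub 2 at plt->vma + 64.  */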
9258
9259 static bfd_vma
9260 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
9261 const arelent *rel ATTRIBUTE_UNUSED)
9262 {
9263 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
9264 }
9265
9266 /* Returns TRUE if NAME is an AArch64 mapping symbol.
9267 The ARM ELF standard defines $x (for A64 code) and $d (for data).
9268 It also allows a period-initiated suffix to be added to the symbol, i.e.:
9269 "$[adtx]\.[:sym_char]+". */
9270
9271 static bfd_boolean
9272 is_aarch64_mapping_symbol (const char * name)
9273 {
9274 return name != NULL /* Paranoia. */
9275 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
9276 the mapping symbols could have acquired a prefix.
9277 We do not support this here, since such symbols no
9278 longer conform to the ARM ELF ABI. */
9279 && (name[1] == 'd' || name[1] == 'x')
9280 && (name[2] == 0 || name[2] == '.');
9281 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
9282 any characters that follow the period are legal characters for the body
9283 of a symbol's name. For now we just assume that this is the case. */
9284 }
9285
9286 /* Make sure that mapping symbols in object files are not removed via the
9287 "strip --strip-unneeded" tool. These symbols might needed in order to
9288 correctly generate linked files. Once an object file has been linked,
9289 it should be safe to remove them. */
9290
9291 static void
9292 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
9293 {
9294 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
9295 && sym->section != bfd_abs_section_ptr
9296 && is_aarch64_mapping_symbol (sym->name))
9297 sym->flags |= BSF_KEEP;
9298 }
9299
9300
9301 /* We use this so we can override certain functions
9302 (though currently we don't). */
9303
9304 const struct elf_size_info elfNN_aarch64_size_info =
9305 {
9306 sizeof (ElfNN_External_Ehdr),
9307 sizeof (ElfNN_External_Phdr),
9308 sizeof (ElfNN_External_Shdr),
9309 sizeof (ElfNN_External_Rel),
9310 sizeof (ElfNN_External_Rela),
9311 sizeof (ElfNN_External_Sym),
9312 sizeof (ElfNN_External_Dyn),
9313 sizeof (Elf_External_Note),
9314 4, /* Hash table entry size. */
9315 1, /* Internal relocs per external relocs. */
9316 ARCH_SIZE, /* Arch size. */
9317 LOG_FILE_ALIGN, /* Log_file_align. */
9318 ELFCLASSNN, EV_CURRENT,
9319 bfd_elfNN_write_out_phdrs,
9320 bfd_elfNN_write_shdrs_and_ehdr,
9321 bfd_elfNN_checksum_contents,
9322 bfd_elfNN_write_relocs,
9323 bfd_elfNN_swap_symbol_in,
9324 bfd_elfNN_swap_symbol_out,
9325 bfd_elfNN_slurp_reloc_table,
9326 bfd_elfNN_slurp_symbol_table,
9327 bfd_elfNN_swap_dyn_in,
9328 bfd_elfNN_swap_dyn_out,
9329 bfd_elfNN_swap_reloc_in,
9330 bfd_elfNN_swap_reloc_out,
9331 bfd_elfNN_swap_reloca_in,
9332 bfd_elfNN_swap_reloca_out
9333 };
9334
9335 #define ELF_ARCH bfd_arch_aarch64
9336 #define ELF_MACHINE_CODE EM_AARCH64
9337 #define ELF_MAXPAGESIZE 0x10000
9338 #define ELF_MINPAGESIZE 0x1000
9339 #define ELF_COMMONPAGESIZE 0x1000
9340
9341 #define bfd_elfNN_close_and_cleanup \
9342 elfNN_aarch64_close_and_cleanup
9343
9344 #define bfd_elfNN_bfd_free_cached_info \
9345 elfNN_aarch64_bfd_free_cached_info
9346
9347 #define bfd_elfNN_bfd_is_target_special_symbol \
9348 elfNN_aarch64_is_target_special_symbol
9349
9350 #define bfd_elfNN_bfd_link_hash_table_create \
9351 elfNN_aarch64_link_hash_table_create
9352
9353 #define bfd_elfNN_bfd_merge_private_bfd_data \
9354 elfNN_aarch64_merge_private_bfd_data
9355
9356 #define bfd_elfNN_bfd_print_private_bfd_data \
9357 elfNN_aarch64_print_private_bfd_data
9358
9359 #define bfd_elfNN_bfd_reloc_type_lookup \
9360 elfNN_aarch64_reloc_type_lookup
9361
9362 #define bfd_elfNN_bfd_reloc_name_lookup \
9363 elfNN_aarch64_reloc_name_lookup
9364
9365 #define bfd_elfNN_bfd_set_private_flags \
9366 elfNN_aarch64_set_private_flags
9367
9368 #define bfd_elfNN_find_inliner_info \
9369 elfNN_aarch64_find_inliner_info
9370
9371 #define bfd_elfNN_find_nearest_line \
9372 elfNN_aarch64_find_nearest_line
9373
9374 #define bfd_elfNN_mkobject \
9375 elfNN_aarch64_mkobject
9376
9377 #define bfd_elfNN_new_section_hook \
9378 elfNN_aarch64_new_section_hook
9379
9380 #define elf_backend_adjust_dynamic_symbol \
9381 elfNN_aarch64_adjust_dynamic_symbol
9382
9383 #define elf_backend_always_size_sections \
9384 elfNN_aarch64_always_size_sections
9385
9386 #define elf_backend_check_relocs \
9387 elfNN_aarch64_check_relocs
9388
9389 #define elf_backend_copy_indirect_symbol \
9390 elfNN_aarch64_copy_indirect_symbol
9391
9392 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
9393 to them in our hash table. */
9394 #define elf_backend_create_dynamic_sections \
9395 elfNN_aarch64_create_dynamic_sections
9396
9397 #define elf_backend_init_index_section \
9398 _bfd_elf_init_2_index_sections
9399
9400 #define elf_backend_finish_dynamic_sections \
9401 elfNN_aarch64_finish_dynamic_sections
9402
9403 #define elf_backend_finish_dynamic_symbol \
9404 elfNN_aarch64_finish_dynamic_symbol
9405
9406 #define elf_backend_gc_sweep_hook \
9407 elfNN_aarch64_gc_sweep_hook
9408
9409 #define elf_backend_object_p \
9410 elfNN_aarch64_object_p
9411
9412 #define elf_backend_output_arch_local_syms \
9413 elfNN_aarch64_output_arch_local_syms
9414
9415 #define elf_backend_plt_sym_val \
9416 elfNN_aarch64_plt_sym_val
9417
9418 #define elf_backend_post_process_headers \
9419 elfNN_aarch64_post_process_headers
9420
9421 #define elf_backend_relocate_section \
9422 elfNN_aarch64_relocate_section
9423
9424 #define elf_backend_reloc_type_class \
9425 elfNN_aarch64_reloc_type_class
9426
9427 #define elf_backend_section_from_shdr \
9428 elfNN_aarch64_section_from_shdr
9429
9430 #define elf_backend_size_dynamic_sections \
9431 elfNN_aarch64_size_dynamic_sections
9432
9433 #define elf_backend_size_info \
9434 elfNN_aarch64_size_info
9435
9436 #define elf_backend_write_section \
9437 elfNN_aarch64_write_section
9438
9439 #define elf_backend_symbol_processing \
9440 elfNN_aarch64_backend_symbol_processing
9441
9442 #define elf_backend_can_refcount 1
9443 #define elf_backend_can_gc_sections 1
9444 #define elf_backend_plt_readonly 1
9445 #define elf_backend_want_got_plt 1
9446 #define elf_backend_want_plt_sym 0
9447 #define elf_backend_want_dynrelro 1
9448 #define elf_backend_may_use_rel_p 0
9449 #define elf_backend_may_use_rela_p 1
9450 #define elf_backend_default_use_rela_p 1
9451 #define elf_backend_rela_normal 1
9452 #define elf_backend_dtrel_excludes_plt 1
9453 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
9454 #define elf_backend_default_execstack 0
9455 #define elf_backend_extern_protected_data 1
9456 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
9457
9458 #undef elf_backend_obj_attrs_section
9459 #define elf_backend_obj_attrs_section ".ARM.attributes"
9460
9461 #include "elfNN-target.h"
9462
9463 /* CloudABI support. */
9464
9465 #undef TARGET_LITTLE_SYM
9466 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
9467 #undef TARGET_LITTLE_NAME
9468 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
9469 #undef TARGET_BIG_SYM
9470 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
9471 #undef TARGET_BIG_NAME
9472 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
9473
9474 #undef ELF_OSABI
9475 #define ELF_OSABI ELFOSABI_CLOUDABI
9476
9477 #undef elfNN_bed
9478 #define elfNN_bed elfNN_aarch64_cloudabi_bed
9479
9480 #include "elfNN-target.h"