1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanisms.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
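
   As a rough sketch of how the double GOT entry ends up being used
   (illustrative only; the authoritative behaviour is the relocation
   processing implemented below):

     traditional TLS:  GOT[n]   = module id  (R_AARCH64_TLS_DTPMOD)
                       GOT[n+1] = offset     (R_AARCH64_TLS_DTPREL)
     TLS descriptor:   GOT[n]   = resolver   (R_AARCH64_TLSDESC)
                       GOT[n+1] = argument passed to the resolver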
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. Local symbol data structures are created once, when the
102 first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
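
   For example, code that consumes the recorded GOT offset does the
   equivalent of the following (an illustrative sketch only, not the
   exact helpers used later in this file):

     off = <recorded GOT offset for the symbol>;
     already_emitted = (off & 1) != 0;
     off &= ~(bfd_vma) 1;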
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "elf-bfd.h"
143 #include "bfdlink.h"
144 #include "objalloc.h"
145 #include "elf/aarch64.h"
146 #include "elfxx-aarch64.h"
147 #include "cpu-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
158 #endif
159
160 #if ARCH_SIZE == 32
161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
163 #define HOWTO64(...) EMPTY_HOWTO (0)
164 #define HOWTO32(...) HOWTO (__VA_ARGS__)
165 #define LOG_FILE_ALIGN 2
166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC
168 #endif
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \
209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
221
222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
245
246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
255 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
259
260 #define ELIMINATE_COPY_RELOCS 1
261
262 /* Return size of a relocation entry. HTAB is the bfd's
263 elf_aarch64_link_hash_table. */
264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
265
266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
268 #define PLT_ENTRY_SIZE (32)
269 #define PLT_SMALL_ENTRY_SIZE (16)
270 #define PLT_TLSDESC_ENTRY_SIZE (32)
271 /* PLT sizes with BTI insn. */
272 #define PLT_BTI_SMALL_ENTRY_SIZE (24)
273 /* PLT sizes with PAC insn. */
274 #define PLT_PAC_SMALL_ENTRY_SIZE (24)
275 /* PLT sizes with BTI and PAC insn. */
276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24)
277
278 /* Encoding of the nop instruction. */
279 #define INSN_NOP 0xd503201f
280
281 #define aarch64_compute_jump_table_size(htab) \
282 (((htab)->root.srelplt == NULL) ? 0 \
283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
284
285 /* The first entry in a procedure linkage table looks like this.
286 It is used if the distance between the PLTGOT and the PLT is < 4GB.
287 Note that the dynamic linker gets &PLTGOT[2]
288 in x16 and needs to work out PLTGOT[1] by using an address of
289 [x16,#-GOT_ENTRY_SIZE]. */
290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
291 {
292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
294 #if ARCH_SIZE == 64
295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
297 #else
298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
300 #endif
301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
302 0x1f, 0x20, 0x03, 0xd5, /* nop */
303 0x1f, 0x20, 0x03, 0xd5, /* nop */
304 0x1f, 0x20, 0x03, 0xd5, /* nop */
305 };
306
307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
308 {
309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
312 #if ARCH_SIZE == 64
313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
315 #else
316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
318 #endif
319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 /* A per-function entry in a procedure linkage table looks like this.
325 It is used if the distance between the PLTGOT and the PLT is < 4GB.
326 Use the BTI versions of the PLT entries when BTI is enabled. */
327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
328 {
329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
330 #if ARCH_SIZE == 64
331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
333 #else
334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
336 #endif
337 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
338 };
339
340 static const bfd_byte
341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
342 {
343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
345 #if ARCH_SIZE == 64
346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
348 #else
349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
351 #endif
352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
353 0x1f, 0x20, 0x03, 0xd5, /* nop */
354 };
355
356 static const bfd_byte
357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
358 {
359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
360 #if ARCH_SIZE == 64
361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
363 #else
364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
366 #endif
367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
369 0x1f, 0x20, 0x03, 0xd5, /* nop */
370 };
371
372 static const bfd_byte
373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
374 {
375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
377 #if ARCH_SIZE == 64
378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
380 #else
381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
383 #endif
384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
386 };
387
388 static const bfd_byte
389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
390 {
391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
394 #if ARCH_SIZE == 64
395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
397 #else
398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
400 #endif
401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
402 0x1f, 0x20, 0x03, 0xd5, /* nop */
403 0x1f, 0x20, 0x03, 0xd5, /* nop */
404 };
405
406 static const bfd_byte
407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
408 {
409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
413 #if ARCH_SIZE == 64
414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
416 #else
417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
419 #endif
420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
421 0x1f, 0x20, 0x03, 0xd5, /* nop */
422 };
423
424 #define elf_info_to_howto elfNN_aarch64_info_to_howto
425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
426
427 #define AARCH64_ELF_ABI_VERSION 0
428
429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
430 #define ALL_ONES (~ (bfd_vma) 0)
431
432 /* Indexed by the bfd internal reloc enumerators.
433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
434 in reloc.c. */
435
436 static reloc_howto_type elfNN_aarch64_howto_table[] =
437 {
438 EMPTY_HOWTO (0),
439
440 /* Basic data relocations. */
441
442 /* Deprecated, but retained for backwards compatibility. */
443 HOWTO64 (R_AARCH64_NULL, /* type */
444 0, /* rightshift */
445 3, /* size (0 = byte, 1 = short, 2 = long) */
446 0, /* bitsize */
447 FALSE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_dont, /* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_AARCH64_NULL", /* name */
452 FALSE, /* partial_inplace */
453 0, /* src_mask */
454 0, /* dst_mask */
455 FALSE), /* pcrel_offset */
456 HOWTO (R_AARCH64_NONE, /* type */
457 0, /* rightshift */
458 3, /* size (0 = byte, 1 = short, 2 = long) */
459 0, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_dont, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_NONE", /* name */
465 FALSE, /* partial_inplace */
466 0, /* src_mask */
467 0, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A) */
471 HOWTO64 (AARCH64_R (ABS64), /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 FALSE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 AARCH64_R_STR (ABS64), /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 FALSE), /* pcrel_offset */
484
485 /* .word: (S+A) */
486 HOWTO (AARCH64_R (ABS32), /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 FALSE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_unsigned, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 AARCH64_R_STR (ABS32), /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 FALSE), /* pcrel_offset */
499
500 /* .half: (S+A) */
501 HOWTO (AARCH64_R (ABS16), /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 FALSE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_unsigned, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 AARCH64_R_STR (ABS16), /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 FALSE), /* pcrel_offset */
514
515 /* .xword: (S+A-P) */
516 HOWTO64 (AARCH64_R (PREL64), /* type */
517 0, /* rightshift */
518 4, /* size (4 = long long) */
519 64, /* bitsize */
520 TRUE, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 AARCH64_R_STR (PREL64), /* name */
525 FALSE, /* partial_inplace */
526 ALL_ONES, /* src_mask */
527 ALL_ONES, /* dst_mask */
528 TRUE), /* pcrel_offset */
529
530 /* .word: (S+A-P) */
531 HOWTO (AARCH64_R (PREL32), /* type */
532 0, /* rightshift */
533 2, /* size (0 = byte, 1 = short, 2 = long) */
534 32, /* bitsize */
535 TRUE, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (PREL32), /* name */
540 FALSE, /* partial_inplace */
541 0xffffffff, /* src_mask */
542 0xffffffff, /* dst_mask */
543 TRUE), /* pcrel_offset */
544
545 /* .half: (S+A-P) */
546 HOWTO (AARCH64_R (PREL16), /* type */
547 0, /* rightshift */
548 1, /* size (0 = byte, 1 = short, 2 = long) */
549 16, /* bitsize */
550 TRUE, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (PREL16), /* name */
555 FALSE, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 TRUE), /* pcrel_offset */
559
560 /* Group relocations to create a 16, 32, 48 or 64 bit
561 unsigned data or abs address inline. */
562
563 /* MOVZ: ((S+A) >> 0) & 0xffff */
564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_UABS_G0), /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
580 0, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 16) & 0xffff */
594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
595 16, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (MOVW_UABS_G1), /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
610 16, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 32) & 0xffff */
624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
625 32, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 16, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (MOVW_UABS_G2), /* name */
633 FALSE, /* partial_inplace */
634 0xffff, /* src_mask */
635 0xffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
640 32, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 16, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
648 FALSE, /* partial_inplace */
649 0xffff, /* src_mask */
650 0xffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 /* MOVZ: ((S+A) >> 48) & 0xffff */
654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
655 48, /* rightshift */
656 2, /* size (0 = byte, 1 = short, 2 = long) */
657 16, /* bitsize */
658 FALSE, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_unsigned, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (MOVW_UABS_G3), /* name */
663 FALSE, /* partial_inplace */
664 0xffff, /* src_mask */
665 0xffff, /* dst_mask */
666 FALSE), /* pcrel_offset */
667
668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
669 signed data or abs address inline. Will change instruction
670 to MOVN or MOVZ depending on sign of calculated value. */
671
672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
674 0, /* rightshift */
675 2, /* size (0 = byte, 1 = short, 2 = long) */
676 17, /* bitsize */
677 FALSE, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (MOVW_SABS_G0), /* name */
682 FALSE, /* partial_inplace */
683 0xffff, /* src_mask */
684 0xffff, /* dst_mask */
685 FALSE), /* pcrel_offset */
686
687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
689 16, /* rightshift */
690 2, /* size (0 = byte, 1 = short, 2 = long) */
691 17, /* bitsize */
692 FALSE, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (MOVW_SABS_G1), /* name */
697 FALSE, /* partial_inplace */
698 0xffff, /* src_mask */
699 0xffff, /* dst_mask */
700 FALSE), /* pcrel_offset */
701
702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
704 32, /* rightshift */
705 2, /* size (0 = byte, 1 = short, 2 = long) */
706 17, /* bitsize */
707 FALSE, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (MOVW_SABS_G2), /* name */
712 FALSE, /* partial_inplace */
713 0xffff, /* src_mask */
714 0xffff, /* dst_mask */
715 FALSE), /* pcrel_offset */
716
717 /* Group relocations to create a 16, 32, 48 or 64 bit
718 PC relative address inline. */
719
720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */
722 0, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 17, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (MOVW_PREL_G0), /* name */
730 FALSE, /* partial_inplace */
731 0xffff, /* src_mask */
732 0xffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */
745 FALSE, /* partial_inplace */
746 0xffff, /* src_mask */
747 0xffff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */
752 16, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 17, /* bitsize */
755 TRUE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_signed, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (MOVW_PREL_G1), /* name */
760 FALSE, /* partial_inplace */
761 0xffff, /* src_mask */
762 0xffff, /* dst_mask */
763 TRUE), /* pcrel_offset */
764
765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */
767 16, /* rightshift */
768 2, /* size (0 = byte, 1 = short, 2 = long) */
769 16, /* bitsize */
770 TRUE, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */
775 FALSE, /* partial_inplace */
776 0xffff, /* src_mask */
777 0xffff, /* dst_mask */
778 TRUE), /* pcrel_offset */
779
780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */
782 32, /* rightshift */
783 2, /* size (0 = byte, 1 = short, 2 = long) */
784 17, /* bitsize */
785 TRUE, /* pc_relative */
786 0, /* bitpos */
787 complain_overflow_signed, /* complain_on_overflow */
788 bfd_elf_generic_reloc, /* special_function */
789 AARCH64_R_STR (MOVW_PREL_G2), /* name */
790 FALSE, /* partial_inplace */
791 0xffff, /* src_mask */
792 0xffff, /* dst_mask */
793 TRUE), /* pcrel_offset */
794
795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */
797 32, /* rightshift */
798 2, /* size (0 = byte, 1 = short, 2 = long) */
799 16, /* bitsize */
800 TRUE, /* pc_relative */
801 0, /* bitpos */
802 complain_overflow_dont, /* complain_on_overflow */
803 bfd_elf_generic_reloc, /* special_function */
804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */
805 FALSE, /* partial_inplace */
806 0xffff, /* src_mask */
807 0xffff, /* dst_mask */
808 TRUE), /* pcrel_offset */
809
810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */
812 48, /* rightshift */
813 2, /* size (0 = byte, 1 = short, 2 = long) */
814 16, /* bitsize */
815 TRUE, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (MOVW_PREL_G3), /* name */
820 FALSE, /* partial_inplace */
821 0xffff, /* src_mask */
822 0xffff, /* dst_mask */
823 TRUE), /* pcrel_offset */
824
825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
826 addresses: PG(x) is (x & ~0xfff). */
827
828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
830 2, /* rightshift */
831 2, /* size (0 = byte, 1 = short, 2 = long) */
832 19, /* bitsize */
833 TRUE, /* pc_relative */
834 0, /* bitpos */
835 complain_overflow_signed, /* complain_on_overflow */
836 bfd_elf_generic_reloc, /* special_function */
837 AARCH64_R_STR (LD_PREL_LO19), /* name */
838 FALSE, /* partial_inplace */
839 0x7ffff, /* src_mask */
840 0x7ffff, /* dst_mask */
841 TRUE), /* pcrel_offset */
842
843 /* ADR: (S+A-P) & 0x1fffff */
844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
845 0, /* rightshift */
846 2, /* size (0 = byte, 1 = short, 2 = long) */
847 21, /* bitsize */
848 TRUE, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_signed, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (ADR_PREL_LO21), /* name */
853 FALSE, /* partial_inplace */
854 0x1fffff, /* src_mask */
855 0x1fffff, /* dst_mask */
856 TRUE), /* pcrel_offset */
857
858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
860 12, /* rightshift */
861 2, /* size (0 = byte, 1 = short, 2 = long) */
862 21, /* bitsize */
863 TRUE, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_signed, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
868 FALSE, /* partial_inplace */
869 0x1fffff, /* src_mask */
870 0x1fffff, /* dst_mask */
871 TRUE), /* pcrel_offset */
872
873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
875 12, /* rightshift */
876 2, /* size (0 = byte, 1 = short, 2 = long) */
877 21, /* bitsize */
878 TRUE, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
883 FALSE, /* partial_inplace */
884 0x1fffff, /* src_mask */
885 0x1fffff, /* dst_mask */
886 TRUE), /* pcrel_offset */
887
888 /* ADD: (S+A) & 0xfff [no overflow check] */
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
890 0, /* rightshift */
891 2, /* size (0 = byte, 1 = short, 2 = long) */
892 12, /* bitsize */
893 FALSE, /* pc_relative */
894 10, /* bitpos */
895 complain_overflow_dont, /* complain_on_overflow */
896 bfd_elf_generic_reloc, /* special_function */
897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
898 FALSE, /* partial_inplace */
899 0x3ffc00, /* src_mask */
900 0x3ffc00, /* dst_mask */
901 FALSE), /* pcrel_offset */
902
903 /* LD/ST8: (S+A) & 0xfff */
904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
905 0, /* rightshift */
906 2, /* size (0 = byte, 1 = short, 2 = long) */
907 12, /* bitsize */
908 FALSE, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
913 FALSE, /* partial_inplace */
914 0xfff, /* src_mask */
915 0xfff, /* dst_mask */
916 FALSE), /* pcrel_offset */
917
918 /* Relocations for control-flow instructions. */
919
920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
921 HOWTO (AARCH64_R (TSTBR14), /* type */
922 2, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 14, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_signed, /* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 AARCH64_R_STR (TSTBR14), /* name */
930 FALSE, /* partial_inplace */
931 0x3fff, /* src_mask */
932 0x3fff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
936 HOWTO (AARCH64_R (CONDBR19), /* type */
937 2, /* rightshift */
938 2, /* size (0 = byte, 1 = short, 2 = long) */
939 19, /* bitsize */
940 TRUE, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_signed, /* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 AARCH64_R_STR (CONDBR19), /* name */
945 FALSE, /* partial_inplace */
946 0x7ffff, /* src_mask */
947 0x7ffff, /* dst_mask */
948 TRUE), /* pcrel_offset */
949
950 /* B: ((S+A-P) >> 2) & 0x3ffffff */
951 HOWTO (AARCH64_R (JUMP26), /* type */
952 2, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 26, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_signed, /* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 AARCH64_R_STR (JUMP26), /* name */
960 FALSE, /* partial_inplace */
961 0x3ffffff, /* src_mask */
962 0x3ffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
966 HOWTO (AARCH64_R (CALL26), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 26, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_signed, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (CALL26), /* name */
975 FALSE, /* partial_inplace */
976 0x3ffffff, /* src_mask */
977 0x3ffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 /* LD/ST16: (S+A) & 0xffe */
981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
982 1, /* rightshift */
983 2, /* size (0 = byte, 1 = short, 2 = long) */
984 12, /* bitsize */
985 FALSE, /* pc_relative */
986 0, /* bitpos */
987 complain_overflow_dont, /* complain_on_overflow */
988 bfd_elf_generic_reloc, /* special_function */
989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
990 FALSE, /* partial_inplace */
991 0xffe, /* src_mask */
992 0xffe, /* dst_mask */
993 FALSE), /* pcrel_offset */
994
995 /* LD/ST32: (S+A) & 0xffc */
996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
997 2, /* rightshift */
998 2, /* size (0 = byte, 1 = short, 2 = long) */
999 12, /* bitsize */
1000 FALSE, /* pc_relative */
1001 0, /* bitpos */
1002 complain_overflow_dont, /* complain_on_overflow */
1003 bfd_elf_generic_reloc, /* special_function */
1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
1005 FALSE, /* partial_inplace */
1006 0xffc, /* src_mask */
1007 0xffc, /* dst_mask */
1008 FALSE), /* pcrel_offset */
1009
1010 /* LD/ST64: (S+A) & 0xff8 */
1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
1012 3, /* rightshift */
1013 2, /* size (0 = byte, 1 = short, 2 = long) */
1014 12, /* bitsize */
1015 FALSE, /* pc_relative */
1016 0, /* bitpos */
1017 complain_overflow_dont, /* complain_on_overflow */
1018 bfd_elf_generic_reloc, /* special_function */
1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
1020 FALSE, /* partial_inplace */
1021 0xff8, /* src_mask */
1022 0xff8, /* dst_mask */
1023 FALSE), /* pcrel_offset */
1024
1025 /* LD/ST128: (S+A) & 0xff0 */
1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
1027 4, /* rightshift */
1028 2, /* size (0 = byte, 1 = short, 2 = long) */
1029 12, /* bitsize */
1030 FALSE, /* pc_relative */
1031 0, /* bitpos */
1032 complain_overflow_dont, /* complain_on_overflow */
1033 bfd_elf_generic_reloc, /* special_function */
1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
1035 FALSE, /* partial_inplace */
1036 0xff0, /* src_mask */
1037 0xff0, /* dst_mask */
1038 FALSE), /* pcrel_offset */
1039
1040 /* Set a load-literal immediate field to bits
1041 0x1FFFFC of G(S)-P */
1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
1043 2, /* rightshift */
1044 2, /* size (0 = byte,1 = short,2 = long) */
1045 19, /* bitsize */
1046 TRUE, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_signed, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */
1051 FALSE, /* partial_inplace */
1052 0xffffe0, /* src_mask */
1053 0xffffe0, /* dst_mask */
1054 TRUE), /* pcrel_offset */
1055
1056 /* Get to the page for the GOT entry for the symbol
1057 (G(S) - P) using an ADRP instruction. */
1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
1059 12, /* rightshift */
1060 2, /* size (0 = byte, 1 = short, 2 = long) */
1061 21, /* bitsize */
1062 TRUE, /* pc_relative */
1063 0, /* bitpos */
1064 complain_overflow_dont, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
1067 FALSE, /* partial_inplace */
1068 0x1fffff, /* src_mask */
1069 0x1fffff, /* dst_mask */
1070 TRUE), /* pcrel_offset */
1071
1072 /* LD64: GOT offset G(S) & 0xff8 */
1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
1074 3, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 12, /* bitsize */
1077 FALSE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont, /* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
1082 FALSE, /* partial_inplace */
1083 0xff8, /* src_mask */
1084 0xff8, /* dst_mask */
1085 FALSE), /* pcrel_offset */
1086
1087 /* LD32: GOT offset G(S) & 0xffc */
1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
1089 2, /* rightshift */
1090 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 12, /* bitsize */
1092 FALSE, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
1097 FALSE, /* partial_inplace */
1098 0xffc, /* src_mask */
1099 0xffc, /* dst_mask */
1100 FALSE), /* pcrel_offset */
1101
1102 /* Lower 16 bits of GOT offset for the symbol. */
1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 16, /* bitsize */
1107 FALSE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont, /* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
1112 FALSE, /* partial_inplace */
1113 0xffff, /* src_mask */
1114 0xffff, /* dst_mask */
1115 FALSE), /* pcrel_offset */
1116
1117 /* Higher 16 bits of GOT offset for the symbol. */
1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
1119 16, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 16, /* bitsize */
1122 FALSE, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
1127 FALSE, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 FALSE), /* pcrel_offset */
1131
1132 /* LD64: GOT offset for the symbol. */
1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
1134 3, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 12, /* bitsize */
1137 FALSE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
1142 FALSE, /* partial_inplace */
1143 0x7ff8, /* src_mask */
1144 0x7ff8, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1146
1147 /* LD32: GOT offset to the page address of GOT table.
1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
1150 2, /* rightshift */
1151 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 12, /* bitsize */
1153 FALSE, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_unsigned, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
1158 FALSE, /* partial_inplace */
1159 0x5ffc, /* src_mask */
1160 0x5ffc, /* dst_mask */
1161 FALSE), /* pcrel_offset */
1162
1163 /* LD64: GOT offset to the page address of GOT table.
1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
1166 3, /* rightshift */
1167 2, /* size (0 = byte, 1 = short, 2 = long) */
1168 12, /* bitsize */
1169 FALSE, /* pc_relative */
1170 0, /* bitpos */
1171 complain_overflow_unsigned, /* complain_on_overflow */
1172 bfd_elf_generic_reloc, /* special_function */
1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
1174 FALSE, /* partial_inplace */
1175 0x7ff8, /* src_mask */
1176 0x7ff8, /* dst_mask */
1177 FALSE), /* pcrel_offset */
1178
1179 /* Get to the page for the GOT entry for the symbol
1180 (G(S) - P) using an ADRP instruction. */
1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
1182 12, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 21, /* bitsize */
1185 TRUE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
1190 FALSE, /* partial_inplace */
1191 0x1fffff, /* src_mask */
1192 0x1fffff, /* dst_mask */
1193 TRUE), /* pcrel_offset */
1194
1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 21, /* bitsize */
1199 TRUE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
1204 FALSE, /* partial_inplace */
1205 0x1fffff, /* src_mask */
1206 0x1fffff, /* dst_mask */
1207 TRUE), /* pcrel_offset */
1208
1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1211 0, /* rightshift */
1212 2, /* size (0 = byte, 1 = short, 2 = long) */
1213 12, /* bitsize */
1214 FALSE, /* pc_relative */
1215 0, /* bitpos */
1216 complain_overflow_dont, /* complain_on_overflow */
1217 bfd_elf_generic_reloc, /* special_function */
1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1219 FALSE, /* partial_inplace */
1220 0xfff, /* src_mask */
1221 0xfff, /* dst_mask */
1222 FALSE), /* pcrel_offset */
1223
1224 /* Lower 16 bits of GOT offset to tls_index. */
1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1226 0, /* rightshift */
1227 2, /* size (0 = byte, 1 = short, 2 = long) */
1228 16, /* bitsize */
1229 FALSE, /* pc_relative */
1230 0, /* bitpos */
1231 complain_overflow_dont, /* complain_on_overflow */
1232 bfd_elf_generic_reloc, /* special_function */
1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1234 FALSE, /* partial_inplace */
1235 0xffff, /* src_mask */
1236 0xffff, /* dst_mask */
1237 FALSE), /* pcrel_offset */
1238
1239 /* Higher 16 bits of GOT offset to tls_index. */
1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1241 16, /* rightshift */
1242 2, /* size (0 = byte, 1 = short, 2 = long) */
1243 16, /* bitsize */
1244 FALSE, /* pc_relative */
1245 0, /* bitpos */
1246 complain_overflow_unsigned, /* complain_on_overflow */
1247 bfd_elf_generic_reloc, /* special_function */
1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1249 FALSE, /* partial_inplace */
1250 0xffff, /* src_mask */
1251 0xffff, /* dst_mask */
1252 FALSE), /* pcrel_offset */
1253
1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1255 12, /* rightshift */
1256 2, /* size (0 = byte, 1 = short, 2 = long) */
1257 21, /* bitsize */
1258 FALSE, /* pc_relative */
1259 0, /* bitpos */
1260 complain_overflow_dont, /* complain_on_overflow */
1261 bfd_elf_generic_reloc, /* special_function */
1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1263 FALSE, /* partial_inplace */
1264 0x1fffff, /* src_mask */
1265 0x1fffff, /* dst_mask */
1266 FALSE), /* pcrel_offset */
1267
1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1269 3, /* rightshift */
1270 2, /* size (0 = byte, 1 = short, 2 = long) */
1271 12, /* bitsize */
1272 FALSE, /* pc_relative */
1273 0, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1277 FALSE, /* partial_inplace */
1278 0xff8, /* src_mask */
1279 0xff8, /* dst_mask */
1280 FALSE), /* pcrel_offset */
1281
1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1283 2, /* rightshift */
1284 2, /* size (0 = byte, 1 = short, 2 = long) */
1285 12, /* bitsize */
1286 FALSE, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1291 FALSE, /* partial_inplace */
1292 0xffc, /* src_mask */
1293 0xffc, /* dst_mask */
1294 FALSE), /* pcrel_offset */
1295
1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1297 2, /* rightshift */
1298 2, /* size (0 = byte, 1 = short, 2 = long) */
1299 19, /* bitsize */
1300 FALSE, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1305 FALSE, /* partial_inplace */
1306 0x1ffffc, /* src_mask */
1307 0x1ffffc, /* dst_mask */
1308 FALSE), /* pcrel_offset */
1309
1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1311 0, /* rightshift */
1312 2, /* size (0 = byte, 1 = short, 2 = long) */
1313 16, /* bitsize */
1314 FALSE, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1319 FALSE, /* partial_inplace */
1320 0xffff, /* src_mask */
1321 0xffff, /* dst_mask */
1322 FALSE), /* pcrel_offset */
1323
1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1325 16, /* rightshift */
1326 2, /* size (0 = byte, 1 = short, 2 = long) */
1327 16, /* bitsize */
1328 FALSE, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_unsigned, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1333 FALSE, /* partial_inplace */
1334 0xffff, /* src_mask */
1335 0xffff, /* dst_mask */
1336 FALSE), /* pcrel_offset */
1337
1338 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1340 12, /* rightshift */
1341 2, /* size (0 = byte, 1 = short, 2 = long) */
1342 12, /* bitsize */
1343 FALSE, /* pc_relative */
1344 0, /* bitpos */
1345 complain_overflow_unsigned, /* complain_on_overflow */
1346 bfd_elf_generic_reloc, /* special_function */
1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1348 FALSE, /* partial_inplace */
1349 0xfff, /* src_mask */
1350 0xfff, /* dst_mask */
1351 FALSE), /* pcrel_offset */
1352
1353 /* Unsigned 12 bit byte offset to module TLS base address. */
1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1355 0, /* rightshift */
1356 2, /* size (0 = byte, 1 = short, 2 = long) */
1357 12, /* bitsize */
1358 FALSE, /* pc_relative */
1359 0, /* bitpos */
1360 complain_overflow_unsigned, /* complain_on_overflow */
1361 bfd_elf_generic_reloc, /* special_function */
1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1363 FALSE, /* partial_inplace */
1364 0xfff, /* src_mask */
1365 0xfff, /* dst_mask */
1366 FALSE), /* pcrel_offset */
1367
1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1370 0, /* rightshift */
1371 2, /* size (0 = byte, 1 = short, 2 = long) */
1372 12, /* bitsize */
1373 FALSE, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1378 FALSE, /* partial_inplace */
1379 0xfff, /* src_mask */
1380 0xfff, /* dst_mask */
1381 FALSE), /* pcrel_offset */
1382
1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1385 0, /* rightshift */
1386 2, /* size (0 = byte, 1 = short, 2 = long) */
1387 12, /* bitsize */
1388 FALSE, /* pc_relative */
1389 0, /* bitpos */
1390 complain_overflow_dont, /* complain_on_overflow */
1391 bfd_elf_generic_reloc, /* special_function */
1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1393 FALSE, /* partial_inplace */
1394 0xfff, /* src_mask */
1395 0xfff, /* dst_mask */
1396 FALSE), /* pcrel_offset */
1397
1398 /* Get to the page for the GOT entry for the symbol
1399 (G(S) - P) using an ADRP instruction. */
1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1401 12, /* rightshift */
1402 2, /* size (0 = byte, 1 = short, 2 = long) */
1403 21, /* bitsize */
1404 TRUE, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_signed, /* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1409 FALSE, /* partial_inplace */
1410 0x1fffff, /* src_mask */
1411 0x1fffff, /* dst_mask */
1412 TRUE), /* pcrel_offset */
1413
1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1415 0, /* rightshift */
1416 2, /* size (0 = byte, 1 = short, 2 = long) */
1417 21, /* bitsize */
1418 TRUE, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_signed, /* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1423 FALSE, /* partial_inplace */
1424 0x1fffff, /* src_mask */
1425 0x1fffff, /* dst_mask */
1426 TRUE), /* pcrel_offset */
1427
1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1430 1, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 11, /* bitsize */
1433 FALSE, /* pc_relative */
1434 10, /* bitpos */
1435 complain_overflow_unsigned, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1438 FALSE, /* partial_inplace */
1439 0x1ffc00, /* src_mask */
1440 0x1ffc00, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1445 1, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 11, /* bitsize */
1448 FALSE, /* pc_relative */
1449 10, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1453 FALSE, /* partial_inplace */
1454 0x1ffc00, /* src_mask */
1455 0x1ffc00, /* dst_mask */
1456 FALSE), /* pcrel_offset */
1457
1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1460 2, /* rightshift */
1461 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 10, /* bitsize */
1463 FALSE, /* pc_relative */
1464 10, /* bitpos */
1465 complain_overflow_unsigned, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1468 FALSE, /* partial_inplace */
1469 0x3ffc00, /* src_mask */
1470 0x3ffc00, /* dst_mask */
1471 FALSE), /* pcrel_offset */
1472
1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1475 2, /* rightshift */
1476 2, /* size (0 = byte, 1 = short, 2 = long) */
1477 10, /* bitsize */
1478 FALSE, /* pc_relative */
1479 10, /* bitpos */
1480 complain_overflow_dont, /* complain_on_overflow */
1481 bfd_elf_generic_reloc, /* special_function */
1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1483 FALSE, /* partial_inplace */
1484 0xffc00, /* src_mask */
1485 0xffc00, /* dst_mask */
1486 FALSE), /* pcrel_offset */
1487
1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1490 3, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 9, /* bitsize */
1493 FALSE, /* pc_relative */
1494 10, /* bitpos */
1495 complain_overflow_unsigned, /* complain_on_overflow */
1496 bfd_elf_generic_reloc, /* special_function */
1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1498 FALSE, /* partial_inplace */
1499 0x3ffc00, /* src_mask */
1500 0x3ffc00, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1505 3, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 9, /* bitsize */
1508 FALSE, /* pc_relative */
1509 10, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1513 FALSE, /* partial_inplace */
1514 0x7fc00, /* src_mask */
1515 0x7fc00, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 12, /* bitsize */
1523 FALSE, /* pc_relative */
1524 10, /* bitpos */
1525 complain_overflow_unsigned, /* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1528 FALSE, /* partial_inplace */
1529 0x3ffc00, /* src_mask */
1530 0x3ffc00, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1535 0, /* rightshift */
1536 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 12, /* bitsize */
1538 FALSE, /* pc_relative */
1539 10, /* bitpos */
1540 complain_overflow_dont, /* complain_on_overflow */
1541 bfd_elf_generic_reloc, /* special_function */
1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1543 FALSE, /* partial_inplace */
1544 0x3ffc00, /* src_mask */
1545 0x3ffc00, /* dst_mask */
1546 FALSE), /* pcrel_offset */
1547
1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 16, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_unsigned, /* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1558 FALSE, /* partial_inplace */
1559 0xffff, /* src_mask */
1560 0xffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1565 0, /* rightshift */
1566 2, /* size (0 = byte, 1 = short, 2 = long) */
1567 16, /* bitsize */
1568 FALSE, /* pc_relative */
1569 0, /* bitpos */
1570 complain_overflow_dont, /* complain_on_overflow */
1571 bfd_elf_generic_reloc, /* special_function */
1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1573 FALSE, /* partial_inplace */
1574 0xffff, /* src_mask */
1575 0xffff, /* dst_mask */
1576 FALSE), /* pcrel_offset */
1577
1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1580 16, /* rightshift */
1581 2, /* size (0 = byte, 1 = short, 2 = long) */
1582 16, /* bitsize */
1583 FALSE, /* pc_relative */
1584 0, /* bitpos */
1585 complain_overflow_unsigned, /* complain_on_overflow */
1586 bfd_elf_generic_reloc, /* special_function */
1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1588 FALSE, /* partial_inplace */
1589 0xffff, /* src_mask */
1590 0xffff, /* dst_mask */
1591 FALSE), /* pcrel_offset */
1592
1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1595 16, /* rightshift */
1596 2, /* size (0 = byte, 1 = short, 2 = long) */
1597 16, /* bitsize */
1598 FALSE, /* pc_relative */
1599 0, /* bitpos */
1600 complain_overflow_dont, /* complain_on_overflow */
1601 bfd_elf_generic_reloc, /* special_function */
1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1603 FALSE, /* partial_inplace */
1604 0xffff, /* src_mask */
1605 0xffff, /* dst_mask */
1606 FALSE), /* pcrel_offset */
1607
1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1610 32, /* rightshift */
1611 2, /* size (0 = byte, 1 = short, 2 = long) */
1612 16, /* bitsize */
1613 FALSE, /* pc_relative */
1614 0, /* bitpos */
1615 complain_overflow_unsigned, /* complain_on_overflow */
1616 bfd_elf_generic_reloc, /* special_function */
1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1618 FALSE, /* partial_inplace */
1619 0xffff, /* src_mask */
1620 0xffff, /* dst_mask */
1621 FALSE), /* pcrel_offset */
1622
1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1624 32, /* rightshift */
1625 2, /* size (0 = byte, 1 = short, 2 = long) */
1626 16, /* bitsize */
1627 FALSE, /* pc_relative */
1628 0, /* bitpos */
1629 complain_overflow_unsigned, /* complain_on_overflow */
1630 bfd_elf_generic_reloc, /* special_function */
1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1632 FALSE, /* partial_inplace */
1633 0xffff, /* src_mask */
1634 0xffff, /* dst_mask */
1635 FALSE), /* pcrel_offset */
1636
1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1638 16, /* rightshift */
1639 2, /* size (0 = byte, 1 = short, 2 = long) */
1640 16, /* bitsize */
1641 FALSE, /* pc_relative */
1642 0, /* bitpos */
1643 complain_overflow_dont, /* complain_on_overflow */
1644 bfd_elf_generic_reloc, /* special_function */
1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1646 FALSE, /* partial_inplace */
1647 0xffff, /* src_mask */
1648 0xffff, /* dst_mask */
1649 FALSE), /* pcrel_offset */
1650
1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1652 16, /* rightshift */
1653 2, /* size (0 = byte, 1 = short, 2 = long) */
1654 16, /* bitsize */
1655 FALSE, /* pc_relative */
1656 0, /* bitpos */
1657 complain_overflow_dont, /* complain_on_overflow */
1658 bfd_elf_generic_reloc, /* special_function */
1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1660 FALSE, /* partial_inplace */
1661 0xffff, /* src_mask */
1662 0xffff, /* dst_mask */
1663 FALSE), /* pcrel_offset */
1664
1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1666 0, /* rightshift */
1667 2, /* size (0 = byte, 1 = short, 2 = long) */
1668 16, /* bitsize */
1669 FALSE, /* pc_relative */
1670 0, /* bitpos */
1671 complain_overflow_dont, /* complain_on_overflow */
1672 bfd_elf_generic_reloc, /* special_function */
1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1674 FALSE, /* partial_inplace */
1675 0xffff, /* src_mask */
1676 0xffff, /* dst_mask */
1677 FALSE), /* pcrel_offset */
1678
1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1680 0, /* rightshift */
1681 2, /* size (0 = byte, 1 = short, 2 = long) */
1682 16, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_dont, /* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1688 FALSE, /* partial_inplace */
1689 0xffff, /* src_mask */
1690 0xffff, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692
1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1694 12, /* rightshift */
1695 2, /* size (0 = byte, 1 = short, 2 = long) */
1696 12, /* bitsize */
1697 FALSE, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_unsigned, /* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1702 FALSE, /* partial_inplace */
1703 0xfff, /* src_mask */
1704 0xfff, /* dst_mask */
1705 FALSE), /* pcrel_offset */
1706
1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1708 0, /* rightshift */
1709 2, /* size (0 = byte, 1 = short, 2 = long) */
1710 12, /* bitsize */
1711 FALSE, /* pc_relative */
1712 0, /* bitpos */
1713 complain_overflow_unsigned, /* complain_on_overflow */
1714 bfd_elf_generic_reloc, /* special_function */
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1716 FALSE, /* partial_inplace */
1717 0xfff, /* src_mask */
1718 0xfff, /* dst_mask */
1719 FALSE), /* pcrel_offset */
1720
1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1722 0, /* rightshift */
1723 2, /* size (0 = byte, 1 = short, 2 = long) */
1724 12, /* bitsize */
1725 FALSE, /* pc_relative */
1726 0, /* bitpos */
1727 complain_overflow_dont, /* complain_on_overflow */
1728 bfd_elf_generic_reloc, /* special_function */
1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1730 FALSE, /* partial_inplace */
1731 0xfff, /* src_mask */
1732 0xfff, /* dst_mask */
1733 FALSE), /* pcrel_offset */
1734
1735 /* LD/ST16: bit[11:1] of byte offset from the thread pointer (TP-relative). */
1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
1737 1, /* rightshift */
1738 2, /* size (0 = byte, 1 = short, 2 = long) */
1739 11, /* bitsize */
1740 FALSE, /* pc_relative */
1741 10, /* bitpos */
1742 complain_overflow_unsigned, /* complain_on_overflow */
1743 bfd_elf_generic_reloc, /* special_function */
1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
1745 FALSE, /* partial_inplace */
1746 0x1ffc00, /* src_mask */
1747 0x1ffc00, /* dst_mask */
1748 FALSE), /* pcrel_offset */
1749
1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
1752 1, /* rightshift */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1754 11, /* bitsize */
1755 FALSE, /* pc_relative */
1756 10, /* bitpos */
1757 complain_overflow_dont, /* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
1760 FALSE, /* partial_inplace */
1761 0x1ffc00, /* src_mask */
1762 0x1ffc00, /* dst_mask */
1763 FALSE), /* pcrel_offset */
1764
1765 /* LD/ST32: bit[11:2] of byte offset from the thread pointer (TP-relative). */
1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
1767 2, /* rightshift */
1768 2, /* size (0 = byte, 1 = short, 2 = long) */
1769 10, /* bitsize */
1770 FALSE, /* pc_relative */
1771 10, /* bitpos */
1772 complain_overflow_unsigned, /* complain_on_overflow */
1773 bfd_elf_generic_reloc, /* special_function */
1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
1775 FALSE, /* partial_inplace */
1776 0xffc00, /* src_mask */
1777 0xffc00, /* dst_mask */
1778 FALSE), /* pcrel_offset */
1779
1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
1782 2, /* rightshift */
1783 2, /* size (0 = byte, 1 = short, 2 = long) */
1784 10, /* bitsize */
1785 FALSE, /* pc_relative */
1786 10, /* bitpos */
1787 complain_overflow_dont, /* complain_on_overflow */
1788 bfd_elf_generic_reloc, /* special_function */
1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
1790 FALSE, /* partial_inplace */
1791 0xffc00, /* src_mask */
1792 0xffc00, /* dst_mask */
1793 FALSE), /* pcrel_offset */
1794
1795 /* LD/ST64: bit[11:3] of byte offset from the thread pointer (TP-relative). */
1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
1797 3, /* rightshift */
1798 2, /* size (0 = byte, 1 = short, 2 = long) */
1799 9, /* bitsize */
1800 FALSE, /* pc_relative */
1801 10, /* bitpos */
1802 complain_overflow_unsigned, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
1805 FALSE, /* partial_inplace */
1806 0x7fc00, /* src_mask */
1807 0x7fc00, /* dst_mask */
1808 FALSE), /* pcrel_offset */
1809
1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
1812 3, /* rightshift */
1813 2, /* size (0 = byte, 1 = short, 2 = long) */
1814 9, /* bitsize */
1815 FALSE, /* pc_relative */
1816 10, /* bitpos */
1817 complain_overflow_dont, /* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
1820 FALSE, /* partial_inplace */
1821 0x7fc00, /* src_mask */
1822 0x7fc00, /* dst_mask */
1823 FALSE), /* pcrel_offset */
1824
1825 /* LD/ST8: bit[11:0] of byte offset from the thread pointer (TP-relative). */
1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
1827 0, /* rightshift */
1828 2, /* size (0 = byte, 1 = short, 2 = long) */
1829 12, /* bitsize */
1830 FALSE, /* pc_relative */
1831 10, /* bitpos */
1832 complain_overflow_unsigned, /* complain_on_overflow */
1833 bfd_elf_generic_reloc, /* special_function */
1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
1835 FALSE, /* partial_inplace */
1836 0x3ffc00, /* src_mask */
1837 0x3ffc00, /* dst_mask */
1838 FALSE), /* pcrel_offset */
1839
1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
1842 0, /* rightshift */
1843 2, /* size (0 = byte, 1 = short, 2 = long) */
1844 12, /* bitsize */
1845 FALSE, /* pc_relative */
1846 10, /* bitpos */
1847 complain_overflow_dont, /* complain_on_overflow */
1848 bfd_elf_generic_reloc, /* special_function */
1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
1850 FALSE, /* partial_inplace */
1851 0x3ffc00, /* src_mask */
1852 0x3ffc00, /* dst_mask */
1853 FALSE), /* pcrel_offset */
1854
1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1856 2, /* rightshift */
1857 2, /* size (0 = byte, 1 = short, 2 = long) */
1858 19, /* bitsize */
1859 TRUE, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_dont, /* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1864 FALSE, /* partial_inplace */
1865 0x0ffffe0, /* src_mask */
1866 0x0ffffe0, /* dst_mask */
1867 TRUE), /* pcrel_offset */
1868
1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1870 0, /* rightshift */
1871 2, /* size (0 = byte, 1 = short, 2 = long) */
1872 21, /* bitsize */
1873 TRUE, /* pc_relative */
1874 0, /* bitpos */
1875 complain_overflow_dont, /* complain_on_overflow */
1876 bfd_elf_generic_reloc, /* special_function */
1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1878 FALSE, /* partial_inplace */
1879 0x1fffff, /* src_mask */
1880 0x1fffff, /* dst_mask */
1881 TRUE), /* pcrel_offset */
1882
1883 /* Get to the page for the GOT entry for the symbol
1884 (G(S) - P) using an ADRP instruction. */
1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1886 12, /* rightshift */
1887 2, /* size (0 = byte, 1 = short, 2 = long) */
1888 21, /* bitsize */
1889 TRUE, /* pc_relative */
1890 0, /* bitpos */
1891 complain_overflow_dont, /* complain_on_overflow */
1892 bfd_elf_generic_reloc, /* special_function */
1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1894 FALSE, /* partial_inplace */
1895 0x1fffff, /* src_mask */
1896 0x1fffff, /* dst_mask */
1897 TRUE), /* pcrel_offset */
1898
1899 /* LD64: GOT offset G(S) & 0xff8. */
1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
1901 3, /* rightshift */
1902 2, /* size (0 = byte, 1 = short, 2 = long) */
1903 12, /* bitsize */
1904 FALSE, /* pc_relative */
1905 0, /* bitpos */
1906 complain_overflow_dont, /* complain_on_overflow */
1907 bfd_elf_generic_reloc, /* special_function */
1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
1909 FALSE, /* partial_inplace */
1910 0xff8, /* src_mask */
1911 0xff8, /* dst_mask */
1912 FALSE), /* pcrel_offset */
1913
1914 /* LD32: GOT offset G(S) & 0xffc. */
1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1916 2, /* rightshift */
1917 2, /* size (0 = byte, 1 = short, 2 = long) */
1918 12, /* bitsize */
1919 FALSE, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont, /* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1924 FALSE, /* partial_inplace */
1925 0xffc, /* src_mask */
1926 0xffc, /* dst_mask */
1927 FALSE), /* pcrel_offset */
1928
1929 /* ADD: GOT offset G(S) & 0xfff. */
1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
1931 0, /* rightshift */
1932 2, /* size (0 = byte, 1 = short, 2 = long) */
1933 12, /* bitsize */
1934 FALSE, /* pc_relative */
1935 0, /* bitpos */
1936 complain_overflow_dont, /* complain_on_overflow */
1937 bfd_elf_generic_reloc, /* special_function */
1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
1939 FALSE, /* partial_inplace */
1940 0xfff, /* src_mask */
1941 0xfff, /* dst_mask */
1942 FALSE), /* pcrel_offset */
1943
1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1945 16, /* rightshift */
1946 2, /* size (0 = byte, 1 = short, 2 = long) */
1947 12, /* bitsize */
1948 FALSE, /* pc_relative */
1949 0, /* bitpos */
1950 complain_overflow_unsigned, /* complain_on_overflow */
1951 bfd_elf_generic_reloc, /* special_function */
1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1953 FALSE, /* partial_inplace */
1954 0xffff, /* src_mask */
1955 0xffff, /* dst_mask */
1956 FALSE), /* pcrel_offset */
1957
1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1959 0, /* rightshift */
1960 2, /* size (0 = byte, 1 = short, 2 = long) */
1961 12, /* bitsize */
1962 FALSE, /* pc_relative */
1963 0, /* bitpos */
1964 complain_overflow_dont, /* complain_on_overflow */
1965 bfd_elf_generic_reloc, /* special_function */
1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1967 FALSE, /* partial_inplace */
1968 0xffff, /* src_mask */
1969 0xffff, /* dst_mask */
1970 FALSE), /* pcrel_offset */
1971
1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1973 0, /* rightshift */
1974 2, /* size (0 = byte, 1 = short, 2 = long) */
1975 12, /* bitsize */
1976 FALSE, /* pc_relative */
1977 0, /* bitpos */
1978 complain_overflow_dont, /* complain_on_overflow */
1979 bfd_elf_generic_reloc, /* special_function */
1980 AARCH64_R_STR (TLSDESC_LDR), /* name */
1981 FALSE, /* partial_inplace */
1982 0x0, /* src_mask */
1983 0x0, /* dst_mask */
1984 FALSE), /* pcrel_offset */
1985
1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1987 0, /* rightshift */
1988 2, /* size (0 = byte, 1 = short, 2 = long) */
1989 12, /* bitsize */
1990 FALSE, /* pc_relative */
1991 0, /* bitpos */
1992 complain_overflow_dont, /* complain_on_overflow */
1993 bfd_elf_generic_reloc, /* special_function */
1994 AARCH64_R_STR (TLSDESC_ADD), /* name */
1995 FALSE, /* partial_inplace */
1996 0x0, /* src_mask */
1997 0x0, /* dst_mask */
1998 FALSE), /* pcrel_offset */
1999
2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
2001 0, /* rightshift */
2002 2, /* size (0 = byte, 1 = short, 2 = long) */
2003 0, /* bitsize */
2004 FALSE, /* pc_relative */
2005 0, /* bitpos */
2006 complain_overflow_dont, /* complain_on_overflow */
2007 bfd_elf_generic_reloc, /* special_function */
2008 AARCH64_R_STR (TLSDESC_CALL), /* name */
2009 FALSE, /* partial_inplace */
2010 0x0, /* src_mask */
2011 0x0, /* dst_mask */
2012 FALSE), /* pcrel_offset */
2013
2014 HOWTO (AARCH64_R (COPY), /* type */
2015 0, /* rightshift */
2016 2, /* size (0 = byte, 1 = short, 2 = long) */
2017 64, /* bitsize */
2018 FALSE, /* pc_relative */
2019 0, /* bitpos */
2020 complain_overflow_bitfield, /* complain_on_overflow */
2021 bfd_elf_generic_reloc, /* special_function */
2022 AARCH64_R_STR (COPY), /* name */
2023 TRUE, /* partial_inplace */
2024 0xffffffff, /* src_mask */
2025 0xffffffff, /* dst_mask */
2026 FALSE), /* pcrel_offset */
2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */
2029 0, /* rightshift */
2030 2, /* size (0 = byte, 1 = short, 2 = long) */
2031 64, /* bitsize */
2032 FALSE, /* pc_relative */
2033 0, /* bitpos */
2034 complain_overflow_bitfield, /* complain_on_overflow */
2035 bfd_elf_generic_reloc, /* special_function */
2036 AARCH64_R_STR (GLOB_DAT), /* name */
2037 TRUE, /* partial_inplace */
2038 0xffffffff, /* src_mask */
2039 0xffffffff, /* dst_mask */
2040 FALSE), /* pcrel_offset */
2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
2043 0, /* rightshift */
2044 2, /* size (0 = byte, 1 = short, 2 = long) */
2045 64, /* bitsize */
2046 FALSE, /* pc_relative */
2047 0, /* bitpos */
2048 complain_overflow_bitfield, /* complain_on_overflow */
2049 bfd_elf_generic_reloc, /* special_function */
2050 AARCH64_R_STR (JUMP_SLOT), /* name */
2051 TRUE, /* partial_inplace */
2052 0xffffffff, /* src_mask */
2053 0xffffffff, /* dst_mask */
2054 FALSE), /* pcrel_offset */
2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */
2057 0, /* rightshift */
2058 2, /* size (0 = byte, 1 = short, 2 = long) */
2059 64, /* bitsize */
2060 FALSE, /* pc_relative */
2061 0, /* bitpos */
2062 complain_overflow_bitfield, /* complain_on_overflow */
2063 bfd_elf_generic_reloc, /* special_function */
2064 AARCH64_R_STR (RELATIVE), /* name */
2065 TRUE, /* partial_inplace */
2066 ALL_ONES, /* src_mask */
2067 ALL_ONES, /* dst_mask */
2068 FALSE), /* pcrel_offset */
2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
2071 0, /* rightshift */
2072 2, /* size (0 = byte, 1 = short, 2 = long) */
2073 64, /* bitsize */
2074 FALSE, /* pc_relative */
2075 0, /* bitpos */
2076 complain_overflow_dont, /* complain_on_overflow */
2077 bfd_elf_generic_reloc, /* special_function */
2078 #if ARCH_SIZE == 64
2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */
2080 #else
2081 AARCH64_R_STR (TLS_DTPMOD), /* name */
2082 #endif
2083 FALSE, /* partial_inplace */
2084 0, /* src_mask */
2085 ALL_ONES, /* dst_mask */
2086 FALSE), /* pcrel_offset */
2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
2089 0, /* rightshift */
2090 2, /* size (0 = byte, 1 = short, 2 = long) */
2091 64, /* bitsize */
2092 FALSE, /* pc_relative */
2093 0, /* bitpos */
2094 complain_overflow_dont, /* complain_on_overflow */
2095 bfd_elf_generic_reloc, /* special_function */
2096 #if ARCH_SIZE == 64
2097 AARCH64_R_STR (TLS_DTPREL64), /* name */
2098 #else
2099 AARCH64_R_STR (TLS_DTPREL), /* name */
2100 #endif
2101 FALSE, /* partial_inplace */
2102 0, /* src_mask */
2103 ALL_ONES, /* dst_mask */
2104 FALSE), /* pcrel_offset */
2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */
2107 0, /* rightshift */
2108 2, /* size (0 = byte, 1 = short, 2 = long) */
2109 64, /* bitsize */
2110 FALSE, /* pc_relative */
2111 0, /* bitpos */
2112 complain_overflow_dont, /* complain_on_overflow */
2113 bfd_elf_generic_reloc, /* special_function */
2114 #if ARCH_SIZE == 64
2115 AARCH64_R_STR (TLS_TPREL64), /* name */
2116 #else
2117 AARCH64_R_STR (TLS_TPREL), /* name */
2118 #endif
2119 FALSE, /* partial_inplace */
2120 0, /* src_mask */
2121 ALL_ONES, /* dst_mask */
2122 FALSE), /* pcrel_offset */
2123
2124 HOWTO (AARCH64_R (TLSDESC), /* type */
2125 0, /* rightshift */
2126 2, /* size (0 = byte, 1 = short, 2 = long) */
2127 64, /* bitsize */
2128 FALSE, /* pc_relative */
2129 0, /* bitpos */
2130 complain_overflow_dont, /* complain_on_overflow */
2131 bfd_elf_generic_reloc, /* special_function */
2132 AARCH64_R_STR (TLSDESC), /* name */
2133 FALSE, /* partial_inplace */
2134 0, /* src_mask */
2135 ALL_ONES, /* dst_mask */
2136 FALSE), /* pcrel_offset */
2137
2138 HOWTO (AARCH64_R (IRELATIVE), /* type */
2139 0, /* rightshift */
2140 2, /* size (0 = byte, 1 = short, 2 = long) */
2141 64, /* bitsize */
2142 FALSE, /* pc_relative */
2143 0, /* bitpos */
2144 complain_overflow_bitfield, /* complain_on_overflow */
2145 bfd_elf_generic_reloc, /* special_function */
2146 AARCH64_R_STR (IRELATIVE), /* name */
2147 FALSE, /* partial_inplace */
2148 0, /* src_mask */
2149 ALL_ONES, /* dst_mask */
2150 FALSE), /* pcrel_offset */
2151
2152 EMPTY_HOWTO (0),
2153 };
2154
2155 static reloc_howto_type elfNN_aarch64_howto_none =
2156 HOWTO (R_AARCH64_NONE, /* type */
2157 0, /* rightshift */
2158 3, /* size (0 = byte, 1 = short, 2 = long) */
2159 0, /* bitsize */
2160 FALSE, /* pc_relative */
2161 0, /* bitpos */
2162 complain_overflow_dont, /* complain_on_overflow */
2163 bfd_elf_generic_reloc, /* special_function */
2164 "R_AARCH64_NONE", /* name */
2165 FALSE, /* partial_inplace */
2166 0, /* src_mask */
2167 0, /* dst_mask */
2168 FALSE); /* pcrel_offset */
2169
2170 /* Given HOWTO, return the bfd internal relocation enumerator. */
2171
2172 static bfd_reloc_code_real_type
2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
2174 {
2175 const int size
2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
2177 const ptrdiff_t offset
2178 = howto - elfNN_aarch64_howto_table;
2179
2180 if (offset > 0 && offset < size - 1)
2181 return BFD_RELOC_AARCH64_RELOC_START + offset;
2182
2183 if (howto == &elfNN_aarch64_howto_none)
2184 return BFD_RELOC_AARCH64_NONE;
2185
2186 return BFD_RELOC_AARCH64_RELOC_START;
2187 }
2188
2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */
2190
2191 static bfd_reloc_code_real_type
2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
2193 {
2194 static bfd_boolean initialized_p = FALSE;
2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */
2196 static unsigned int offsets[R_AARCH64_end];
2197
2198 if (!initialized_p)
2199 {
2200 unsigned int i;
2201
2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2203 if (elfNN_aarch64_howto_table[i].type != 0)
2204 offsets[elfNN_aarch64_howto_table[i].type] = i;
2205
2206 initialized_p = TRUE;
2207 }
2208
2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
2210 return BFD_RELOC_AARCH64_NONE;
2211
2212 /* PR 17512: file: b371e70a. */
2213 if (r_type >= R_AARCH64_end)
2214 {
2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2216 abfd, r_type);
2217 bfd_set_error (bfd_error_bad_value);
2218 return BFD_RELOC_AARCH64_NONE;
2219 }
2220
2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
2222 }
2223
2224 struct elf_aarch64_reloc_map
2225 {
2226 bfd_reloc_code_real_type from;
2227 bfd_reloc_code_real_type to;
2228 };
2229
2230 /* Map bfd generic reloc to AArch64-specific reloc. */
2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
2232 {
2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
2234
2235 /* Basic data relocations. */
2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
2243 };
2244
2245 /* Given the bfd internal relocation enumerator in CODE, return the
2246 corresponding howto entry. */
2247
2248 static reloc_howto_type *
2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
2250 {
2251 unsigned int i;
2252
2253 /* Convert bfd generic reloc to AArch64-specific reloc. */
2254 if (code < BFD_RELOC_AARCH64_RELOC_START
2255 || code > BFD_RELOC_AARCH64_RELOC_END)
2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
2257 if (elf_aarch64_reloc_map[i].from == code)
2258 {
2259 code = elf_aarch64_reloc_map[i].to;
2260 break;
2261 }
2262
2263 if (code > BFD_RELOC_AARCH64_RELOC_START
2264 && code < BFD_RELOC_AARCH64_RELOC_END)
2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
2267
2268 if (code == BFD_RELOC_AARCH64_NONE)
2269 return &elfNN_aarch64_howto_none;
2270
2271 return NULL;
2272 }
2273
2274 static reloc_howto_type *
2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
2276 {
2277 bfd_reloc_code_real_type val;
2278 reloc_howto_type *howto;
2279
2280 #if ARCH_SIZE == 32
2281 if (r_type > 256)
2282 {
2283 bfd_set_error (bfd_error_bad_value);
2284 return NULL;
2285 }
2286 #endif
2287
2288 if (r_type == R_AARCH64_NONE)
2289 return &elfNN_aarch64_howto_none;
2290
2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
2293
2294 if (howto != NULL)
2295 return howto;
2296
2297 bfd_set_error (bfd_error_bad_value);
2298 return NULL;
2299 }
2300
2301 static bfd_boolean
2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
2303 Elf_Internal_Rela *elf_reloc)
2304 {
2305 unsigned int r_type;
2306
2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
2309
2310 if (bfd_reloc->howto == NULL)
2311 {
2312 /* xgettext:c-format */
2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
2314 return FALSE;
2315 }
2316 return TRUE;
2317 }
2318
2319 static reloc_howto_type *
2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2321 bfd_reloc_code_real_type code)
2322 {
2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
2324
2325 if (howto != NULL)
2326 return howto;
2327
2328 bfd_set_error (bfd_error_bad_value);
2329 return NULL;
2330 }
2331
2332 static reloc_howto_type *
2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2334 const char *r_name)
2335 {
2336 unsigned int i;
2337
2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2339 if (elfNN_aarch64_howto_table[i].name != NULL
2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2341 return &elfNN_aarch64_howto_table[i];
2342
2343 return NULL;
2344 }
2345
2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2349 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2350
2351 /* The linker script knows the section names for placement.
2352 The entry_names are used to do simple name mangling on the stubs.
2353 Given a function name, and its type, the stub can be found. The
2354 name can be changed. The only requirement is that the %s be present. */
2355 #define STUB_ENTRY_NAME "__%s_veneer"
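/* As an illustration of the mangling above: a veneer generated for a call
   to a function named "printf" (the name is only an example) would carry
   the local symbol name "__printf_veneer".  */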
2356
2357 /* The name of the dynamic interpreter. This is put in the .interp
2358 section. */
2359 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2360
2361 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2362 (((1 << 25) - 1) << 2)
2363 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2364 (-((1 << 25) << 2))
2365
2366 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2367 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
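/* A rough sketch of the ranges these encode: B and BL carry a signed
   26-bit word offset, so the branch limits above work out to roughly
   +/- 128MiB, while ADRP carries a signed 21-bit page offset, so the ADRP
   limits cover roughly +/- 4GiB of 4KiB pages around the place of the
   instruction.  */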
2368
2369 static int
2370 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2371 {
2372 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2373 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2374 }
2375
2376 static int
2377 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2378 {
2379 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2380 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2381 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2382 }
2383
2384 static const uint32_t aarch64_adrp_branch_stub [] =
2385 {
2386 0x90000010, /* adrp ip0, X */
2387 /* R_AARCH64_ADR_HI21_PCREL(X) */
2388 0x91000210, /* add ip0, ip0, :lo12:X */
2389 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2390 0xd61f0200, /* br ip0 */
2391 };
2392
2393 static const uint32_t aarch64_long_branch_stub[] =
2394 {
2395 #if ARCH_SIZE == 64
2396 0x58000090, /* ldr ip0, 1f */
2397 #else
2398 0x18000090, /* ldr wip0, 1f */
2399 #endif
2400 0x10000011, /* adr ip1, #0 */
2401 0x8b110210, /* add ip0, ip0, ip1 */
2402 0xd61f0200, /* br ip0 */
2403 0x00000000, /* 1: .xword or .word
2404 R_AARCH64_PRELNN(X) + 12
2405 */
2406 0x00000000,
2407 };
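/* A sketch of how the long branch stub works: "adr ip1, #0" yields the
   address of the adr itself (stub + 4), and the literal at label 1
   (stub + 16) resolves to the destination minus that address, which is
   R_AARCH64_PRELNN(X) computed at the literal plus 12; the add therefore
   reconstitutes the absolute destination in ip0 before the br.  */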
2408
2409 static const uint32_t aarch64_erratum_835769_stub[] =
2410 {
2411 0x00000000, /* Placeholder for multiply accumulate. */
2412 0x14000000, /* b <label> */
2413 };
2414
2415 static const uint32_t aarch64_erratum_843419_stub[] =
2416 {
2417 0x00000000, /* Placeholder for LDR instruction. */
2418 0x14000000, /* b <label> */
2419 };
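/* In outline, both erratum veneers are completed when the stub is built:
   the placeholder word receives a copy of the instruction that is moved
   out of line (the multiply-accumulate or the LDR), and the branch that
   follows it is retargeted back into the original instruction stream.  */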
2420
2421 /* Section name for stubs is the associated section name plus this
2422 string. */
2423 #define STUB_SUFFIX ".stub"
2424
2425 enum elf_aarch64_stub_type
2426 {
2427 aarch64_stub_none,
2428 aarch64_stub_adrp_branch,
2429 aarch64_stub_long_branch,
2430 aarch64_stub_erratum_835769_veneer,
2431 aarch64_stub_erratum_843419_veneer,
2432 };
2433
2434 struct elf_aarch64_stub_hash_entry
2435 {
2436 /* Base hash table entry structure. */
2437 struct bfd_hash_entry root;
2438
2439 /* The stub section. */
2440 asection *stub_sec;
2441
2442 /* Offset within stub_sec of the beginning of this stub. */
2443 bfd_vma stub_offset;
2444
2445 /* Given the symbol's value and its section we can determine its final
2446 value when building the stubs (so the stub knows where to jump). */
2447 bfd_vma target_value;
2448 asection *target_section;
2449
2450 enum elf_aarch64_stub_type stub_type;
2451
2452 /* The symbol table entry, if any, that this was derived from. */
2453 struct elf_aarch64_link_hash_entry *h;
2454
2455 /* Destination symbol type */
2456 unsigned char st_type;
2457
2458 /* Where this stub is being called from, or, in the case of combined
2459 stub sections, the first input section in the group. */
2460 asection *id_sec;
2461
2462 /* The name for the local symbol at the start of this stub. The
2463 stub name in the hash table has to be unique; this does not, so
2464 it can be friendlier. */
2465 char *output_name;
2466
2467 /* The instruction which caused this stub to be generated (only valid for
2468 erratum 835769 workaround stubs at present). */
2469 uint32_t veneered_insn;
2470
2471 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2472 bfd_vma adrp_offset;
2473 };
2474
2475 /* Used to build a map of a section. This is required for mixed-endian
2476 code/data. */
2477
2478 typedef struct elf_elf_section_map
2479 {
2480 bfd_vma vma;
2481 char type;
2482 }
2483 elf_aarch64_section_map;
2484
2485
2486 typedef struct _aarch64_elf_section_data
2487 {
2488 struct bfd_elf_section_data elf;
2489 unsigned int mapcount;
2490 unsigned int mapsize;
2491 elf_aarch64_section_map *map;
2492 }
2493 _aarch64_elf_section_data;
2494
2495 #define elf_aarch64_section_data(sec) \
2496 ((_aarch64_elf_section_data *) elf_section_data (sec))
2497
2498 /* The size of the thread control block, which is defined to be two pointers. */
2499 #define TCB_SIZE (ARCH_SIZE/8)*2
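/* Concretely this is 16 bytes for ELF64 (ARCH_SIZE == 64) and 8 bytes for
   ELF32 (ARCH_SIZE == 32): AArch64 uses variant 1 TLS, where the thread
   pointer addresses the TCB and the static TLS block follows it.  */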
2500
2501 struct elf_aarch64_local_symbol
2502 {
2503 unsigned int got_type;
2504 bfd_signed_vma got_refcount;
2505 bfd_vma got_offset;
2506
2507 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2508 offset is from the end of the jump table and reserved entries
2509 within the PLTGOT.
2510
2511 The magic value (bfd_vma) -1 indicates that an offset has not been
2512 allocated. */
2513 bfd_vma tlsdesc_got_jump_table_offset;
2514 };
2515
2516 struct elf_aarch64_obj_tdata
2517 {
2518 struct elf_obj_tdata root;
2519
2520 /* local symbol descriptors */
2521 struct elf_aarch64_local_symbol *locals;
2522
2523 /* Zero to warn when linking objects with incompatible enum sizes. */
2524 int no_enum_size_warning;
2525
2526 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2527 int no_wchar_size_warning;
2528
2529 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
2530 uint32_t gnu_and_prop;
2531
2532 /* Zero to warn when linking objects with incompatible
2533 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
2534 int no_bti_warn;
2535
2536 /* PLT type based on security. */
2537 aarch64_plt_type plt_type;
2538 };
2539
2540 #define elf_aarch64_tdata(bfd) \
2541 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2542
2543 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2544
2545 #define is_aarch64_elf(bfd) \
2546 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2547 && elf_tdata (bfd) != NULL \
2548 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2549
2550 static bfd_boolean
2551 elfNN_aarch64_mkobject (bfd *abfd)
2552 {
2553 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2554 AARCH64_ELF_DATA);
2555 }
2556
2557 #define elf_aarch64_hash_entry(ent) \
2558 ((struct elf_aarch64_link_hash_entry *)(ent))
2559
2560 #define GOT_UNKNOWN 0
2561 #define GOT_NORMAL 1
2562 #define GOT_TLS_GD 2
2563 #define GOT_TLS_IE 4
2564 #define GOT_TLSDESC_GD 8
2565
2566 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
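/* The GOT_* values above are individual bits so that got_type can record
   several access models for one symbol; for instance, a symbol reached
   through both a TLS descriptor and an initial-exec sequence could carry
   GOT_TLSDESC_GD | GOT_TLS_IE.  */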
2567
2568 /* AArch64 ELF linker hash entry. */
2569 struct elf_aarch64_link_hash_entry
2570 {
2571 struct elf_link_hash_entry root;
2572
2573 /* Track dynamic relocs copied for this symbol. */
2574 struct elf_dyn_relocs *dyn_relocs;
2575
2576 /* Since PLT entries have variable size, we need to record the
2577 index into .got.plt instead of recomputing it from the PLT
2578 offset. */
2579 bfd_signed_vma plt_got_offset;
2580
2581 /* Bit mask representing the type of GOT entry(s) if any required by
2582 this symbol. */
2583 unsigned int got_type;
2584
2585 /* A pointer to the most recently used stub hash entry against this
2586 symbol. */
2587 struct elf_aarch64_stub_hash_entry *stub_cache;
2588
2589 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2590 is from the end of the jump table and reserved entries within the PLTGOT.
2591
2592 The magic value (bfd_vma) -1 indicates that an offset has not
2593 been allocated. */
2594 bfd_vma tlsdesc_got_jump_table_offset;
2595 };
2596
2597 static unsigned int
2598 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2599 bfd *abfd,
2600 unsigned long r_symndx)
2601 {
2602 if (h)
2603 return elf_aarch64_hash_entry (h)->got_type;
2604
2605 if (! elf_aarch64_locals (abfd))
2606 return GOT_UNKNOWN;
2607
2608 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2609 }
2610
2611 /* Get the AArch64 elf linker hash table from a link_info structure. */
2612 #define elf_aarch64_hash_table(info) \
2613 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2614
2615 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2616 ((struct elf_aarch64_stub_hash_entry *) \
2617 bfd_hash_lookup ((table), (string), (create), (copy)))
2618
2619 /* AArch64 ELF linker hash table. */
2620 struct elf_aarch64_link_hash_table
2621 {
2622 /* The main hash table. */
2623 struct elf_link_hash_table root;
2624
2625 /* Nonzero to force PIC branch veneers. */
2626 int pic_veneer;
2627
2628 /* Fix erratum 835769. */
2629 int fix_erratum_835769;
2630
2631 /* Fix erratum 843419. */
2632 erratum_84319_opts fix_erratum_843419;
2633
2634 /* Don't apply link-time values for dynamic relocations. */
2635 int no_apply_dynamic_relocs;
2636
2637 /* The number of bytes in the initial entry in the PLT. */
2638 bfd_size_type plt_header_size;
2639
2640 /* The bytes of the initial PLT entry. */
2641 const bfd_byte *plt0_entry;
2642
2643 /* The number of bytes in the subsequent PLT entries. */
2644 bfd_size_type plt_entry_size;
2645
2646 /* The bytes of the subsequent PLT entry. */
2647 const bfd_byte *plt_entry;
2648
2649 /* Small local sym cache. */
2650 struct sym_cache sym_cache;
2651
2652 /* For convenience in allocate_dynrelocs. */
2653 bfd *obfd;
2654
2655 /* The amount of space used by the reserved portion of the sgotplt
2656 section, plus whatever space is used by the jump slots. */
2657 bfd_vma sgotplt_jump_table_size;
2658
2659 /* The stub hash table. */
2660 struct bfd_hash_table stub_hash_table;
2661
2662 /* Linker stub bfd. */
2663 bfd *stub_bfd;
2664
2665 /* Linker call-backs. */
2666 asection *(*add_stub_section) (const char *, asection *);
2667 void (*layout_sections_again) (void);
2668
2669 /* Array to keep track of which stub sections have been created, and
2670 information on stub grouping. */
2671 struct map_stub
2672 {
2673 /* This is the section to which stubs in the group will be
2674 attached. */
2675 asection *link_sec;
2676 /* The stub section. */
2677 asection *stub_sec;
2678 } *stub_group;
2679
2680 /* Assorted information used by elfNN_aarch64_size_stubs. */
2681 unsigned int bfd_count;
2682 unsigned int top_index;
2683 asection **input_list;
2684
2685 /* JUMP_SLOT relocs for variant PCS symbols may be present. */
2686 int variant_pcs;
2687
2688 /* The offset into splt of the PLT entry for the TLS descriptor
2689 resolver. Special values are 0, if not necessary (or not found
2690 to be necessary yet), and -1 if needed but not determined
2691 yet. */
2692 bfd_vma tlsdesc_plt;
2693
2694 /* The number of bytes in the PLT entry for the TLS descriptor. */
2695 bfd_size_type tlsdesc_plt_entry_size;
2696
2697 /* The GOT offset for the lazy trampoline. Communicated to the
2698 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2699 indicates an offset is not allocated. */
2700 bfd_vma dt_tlsdesc_got;
2701
2702 /* Used by local STT_GNU_IFUNC symbols. */
2703 htab_t loc_hash_table;
2704 void * loc_hash_memory;
2705 };
2706
2707 /* Create an entry in an AArch64 ELF linker hash table. */
2708
2709 static struct bfd_hash_entry *
2710 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2711 struct bfd_hash_table *table,
2712 const char *string)
2713 {
2714 struct elf_aarch64_link_hash_entry *ret =
2715 (struct elf_aarch64_link_hash_entry *) entry;
2716
2717 /* Allocate the structure if it has not already been allocated by a
2718 subclass. */
2719 if (ret == NULL)
2720 ret = bfd_hash_allocate (table,
2721 sizeof (struct elf_aarch64_link_hash_entry));
2722 if (ret == NULL)
2723 return (struct bfd_hash_entry *) ret;
2724
2725 /* Call the allocation method of the superclass. */
2726 ret = ((struct elf_aarch64_link_hash_entry *)
2727 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2728 table, string));
2729 if (ret != NULL)
2730 {
2731 ret->dyn_relocs = NULL;
2732 ret->got_type = GOT_UNKNOWN;
2733 ret->plt_got_offset = (bfd_vma) - 1;
2734 ret->stub_cache = NULL;
2735 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2736 }
2737
2738 return (struct bfd_hash_entry *) ret;
2739 }
2740
2741 /* Initialize an entry in the stub hash table. */
2742
2743 static struct bfd_hash_entry *
2744 stub_hash_newfunc (struct bfd_hash_entry *entry,
2745 struct bfd_hash_table *table, const char *string)
2746 {
2747 /* Allocate the structure if it has not already been allocated by a
2748 subclass. */
2749 if (entry == NULL)
2750 {
2751 entry = bfd_hash_allocate (table,
2752 sizeof (struct
2753 elf_aarch64_stub_hash_entry));
2754 if (entry == NULL)
2755 return entry;
2756 }
2757
2758 /* Call the allocation method of the superclass. */
2759 entry = bfd_hash_newfunc (entry, table, string);
2760 if (entry != NULL)
2761 {
2762 struct elf_aarch64_stub_hash_entry *eh;
2763
2764 /* Initialize the local fields. */
2765 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2766 eh->adrp_offset = 0;
2767 eh->stub_sec = NULL;
2768 eh->stub_offset = 0;
2769 eh->target_value = 0;
2770 eh->target_section = NULL;
2771 eh->stub_type = aarch64_stub_none;
2772 eh->h = NULL;
2773 eh->id_sec = NULL;
2774 }
2775
2776 return entry;
2777 }
2778
2779 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2780 for local symbol so that we can handle local STT_GNU_IFUNC symbols
2781 as global symbol. We reuse indx and dynstr_index for local symbol
2782 hash since they aren't used by global symbols in this backend. */
2783
2784 static hashval_t
2785 elfNN_aarch64_local_htab_hash (const void *ptr)
2786 {
2787 struct elf_link_hash_entry *h
2788 = (struct elf_link_hash_entry *) ptr;
2789 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2790 }
2791
2792 /* Compare local hash entries. */
2793
2794 static int
2795 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2796 {
2797 struct elf_link_hash_entry *h1
2798 = (struct elf_link_hash_entry *) ptr1;
2799 struct elf_link_hash_entry *h2
2800 = (struct elf_link_hash_entry *) ptr2;
2801
2802 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2803 }
2804
2805 /* Find and/or create a hash entry for a local symbol. */
2806
2807 static struct elf_link_hash_entry *
2808 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2809 bfd *abfd, const Elf_Internal_Rela *rel,
2810 bfd_boolean create)
2811 {
2812 struct elf_aarch64_link_hash_entry e, *ret;
2813 asection *sec = abfd->sections;
2814 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2815 ELFNN_R_SYM (rel->r_info));
2816 void **slot;
2817
2818 e.root.indx = sec->id;
2819 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2820 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2821 create ? INSERT : NO_INSERT);
2822
2823 if (!slot)
2824 return NULL;
2825
2826 if (*slot)
2827 {
2828 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2829 return &ret->root;
2830 }
2831
2832 ret = (struct elf_aarch64_link_hash_entry *)
2833 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2834 sizeof (struct elf_aarch64_link_hash_entry));
2835 if (ret)
2836 {
2837 memset (ret, 0, sizeof (*ret));
2838 ret->root.indx = sec->id;
2839 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2840 ret->root.dynindx = -1;
2841 *slot = ret;
2842 }
2843 return &ret->root;
2844 }
2845
2846 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2847
2848 static void
2849 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2850 struct elf_link_hash_entry *dir,
2851 struct elf_link_hash_entry *ind)
2852 {
2853 struct elf_aarch64_link_hash_entry *edir, *eind;
2854
2855 edir = (struct elf_aarch64_link_hash_entry *) dir;
2856 eind = (struct elf_aarch64_link_hash_entry *) ind;
2857
2858 if (eind->dyn_relocs != NULL)
2859 {
2860 if (edir->dyn_relocs != NULL)
2861 {
2862 struct elf_dyn_relocs **pp;
2863 struct elf_dyn_relocs *p;
2864
2865 /* Add reloc counts against the indirect sym to the direct sym
2866 list. Merge any entries against the same section. */
2867 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2868 {
2869 struct elf_dyn_relocs *q;
2870
2871 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2872 if (q->sec == p->sec)
2873 {
2874 q->pc_count += p->pc_count;
2875 q->count += p->count;
2876 *pp = p->next;
2877 break;
2878 }
2879 if (q == NULL)
2880 pp = &p->next;
2881 }
2882 *pp = edir->dyn_relocs;
2883 }
2884
2885 edir->dyn_relocs = eind->dyn_relocs;
2886 eind->dyn_relocs = NULL;
2887 }
2888
2889 if (ind->root.type == bfd_link_hash_indirect)
2890 {
2891 /* Copy over PLT info. */
2892 if (dir->got.refcount <= 0)
2893 {
2894 edir->got_type = eind->got_type;
2895 eind->got_type = GOT_UNKNOWN;
2896 }
2897 }
2898
2899 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2900 }
2901
2902 /* Merge non-visibility st_other attributes. */
2903
2904 static void
2905 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
2906 const Elf_Internal_Sym *isym,
2907 bfd_boolean definition ATTRIBUTE_UNUSED,
2908 bfd_boolean dynamic ATTRIBUTE_UNUSED)
2909 {
2910 unsigned int isym_sto = isym->st_other & ~ELF_ST_VISIBILITY (-1);
2911 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
2912
2913 if (isym_sto == h_sto)
2914 return;
2915
2916 if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
2917 /* Not fatal, this callback cannot fail. */
2918 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
2919 h->root.root.string, isym_sto);
2920
2921 /* Note: Ideally we would warn about any attribute mismatch, but
2922 this api does not allow that without substantial changes. */
2923 if (isym_sto & STO_AARCH64_VARIANT_PCS)
2924 h->other |= STO_AARCH64_VARIANT_PCS;
2925 }
2926
2927 /* Destroy an AArch64 elf linker hash table. */
2928
2929 static void
2930 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2931 {
2932 struct elf_aarch64_link_hash_table *ret
2933 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2934
2935 if (ret->loc_hash_table)
2936 htab_delete (ret->loc_hash_table);
2937 if (ret->loc_hash_memory)
2938 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2939
2940 bfd_hash_table_free (&ret->stub_hash_table);
2941 _bfd_elf_link_hash_table_free (obfd);
2942 }
2943
2944 /* Create an AArch64 elf linker hash table. */
2945
2946 static struct bfd_link_hash_table *
2947 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2948 {
2949 struct elf_aarch64_link_hash_table *ret;
2950 size_t amt = sizeof (struct elf_aarch64_link_hash_table);
2951
2952 ret = bfd_zmalloc (amt);
2953 if (ret == NULL)
2954 return NULL;
2955
2956 if (!_bfd_elf_link_hash_table_init
2957 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2958 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2959 {
2960 free (ret);
2961 return NULL;
2962 }
2963
2964 ret->plt_header_size = PLT_ENTRY_SIZE;
2965 ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
2966 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2967 ret->plt_entry = elfNN_aarch64_small_plt_entry;
2968 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
2969 ret->obfd = abfd;
2970 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2971
2972 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2973 sizeof (struct elf_aarch64_stub_hash_entry)))
2974 {
2975 _bfd_elf_link_hash_table_free (abfd);
2976 return NULL;
2977 }
2978
2979 ret->loc_hash_table = htab_try_create (1024,
2980 elfNN_aarch64_local_htab_hash,
2981 elfNN_aarch64_local_htab_eq,
2982 NULL);
2983 ret->loc_hash_memory = objalloc_create ();
2984 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2985 {
2986 elfNN_aarch64_link_hash_table_free (abfd);
2987 return NULL;
2988 }
2989 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2990
2991 return &ret->root.root;
2992 }
2993
2994 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
2995
2996 static bfd_boolean
2997 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2998 bfd_vma offset, bfd_vma value)
2999 {
3000 reloc_howto_type *howto;
3001 bfd_vma place;
3002
3003 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
3004 place = (input_section->output_section->vma + input_section->output_offset
3005 + offset);
3006
3007 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
3008 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
3009 value, 0, FALSE);
3010 return _bfd_aarch64_elf_put_addend (input_bfd,
3011 input_section->contents + offset, r_type,
3012 howto, value) == bfd_reloc_ok;
3013 }
3014
3015 static enum elf_aarch64_stub_type
3016 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
3017 {
3018 if (aarch64_valid_for_adrp_p (value, place))
3019 return aarch64_stub_adrp_branch;
3020 return aarch64_stub_long_branch;
3021 }
3022
3023 /* Determine the type of stub needed, if any, for a call. */
3024
3025 static enum elf_aarch64_stub_type
3026 aarch64_type_of_stub (asection *input_sec,
3027 const Elf_Internal_Rela *rel,
3028 asection *sym_sec,
3029 unsigned char st_type,
3030 bfd_vma destination)
3031 {
3032 bfd_vma location;
3033 bfd_signed_vma branch_offset;
3034 unsigned int r_type;
3035 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
3036
3037 if (st_type != STT_FUNC
3038 && (sym_sec == input_sec))
3039 return stub_type;
3040
3041 /* Determine where the call point is. */
3042 location = (input_sec->output_offset
3043 + input_sec->output_section->vma + rel->r_offset);
3044
3045 branch_offset = (bfd_signed_vma) (destination - location);
3046
3047 r_type = ELFNN_R_TYPE (rel->r_info);
3048
3049 /* We don't want to redirect any old unconditional jump in this way,
3050 only one which is being used for a sibcall, where it is
3051 acceptable for the IP0 and IP1 registers to be clobbered. */
3052 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
3053 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
3054 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
3055 {
3056 stub_type = aarch64_stub_long_branch;
3057 }
3058
3059 return stub_type;
3060 }
3061
3062 /* Build a name for an entry in the stub hash table. */
3063
3064 static char *
3065 elfNN_aarch64_stub_name (const asection *input_section,
3066 const asection *sym_sec,
3067 const struct elf_aarch64_link_hash_entry *hash,
3068 const Elf_Internal_Rela *rel)
3069 {
3070 char *stub_name;
3071 bfd_size_type len;
3072
3073 if (hash)
3074 {
3075 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
3076 stub_name = bfd_malloc (len);
3077 if (stub_name != NULL)
3078 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
3079 (unsigned int) input_section->id,
3080 hash->root.root.root.string,
3081 rel->r_addend);
3082 }
3083 else
3084 {
3085 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
3086 stub_name = bfd_malloc (len);
3087 if (stub_name != NULL)
3088 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
3089 (unsigned int) input_section->id,
3090 (unsigned int) sym_sec->id,
3091 (unsigned int) ELFNN_R_SYM (rel->r_info),
3092 rel->r_addend);
3093 }
3094
3095 return stub_name;
3096 }
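/* As a sketch of the names produced above (the ids are illustrative):
   a stub for a call from the section with id 0x24 to a global "memcpy"
   with addend 0 would be named "00000024_memcpy+0", while a call to a
   local symbol instead encodes the defining section id and the symbol
   index, e.g. "00000024_1f:2+0".  */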
3097
3098 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. For
3099 executable PLT slots where the executable never takes the address of those
3100 functions, the function symbols are not added to the hash table. */
3101
3102 static bfd_boolean
3103 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
3104 {
3105 if (h->plt.offset != (bfd_vma) -1
3106 && !h->def_regular
3107 && !h->pointer_equality_needed)
3108 return FALSE;
3109
3110 return _bfd_elf_hash_symbol (h);
3111 }
3112
3113
3114 /* Look up an entry in the stub hash. Stub entries are cached because
3115 creating the stub name takes a bit of time. */
3116
3117 static struct elf_aarch64_stub_hash_entry *
3118 elfNN_aarch64_get_stub_entry (const asection *input_section,
3119 const asection *sym_sec,
3120 struct elf_link_hash_entry *hash,
3121 const Elf_Internal_Rela *rel,
3122 struct elf_aarch64_link_hash_table *htab)
3123 {
3124 struct elf_aarch64_stub_hash_entry *stub_entry;
3125 struct elf_aarch64_link_hash_entry *h =
3126 (struct elf_aarch64_link_hash_entry *) hash;
3127 const asection *id_sec;
3128
3129 if ((input_section->flags & SEC_CODE) == 0)
3130 return NULL;
3131
3132 /* If this input section is part of a group of sections sharing one
3133 stub section, then use the id of the first section in the group.
3134 Stub names need to include a section id, as there may well be
3135 more than one stub used to reach say, printf, and we need to
3136 distinguish between them. */
3137 id_sec = htab->stub_group[input_section->id].link_sec;
3138
3139 if (h != NULL && h->stub_cache != NULL
3140 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
3141 {
3142 stub_entry = h->stub_cache;
3143 }
3144 else
3145 {
3146 char *stub_name;
3147
3148 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
3149 if (stub_name == NULL)
3150 return NULL;
3151
3152 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3153 stub_name, FALSE, FALSE);
3154 if (h != NULL)
3155 h->stub_cache = stub_entry;
3156
3157 free (stub_name);
3158 }
3159
3160 return stub_entry;
3161 }
3162
3163
3164 /* Create a stub section. */
3165
3166 static asection *
3167 _bfd_aarch64_create_stub_section (asection *section,
3168 struct elf_aarch64_link_hash_table *htab)
3169 {
3170 size_t namelen;
3171 bfd_size_type len;
3172 char *s_name;
3173
3174 namelen = strlen (section->name);
3175 len = namelen + sizeof (STUB_SUFFIX);
3176 s_name = bfd_alloc (htab->stub_bfd, len);
3177 if (s_name == NULL)
3178 return NULL;
3179
3180 memcpy (s_name, section->name, namelen);
3181 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3182 return (*htab->add_stub_section) (s_name, section);
3183 }
3184
3185
3186 /* Find or create a stub section for a link section.
3187
3188 Fix or create the stub section used to collect stubs attached to
3189 the specified link section. */
3190
3191 static asection *
3192 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
3193 struct elf_aarch64_link_hash_table *htab)
3194 {
3195 if (htab->stub_group[link_section->id].stub_sec == NULL)
3196 htab->stub_group[link_section->id].stub_sec
3197 = _bfd_aarch64_create_stub_section (link_section, htab);
3198 return htab->stub_group[link_section->id].stub_sec;
3199 }
3200
3201
3202 /* Find or create a stub section in the stub group for an input
3203 section. */
3204
3205 static asection *
3206 _bfd_aarch64_create_or_find_stub_sec (asection *section,
3207 struct elf_aarch64_link_hash_table *htab)
3208 {
3209 asection *link_sec = htab->stub_group[section->id].link_sec;
3210 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
3211 }
3212
3213
3214 /* Add a new stub entry in the stub group associated with an input
3215 section to the stub hash. Not all fields of the new stub entry are
3216 initialised. */
3217
3218 static struct elf_aarch64_stub_hash_entry *
3219 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
3220 asection *section,
3221 struct elf_aarch64_link_hash_table *htab)
3222 {
3223 asection *link_sec;
3224 asection *stub_sec;
3225 struct elf_aarch64_stub_hash_entry *stub_entry;
3226
3227 link_sec = htab->stub_group[section->id].link_sec;
3228 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
3229
3230 /* Enter this entry into the linker stub hash table. */
3231 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3232 TRUE, FALSE);
3233 if (stub_entry == NULL)
3234 {
3235 /* xgettext:c-format */
3236 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
3237 section->owner, stub_name);
3238 return NULL;
3239 }
3240
3241 stub_entry->stub_sec = stub_sec;
3242 stub_entry->stub_offset = 0;
3243 stub_entry->id_sec = link_sec;
3244
3245 return stub_entry;
3246 }
3247
3248 /* Add a new stub entry in the final stub section to the stub hash.
3249 Not all fields of the new stub entry are initialised. */
3250
3251 static struct elf_aarch64_stub_hash_entry *
3252 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
3253 asection *link_section,
3254 struct elf_aarch64_link_hash_table *htab)
3255 {
3256 asection *stub_sec;
3257 struct elf_aarch64_stub_hash_entry *stub_entry;
3258
3259 stub_sec = NULL;
3260 /* Only create the actual stub if we will end up needing it. */
3261 if (htab->fix_erratum_843419 & ERRAT_ADRP)
3262 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
3263 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3264 TRUE, FALSE);
3265 if (stub_entry == NULL)
3266 {
3267 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
3268 return NULL;
3269 }
3270
3271 stub_entry->stub_sec = stub_sec;
3272 stub_entry->stub_offset = 0;
3273 stub_entry->id_sec = link_section;
3274
3275 return stub_entry;
3276 }
3277
3278
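/* Build one stub: called via bfd_hash_traverse from
   elfNN_aarch64_build_stubs.  Copy the stub template selected for this
   entry into the stub section at the next free offset and resolve the
   relocations the template needs; for the erratum veneers, patch in the
   veneered instruction and a branch back to the original code.  */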
3279 static bfd_boolean
3280 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
3281 void *in_arg ATTRIBUTE_UNUSED)
3282 {
3283 struct elf_aarch64_stub_hash_entry *stub_entry;
3284 asection *stub_sec;
3285 bfd *stub_bfd;
3286 bfd_byte *loc;
3287 bfd_vma sym_value;
3288 bfd_vma veneered_insn_loc;
3289 bfd_vma veneer_entry_loc;
3290 bfd_signed_vma branch_offset = 0;
3291 unsigned int template_size;
3292 const uint32_t *template;
3293 unsigned int i;
3294
3295 /* Massage our args to the form they really have. */
3296 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3297
3298 stub_sec = stub_entry->stub_sec;
3299
3300 /* Make a note of the offset within the stubs for this entry. */
3301 stub_entry->stub_offset = stub_sec->size;
3302 loc = stub_sec->contents + stub_entry->stub_offset;
3303
3304 stub_bfd = stub_sec->owner;
3305
3306 /* This is the address of the stub destination. */
3307 sym_value = (stub_entry->target_value
3308 + stub_entry->target_section->output_offset
3309 + stub_entry->target_section->output_section->vma);
3310
3311 if (stub_entry->stub_type == aarch64_stub_long_branch)
3312 {
3313 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
3314 + stub_sec->output_offset);
3315
3316 /* See if we can relax the stub. */
3317 if (aarch64_valid_for_adrp_p (sym_value, place))
3318 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
3319 }
3320
3321 switch (stub_entry->stub_type)
3322 {
3323 case aarch64_stub_adrp_branch:
3324 template = aarch64_adrp_branch_stub;
3325 template_size = sizeof (aarch64_adrp_branch_stub);
3326 break;
3327 case aarch64_stub_long_branch:
3328 template = aarch64_long_branch_stub;
3329 template_size = sizeof (aarch64_long_branch_stub);
3330 break;
3331 case aarch64_stub_erratum_835769_veneer:
3332 template = aarch64_erratum_835769_stub;
3333 template_size = sizeof (aarch64_erratum_835769_stub);
3334 break;
3335 case aarch64_stub_erratum_843419_veneer:
3336 template = aarch64_erratum_843419_stub;
3337 template_size = sizeof (aarch64_erratum_843419_stub);
3338 break;
3339 default:
3340 abort ();
3341 }
3342
3343 for (i = 0; i < (template_size / sizeof template[0]); i++)
3344 {
3345 bfd_putl32 (template[i], loc);
3346 loc += 4;
3347 }
3348
3349 template_size = (template_size + 7) & ~7;
3350 stub_sec->size += template_size;
3351
3352 switch (stub_entry->stub_type)
3353 {
3354 case aarch64_stub_adrp_branch:
3355 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
3356 stub_entry->stub_offset, sym_value))
3357 /* The stub would not have been relaxed if the offset was out
3358 of range. */
3359 BFD_FAIL ();
3360
3361 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
3362 stub_entry->stub_offset + 4, sym_value))
3363 BFD_FAIL ();
3364 break;
3365
3366 case aarch64_stub_long_branch:
3367 /* We want the stored value to be relative to the address 12 bytes
3368 back from the place where the value itself is stored. */
3369 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
3370 stub_entry->stub_offset + 16, sym_value + 12))
3371 BFD_FAIL ();
3372 break;
3373
3374 case aarch64_stub_erratum_835769_veneer:
3375 veneered_insn_loc = stub_entry->target_section->output_section->vma
3376 + stub_entry->target_section->output_offset
3377 + stub_entry->target_value;
3378 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3379 + stub_entry->stub_sec->output_offset
3380 + stub_entry->stub_offset;
3381 branch_offset = veneered_insn_loc - veneer_entry_loc;
3382 branch_offset >>= 2;
3383 branch_offset &= 0x3ffffff;
3384 bfd_putl32 (stub_entry->veneered_insn,
3385 stub_sec->contents + stub_entry->stub_offset);
3386 bfd_putl32 (template[1] | branch_offset,
3387 stub_sec->contents + stub_entry->stub_offset + 4);
3388 break;
3389
3390 case aarch64_stub_erratum_843419_veneer:
3391 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3392 stub_entry->stub_offset + 4, sym_value + 4))
3393 BFD_FAIL ();
3394 break;
3395
3396 default:
3397 abort ();
3398 }
3399
3400 return TRUE;
3401 }
3402
3403 /* As above, but don't actually build the stub. Just bump offset so
3404 we know stub section sizes. */
3405
3406 static bfd_boolean
3407 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3408 {
3409 struct elf_aarch64_stub_hash_entry *stub_entry;
3410 struct elf_aarch64_link_hash_table *htab;
3411 int size;
3412
3413 /* Massage our args to the form they really have. */
3414 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3415 htab = (struct elf_aarch64_link_hash_table *) in_arg;
3416
3417 switch (stub_entry->stub_type)
3418 {
3419 case aarch64_stub_adrp_branch:
3420 size = sizeof (aarch64_adrp_branch_stub);
3421 break;
3422 case aarch64_stub_long_branch:
3423 size = sizeof (aarch64_long_branch_stub);
3424 break;
3425 case aarch64_stub_erratum_835769_veneer:
3426 size = sizeof (aarch64_erratum_835769_stub);
3427 break;
3428 case aarch64_stub_erratum_843419_veneer:
3429 {
3430 if (htab->fix_erratum_843419 == ERRAT_ADR)
3431 return TRUE;
3432 size = sizeof (aarch64_erratum_843419_stub);
3433 }
3434 break;
3435 default:
3436 abort ();
3437 }
3438
3439 size = (size + 7) & ~7;
3440 stub_entry->stub_sec->size += size;
3441 return TRUE;
3442 }
3443
3444 /* External entry points for sizing and building linker stubs. */
3445
3446 /* Set up various things so that we can make a list of input sections
3447 for each output section included in the link. Returns -1 on error,
3448 0 when no stubs will be needed, and 1 on success. */
3449
3450 int
3451 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3452 struct bfd_link_info *info)
3453 {
3454 bfd *input_bfd;
3455 unsigned int bfd_count;
3456 unsigned int top_id, top_index;
3457 asection *section;
3458 asection **input_list, **list;
3459 size_t amt;
3460 struct elf_aarch64_link_hash_table *htab =
3461 elf_aarch64_hash_table (info);
3462
3463 if (!is_elf_hash_table (htab))
3464 return 0;
3465
3466 /* Count the number of input BFDs and find the top input section id. */
3467 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3468 input_bfd != NULL; input_bfd = input_bfd->link.next)
3469 {
3470 bfd_count += 1;
3471 for (section = input_bfd->sections;
3472 section != NULL; section = section->next)
3473 {
3474 if (top_id < section->id)
3475 top_id = section->id;
3476 }
3477 }
3478 htab->bfd_count = bfd_count;
3479
3480 amt = sizeof (struct map_stub) * (top_id + 1);
3481 htab->stub_group = bfd_zmalloc (amt);
3482 if (htab->stub_group == NULL)
3483 return -1;
3484
3485 /* We can't use output_bfd->section_count here to find the top output
3486 section index as some sections may have been removed, and
3487 _bfd_strip_section_from_output doesn't renumber the indices. */
3488 for (section = output_bfd->sections, top_index = 0;
3489 section != NULL; section = section->next)
3490 {
3491 if (top_index < section->index)
3492 top_index = section->index;
3493 }
3494
3495 htab->top_index = top_index;
3496 amt = sizeof (asection *) * (top_index + 1);
3497 input_list = bfd_malloc (amt);
3498 htab->input_list = input_list;
3499 if (input_list == NULL)
3500 return -1;
3501
3502 /* For sections we aren't interested in, mark their entries with a
3503 value we can check later. */
3504 list = input_list + top_index;
3505 do
3506 *list = bfd_abs_section_ptr;
3507 while (list-- != input_list);
3508
3509 for (section = output_bfd->sections;
3510 section != NULL; section = section->next)
3511 {
3512 if ((section->flags & SEC_CODE) != 0)
3513 input_list[section->index] = NULL;
3514 }
3515
3516 return 1;
3517 }
3518
3519 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3520 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3521
3522 /* The linker repeatedly calls this function for each input section,
3523 in the order that input sections are linked into output sections.
3524 Build lists of input sections to determine groupings between which
3525 we may insert linker stubs. */
3526
3527 void
3528 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3529 {
3530 struct elf_aarch64_link_hash_table *htab =
3531 elf_aarch64_hash_table (info);
3532
3533 if (isec->output_section->index <= htab->top_index)
3534 {
3535 asection **list = htab->input_list + isec->output_section->index;
3536
3537 if (*list != bfd_abs_section_ptr)
3538 {
3539 /* Steal the link_sec pointer for our list. */
3540 /* This happens to make the list in reverse order,
3541 which is what we want. */
3542 PREV_SEC (isec) = *list;
3543 *list = isec;
3544 }
3545 }
3546 }
3547
3548 /* See whether we can group stub sections together. Grouping stub
3549 sections may result in fewer stubs. More importantly, we need to
3550 put all .init* and .fini* stubs at the beginning of the .init or
3551 .fini output sections respectively, because glibc splits the
3552 _init and _fini functions into multiple parts. Putting a stub in
3553 the middle of a function is not a good idea. */
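/* A small illustration of the grouping done below: input sections A, B
   and C laid out consecutively and spanning less than STUB_GROUP_SIZE
   bytes in total are all given the same link_sec and therefore share one
   stub section; a section that would push the span past the limit starts
   a new group.  */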
3554
3555 static void
3556 group_sections (struct elf_aarch64_link_hash_table *htab,
3557 bfd_size_type stub_group_size,
3558 bfd_boolean stubs_always_before_branch)
3559 {
3560 asection **list = htab->input_list + htab->top_index;
3561
3562 do
3563 {
3564 asection *tail = *list;
3565
3566 if (tail == bfd_abs_section_ptr)
3567 continue;
3568
3569 while (tail != NULL)
3570 {
3571 asection *curr;
3572 asection *prev;
3573 bfd_size_type total;
3574
3575 curr = tail;
3576 total = tail->size;
3577 while ((prev = PREV_SEC (curr)) != NULL
3578 && ((total += curr->output_offset - prev->output_offset)
3579 < stub_group_size))
3580 curr = prev;
3581
3582 /* OK, the size from the start of CURR to the end is less
3583 than stub_group_size and thus can be handled by one stub
3584 section. (Or the tail section is itself larger than
3585 stub_group_size, in which case we may be toast.)
3586 We should really be keeping track of the total size of
3587 stubs added here, as stubs contribute to the final output
3588 section size. */
3589 do
3590 {
3591 prev = PREV_SEC (tail);
3592 /* Set up this stub group. */
3593 htab->stub_group[tail->id].link_sec = curr;
3594 }
3595 while (tail != curr && (tail = prev) != NULL);
3596
3597 /* But wait, there's more! Input sections up to stub_group_size
3598 bytes before the stub section can be handled by it too. */
3599 if (!stubs_always_before_branch)
3600 {
3601 total = 0;
3602 while (prev != NULL
3603 && ((total += tail->output_offset - prev->output_offset)
3604 < stub_group_size))
3605 {
3606 tail = prev;
3607 prev = PREV_SEC (tail);
3608 htab->stub_group[tail->id].link_sec = curr;
3609 }
3610 }
3611 tail = prev;
3612 }
3613 }
3614 while (list-- != htab->input_list);
3615
3616 free (htab->input_list);
3617 }
3618
3619 #undef PREV_SEC
3620
3621 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3622
3623 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3624 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3625 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3626 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3627 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3628 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3629
3630 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3631 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3632 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3633 #define AARCH64_ZR 0x1f
3634
3635 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3636 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
3637
3638 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3639 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3640 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3641 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3642 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3643 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3644 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3645 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3646 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3647 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3648 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3649 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3650 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3651 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3652 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3653 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3654 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3655 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
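
/* Worked example (illustrative only, not used by the code): for the A64
   encoding 0xf9400041, i.e. "ldr x1, [x2]", AARCH64_LDST_UIMM and
   AARCH64_LD are true, AARCH64_RT yields 1 and AARCH64_RN yields 2.  */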
3656
3657 /* Classify an INSN if it is indeed a load/store.
3658
3659 Return TRUE if INSN is an LD/ST instruction, otherwise return FALSE.
3660
3661 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3662 is set equal to RT.
3663
3664 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
3665
3666 static bfd_boolean
3667 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3668 bfd_boolean *pair, bfd_boolean *load)
3669 {
3670 uint32_t opcode;
3671 unsigned int r;
3672 uint32_t opc = 0;
3673 uint32_t v = 0;
3674 uint32_t opc_v = 0;
3675
3676 /* Bail out quickly if INSN doesn't fall into the load-store
3677 encoding space. */
3678 if (!AARCH64_LDST (insn))
3679 return FALSE;
3680
3681 *pair = FALSE;
3682 *load = FALSE;
3683 if (AARCH64_LDST_EX (insn))
3684 {
3685 *rt = AARCH64_RT (insn);
3686 *rt2 = *rt;
3687 if (AARCH64_BIT (insn, 21) == 1)
3688 {
3689 *pair = TRUE;
3690 *rt2 = AARCH64_RT2 (insn);
3691 }
3692 *load = AARCH64_LD (insn);
3693 return TRUE;
3694 }
3695 else if (AARCH64_LDST_NAP (insn)
3696 || AARCH64_LDSTP_PI (insn)
3697 || AARCH64_LDSTP_O (insn)
3698 || AARCH64_LDSTP_PRE (insn))
3699 {
3700 *pair = TRUE;
3701 *rt = AARCH64_RT (insn);
3702 *rt2 = AARCH64_RT2 (insn);
3703 *load = AARCH64_LD (insn);
3704 return TRUE;
3705 }
3706 else if (AARCH64_LDST_PCREL (insn)
3707 || AARCH64_LDST_UI (insn)
3708 || AARCH64_LDST_PIIMM (insn)
3709 || AARCH64_LDST_U (insn)
3710 || AARCH64_LDST_PREIMM (insn)
3711 || AARCH64_LDST_RO (insn)
3712 || AARCH64_LDST_UIMM (insn))
3713 {
3714 *rt = AARCH64_RT (insn);
3715 *rt2 = *rt;
3716 if (AARCH64_LDST_PCREL (insn))
3717 *load = TRUE;
3718 opc = AARCH64_BITS (insn, 22, 2);
3719 v = AARCH64_BIT (insn, 26);
3720 opc_v = opc | (v << 2);
3721 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3722 || opc_v == 5 || opc_v == 7);
3723 return TRUE;
3724 }
3725 else if (AARCH64_LDST_SIMD_M (insn)
3726 || AARCH64_LDST_SIMD_M_PI (insn))
3727 {
3728 *rt = AARCH64_RT (insn);
3729 *load = AARCH64_BIT (insn, 22);
3730 opcode = (insn >> 12) & 0xf;
3731 switch (opcode)
3732 {
3733 case 0:
3734 case 2:
3735 *rt2 = *rt + 3;
3736 break;
3737
3738 case 4:
3739 case 6:
3740 *rt2 = *rt + 2;
3741 break;
3742
3743 case 7:
3744 *rt2 = *rt;
3745 break;
3746
3747 case 8:
3748 case 10:
3749 *rt2 = *rt + 1;
3750 break;
3751
3752 default:
3753 return FALSE;
3754 }
3755 return TRUE;
3756 }
3757 else if (AARCH64_LDST_SIMD_S (insn)
3758 || AARCH64_LDST_SIMD_S_PI (insn))
3759 {
3760 *rt = AARCH64_RT (insn);
3761 r = (insn >> 21) & 1;
3762 *load = AARCH64_BIT (insn, 22);
3763 opcode = (insn >> 13) & 0x7;
3764 switch (opcode)
3765 {
3766 case 0:
3767 case 2:
3768 case 4:
3769 *rt2 = *rt + r;
3770 break;
3771
3772 case 1:
3773 case 3:
3774 case 5:
3775 *rt2 = *rt + (r == 0 ? 2 : 3);
3776 break;
3777
3778 case 6:
3779 *rt2 = *rt + r;
3780 break;
3781
3782 case 7:
3783 *rt2 = *rt + (r == 0 ? 2 : 3);
3784 break;
3785
3786 default:
3787 return FALSE;
3788 }
3789 return TRUE;
3790 }
3791
3792 return FALSE;
3793 }
3794
3795 /* Return TRUE if INSN is multiply-accumulate. */
3796
3797 static bfd_boolean
3798 aarch64_mlxl_p (uint32_t insn)
3799 {
3800 uint32_t op31 = AARCH64_OP31 (insn);
3801
3802 if (AARCH64_MAC (insn)
3803 && (op31 == 0 || op31 == 1 || op31 == 5)
3804 /* Exclude MUL instructions which are encoded as a multiply-accumulate
3805 with RA = XZR. */
3806 && AARCH64_RA (insn) != AARCH64_ZR)
3807 return TRUE;
3808
3809 return FALSE;
3810 }
3811
3812 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3813 it is possible for a 64-bit multiply-accumulate instruction to generate an
3814 incorrect result. The details are quite complex and hard to
3815 determine statically, since branches in the code may exist in some
3816 circumstances, but all cases end with a memory (load, store, or
3817 prefetch) instruction followed immediately by the multiply-accumulate
3818 operation. We employ a linker patching technique, by moving the potentially
3819 affected multiply-accumulate instruction into a patch region and replacing
3820 the original instruction with a branch to the patch. This function checks
3821 if INSN_1 is the memory operation followed by a multiply-accumulate
3822 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3823 if INSN_1 and INSN_2 are safe. */
3824
3825 static bfd_boolean
3826 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3827 {
3828 uint32_t rt;
3829 uint32_t rt2;
3830 uint32_t rn;
3831 uint32_t rm;
3832 uint32_t ra;
3833 bfd_boolean pair;
3834 bfd_boolean load;
3835
3836 if (aarch64_mlxl_p (insn_2)
3837 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3838 {
3839 /* Any SIMD memory op is independent of the subsequent MLA
3840 by definition of the erratum. */
3841 if (AARCH64_BIT (insn_1, 26))
3842 return TRUE;
3843
3844 /* If not SIMD, check for integer memory ops and MLA relationship. */
3845 rn = AARCH64_RN (insn_2);
3846 ra = AARCH64_RA (insn_2);
3847 rm = AARCH64_RM (insn_2);
3848
3849 /* If this is a load and there's a true (RAW) dependency, we are safe
3850 and this is not an erratum sequence. */
3851 if (load &&
3852 (rt == rn || rt == rm || rt == ra
3853 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3854 return FALSE;
3855
3856 /* We conservatively put out stubs for all other cases (including
3857 writebacks). */
3858 return TRUE;
3859 }
3860
3861 return FALSE;
3862 }
3863
3864 /* Used to order a list of mapping symbols by address. */
3865
3866 static int
3867 elf_aarch64_compare_mapping (const void *a, const void *b)
3868 {
3869 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3870 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3871
3872 if (amap->vma > bmap->vma)
3873 return 1;
3874 else if (amap->vma < bmap->vma)
3875 return -1;
3876 else if (amap->type > bmap->type)
3877 /* Ensure results do not depend on the host qsort for objects with
3878 multiple mapping symbols at the same address by sorting on type
3879 after vma. */
3880 return 1;
3881 else if (amap->type < bmap->type)
3882 return -1;
3883 else
3884 return 0;
3885 }
3886
3887
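/* Construct the name of erratum 835769 veneer number NUM_FIXES, of the
   form "__erratum_835769_veneer_<n>".  */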
3888 static char *
3889 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3890 {
3891 char *stub_name = (char *) bfd_malloc
3892 (strlen ("__erratum_835769_veneer_") + 16);
3893 if (stub_name != NULL)
3894 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3895 return stub_name;
3896 }
3897
3898 /* Scan for Cortex-A53 erratum 835769 sequence.
3899
3900 Return TRUE on a successful scan, FALSE on abnormal termination. */
3901
3902 static bfd_boolean
3903 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3904 struct bfd_link_info *info,
3905 unsigned int *num_fixes_p)
3906 {
3907 asection *section;
3908 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3909 unsigned int num_fixes = *num_fixes_p;
3910
3911 if (htab == NULL)
3912 return TRUE;
3913
3914 for (section = input_bfd->sections;
3915 section != NULL;
3916 section = section->next)
3917 {
3918 bfd_byte *contents = NULL;
3919 struct _aarch64_elf_section_data *sec_data;
3920 unsigned int span;
3921
3922 if (elf_section_type (section) != SHT_PROGBITS
3923 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3924 || (section->flags & SEC_EXCLUDE) != 0
3925 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3926 || (section->output_section == bfd_abs_section_ptr))
3927 continue;
3928
3929 if (elf_section_data (section)->this_hdr.contents != NULL)
3930 contents = elf_section_data (section)->this_hdr.contents;
3931 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3932 return FALSE;
3933
3934 sec_data = elf_aarch64_section_data (section);
3935
3936 qsort (sec_data->map, sec_data->mapcount,
3937 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3938
3939 for (span = 0; span < sec_data->mapcount; span++)
3940 {
3941 unsigned int span_start = sec_data->map[span].vma;
3942 unsigned int span_end = ((span == sec_data->mapcount - 1)
3943 ? sec_data->map[0].vma + section->size
3944 : sec_data->map[span + 1].vma);
3945 unsigned int i;
3946 char span_type = sec_data->map[span].type;
3947
3948 if (span_type == 'd')
3949 continue;
3950
3951 for (i = span_start; i + 4 < span_end; i += 4)
3952 {
3953 uint32_t insn_1 = bfd_getl32 (contents + i);
3954 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3955
3956 if (aarch64_erratum_sequence (insn_1, insn_2))
3957 {
3958 struct elf_aarch64_stub_hash_entry *stub_entry;
3959 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3960 if (! stub_name)
3961 return FALSE;
3962
3963 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3964 section,
3965 htab);
3966 if (! stub_entry)
3967 return FALSE;
3968
3969 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3970 stub_entry->target_section = section;
3971 stub_entry->target_value = i + 4;
3972 stub_entry->veneered_insn = insn_2;
3973 stub_entry->output_name = stub_name;
3974 num_fixes++;
3975 }
3976 }
3977 }
3978 if (elf_section_data (section)->this_hdr.contents == NULL)
3979 free (contents);
3980 }
3981
3982 *num_fixes_p = num_fixes;
3983
3984 return TRUE;
3985 }
3986
3987
3988 /* Test if instruction INSN is ADRP. */
3989
3990 static bfd_boolean
3991 _bfd_aarch64_adrp_p (uint32_t insn)
3992 {
3993 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
3994 }
3995
3996
3997 /* Helper predicate to look for Cortex-A53 erratum 843419 sequence 1. */
3998
3999 static bfd_boolean
4000 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
4001 uint32_t insn_3)
4002 {
4003 uint32_t rt;
4004 uint32_t rt2;
4005 bfd_boolean pair;
4006 bfd_boolean load;
4007
4008 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
4009 && (!pair
4010 || (pair && !load))
4011 && AARCH64_LDST_UIMM (insn_3)
4012 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
4013 }
4014
4015
4016 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
4017
4018 Return TRUE if section CONTENTS at offset I contains one of the
4019 erratum 843419 sequences, otherwise return FALSE. If a sequence is
4020 seen, set P_VENEER_I to the offset of the final LOAD/STORE
4021 instruction in the sequence.
4022 */
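/* An illustrative (not exhaustive) instance of the sequence, with the
   ADRP in one of the last two instruction slots of a 4KB page:

     adrp x0, sym                 <-- at an address ending in 0xff8 or 0xffc
     ldr  x1, [x2, #16]           <-- any load/store except a load pair
     ldr  x3, [x0, #:lo12:sym]    <-- loads/stores via the ADRP result

   The final load/store may also occur one instruction later (the
   insn_4 case below).  */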
4023
4024 static bfd_boolean
4025 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
4026 bfd_vma i, bfd_vma span_end,
4027 bfd_vma *p_veneer_i)
4028 {
4029 uint32_t insn_1 = bfd_getl32 (contents + i);
4030
4031 if (!_bfd_aarch64_adrp_p (insn_1))
4032 return FALSE;
4033
4034 if (span_end < i + 12)
4035 return FALSE;
4036
4037 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4038 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
4039
4040 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
4041 return FALSE;
4042
4043 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
4044 {
4045 *p_veneer_i = i + 8;
4046 return TRUE;
4047 }
4048
4049 if (span_end < i + 16)
4050 return FALSE;
4051
4052 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
4053
4054 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
4055 {
4056 *p_veneer_i = i + 12;
4057 return TRUE;
4058 }
4059
4060 return FALSE;
4061 }
4062
4063
4064 /* Resize all stub sections. */
4065
4066 static void
4067 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
4068 {
4069 asection *section;
4070
4071 /* OK, we've added some stubs. Find out the new size of the
4072 stub sections. */
4073 for (section = htab->stub_bfd->sections;
4074 section != NULL; section = section->next)
4075 {
4076 /* Ignore non-stub sections. */
4077 if (!strstr (section->name, STUB_SUFFIX))
4078 continue;
4079 section->size = 0;
4080 }
4081
4082 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
4083
4084 for (section = htab->stub_bfd->sections;
4085 section != NULL; section = section->next)
4086 {
4087 if (!strstr (section->name, STUB_SUFFIX))
4088 continue;
4089
4090 /* Add 8 bytes for a branch around the stubs and a trailing nop, keeping
4091 the section 8 byte aligned, as long branch stubs contain a 64-bit address. */
4092 if (section->size)
4093 section->size += 8;
4094
4095 /* Ensure all stub sections have a size which is a multiple of
4096 4096. This is important in order to ensure that the insertion
4097 of stub sections does not in itself move existing code around
4098 in such a way that new errata sequences are created. We only do this
4099 when the ADRP workaround is enabled. If only the ADR workaround is
4100 enabled then the stubs workaround won't ever be used. */
4101 if (htab->fix_erratum_843419 & ERRAT_ADRP)
4102 if (section->size)
4103 section->size = BFD_ALIGN (section->size, 0x1000);
4104 }
4105 }
4106
4107 /* Construct an erratum 843419 workaround stub name. */
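/* The generated name has the form e843419@BBBB_SSSSSSSS_OFF, i.e. the
   owning bfd id and section id in hex followed by the section offset,
   for example "e843419@0004_00000012_ff8" (illustrative values only).  */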
4108
4109 static char *
4110 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
4111 bfd_vma offset)
4112 {
4113 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
4114 char *stub_name = bfd_malloc (len);
4115
4116 if (stub_name != NULL)
4117 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
4118 input_section->owner->id,
4119 input_section->id,
4120 offset);
4121 return stub_name;
4122 }
4123
4124 /* Build a stub_entry structure describing an 843419 fixup.
4125
4126 The stub_entry constructed is populated with the bit pattern INSN
4127 of the load/store instruction located at LDST_OFFSET within input SECTION.
4128
4129 Returns TRUE on success. */
4130
4131 static bfd_boolean
4132 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
4133 bfd_vma adrp_offset,
4134 bfd_vma ldst_offset,
4135 asection *section,
4136 struct bfd_link_info *info)
4137 {
4138 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4139 char *stub_name;
4140 struct elf_aarch64_stub_hash_entry *stub_entry;
4141
4142 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
4143 if (stub_name == NULL)
4144 return FALSE;
4145 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4146 FALSE, FALSE);
4147 if (stub_entry)
4148 {
4149 free (stub_name);
4150 return TRUE;
4151 }
4152
4153 /* We always place an 843419 workaround veneer in the stub section
4154 attached to the input section in which an erratum sequence has
4155 been found. This ensures that later in the link process (in
4156 elfNN_aarch64_write_section) when we copy the veneered
4157 instruction from the input section into the stub section the
4158 copied instruction will have had any relocations applied to it.
4159 If we placed workaround veneers in any other stub section then we
4160 could not assume that all relocations have been processed on the
4161 corresponding input section at the point we output the stub
4162 section. */
4163
4164 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
4165 if (stub_entry == NULL)
4166 {
4167 free (stub_name);
4168 return FALSE;
4169 }
4170
4171 stub_entry->adrp_offset = adrp_offset;
4172 stub_entry->target_value = ldst_offset;
4173 stub_entry->target_section = section;
4174 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
4175 stub_entry->veneered_insn = insn;
4176 stub_entry->output_name = stub_name;
4177
4178 return TRUE;
4179 }
4180
4181
4182 /* Scan an input section looking for the signature of erratum 843419.
4183
4184 Scans input SECTION in INPUT_BFD looking for erratum 843419
4185 signatures; for each signature found, a stub_entry is created
4186 describing the location of the erratum for subsequent fixup.
4187
4188 Return TRUE on successful scan, FALSE on failure to scan.
4189 */
4190
4191 static bfd_boolean
4192 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
4193 struct bfd_link_info *info)
4194 {
4195 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4196
4197 if (htab == NULL)
4198 return TRUE;
4199
4200 if (elf_section_type (section) != SHT_PROGBITS
4201 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4202 || (section->flags & SEC_EXCLUDE) != 0
4203 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4204 || (section->output_section == bfd_abs_section_ptr))
4205 return TRUE;
4206
4207 do
4208 {
4209 bfd_byte *contents = NULL;
4210 struct _aarch64_elf_section_data *sec_data;
4211 unsigned int span;
4212
4213 if (elf_section_data (section)->this_hdr.contents != NULL)
4214 contents = elf_section_data (section)->this_hdr.contents;
4215 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4216 return FALSE;
4217
4218 sec_data = elf_aarch64_section_data (section);
4219
4220 qsort (sec_data->map, sec_data->mapcount,
4221 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4222
4223 for (span = 0; span < sec_data->mapcount; span++)
4224 {
4225 unsigned int span_start = sec_data->map[span].vma;
4226 unsigned int span_end = ((span == sec_data->mapcount - 1)
4227 ? sec_data->map[0].vma + section->size
4228 : sec_data->map[span + 1].vma);
4229 unsigned int i;
4230 char span_type = sec_data->map[span].type;
4231
4232 if (span_type == 'd')
4233 continue;
4234
4235 for (i = span_start; i + 8 < span_end; i += 4)
4236 {
4237 bfd_vma vma = (section->output_section->vma
4238 + section->output_offset
4239 + i);
4240 bfd_vma veneer_i;
4241
4242 if (_bfd_aarch64_erratum_843419_p
4243 (contents, vma, i, span_end, &veneer_i))
4244 {
4245 uint32_t insn = bfd_getl32 (contents + veneer_i);
4246
4247 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
4248 section, info))
4249 return FALSE;
4250 }
4251 }
4252 }
4253
4254 if (elf_section_data (section)->this_hdr.contents == NULL)
4255 free (contents);
4256 }
4257 while (0);
4258
4259 return TRUE;
4260 }
4261
4262
4263 /* Determine and set the size of the stub section for a final link.
4264
4265 The basic idea here is to examine all the relocations looking for
4266 PC-relative calls to a target that is unreachable with a "bl"
4267 instruction. */
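/* In outline: input sections are grouped (group_sections), the erratum
   835769 and 843419 scans add their veneers when the corresponding
   workarounds are enabled, and then the CALL26/JUMP26 relocations are
   scanned repeatedly, adding branch stubs and re-running section layout
   until no further stub is required.  */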
4268
4269 bfd_boolean
4270 elfNN_aarch64_size_stubs (bfd *output_bfd,
4271 bfd *stub_bfd,
4272 struct bfd_link_info *info,
4273 bfd_signed_vma group_size,
4274 asection * (*add_stub_section) (const char *,
4275 asection *),
4276 void (*layout_sections_again) (void))
4277 {
4278 bfd_size_type stub_group_size;
4279 bfd_boolean stubs_always_before_branch;
4280 bfd_boolean stub_changed = FALSE;
4281 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4282 unsigned int num_erratum_835769_fixes = 0;
4283
4284 /* Propagate mach to stub bfd, because it may not have been
4285 finalized when we created stub_bfd. */
4286 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4287 bfd_get_mach (output_bfd));
4288
4289 /* Stash our params away. */
4290 htab->stub_bfd = stub_bfd;
4291 htab->add_stub_section = add_stub_section;
4292 htab->layout_sections_again = layout_sections_again;
4293 stubs_always_before_branch = group_size < 0;
4294 if (group_size < 0)
4295 stub_group_size = -group_size;
4296 else
4297 stub_group_size = group_size;
4298
4299 if (stub_group_size == 1)
4300 {
4301 /* Default values. */
4302 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
4303 stub_group_size = 127 * 1024 * 1024;
4304 }
4305
4306 group_sections (htab, stub_group_size, stubs_always_before_branch);
4307
4308 (*htab->layout_sections_again) ();
4309
4310 if (htab->fix_erratum_835769)
4311 {
4312 bfd *input_bfd;
4313
4314 for (input_bfd = info->input_bfds;
4315 input_bfd != NULL; input_bfd = input_bfd->link.next)
4316 {
4317 if (!is_aarch64_elf (input_bfd)
4318 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4319 continue;
4320
4321 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
4322 &num_erratum_835769_fixes))
4323 return FALSE;
4324 }
4325
4326 _bfd_aarch64_resize_stubs (htab);
4327 (*htab->layout_sections_again) ();
4328 }
4329
4330 if (htab->fix_erratum_843419 != ERRAT_NONE)
4331 {
4332 bfd *input_bfd;
4333
4334 for (input_bfd = info->input_bfds;
4335 input_bfd != NULL;
4336 input_bfd = input_bfd->link.next)
4337 {
4338 asection *section;
4339
4340 if (!is_aarch64_elf (input_bfd)
4341 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4342 continue;
4343
4344 for (section = input_bfd->sections;
4345 section != NULL;
4346 section = section->next)
4347 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
4348 return FALSE;
4349 }
4350
4351 _bfd_aarch64_resize_stubs (htab);
4352 (*htab->layout_sections_again) ();
4353 }
4354
4355 while (1)
4356 {
4357 bfd *input_bfd;
4358
4359 for (input_bfd = info->input_bfds;
4360 input_bfd != NULL; input_bfd = input_bfd->link.next)
4361 {
4362 Elf_Internal_Shdr *symtab_hdr;
4363 asection *section;
4364 Elf_Internal_Sym *local_syms = NULL;
4365
4366 if (!is_aarch64_elf (input_bfd)
4367 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4368 continue;
4369
4370 /* We'll need the symbol table in a second. */
4371 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4372 if (symtab_hdr->sh_info == 0)
4373 continue;
4374
4375 /* Walk over each section attached to the input bfd. */
4376 for (section = input_bfd->sections;
4377 section != NULL; section = section->next)
4378 {
4379 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4380
4381 /* If there aren't any relocs, then there's nothing more
4382 to do. */
4383 if ((section->flags & SEC_RELOC) == 0
4384 || section->reloc_count == 0
4385 || (section->flags & SEC_CODE) == 0)
4386 continue;
4387
4388 /* If this section is a link-once section that will be
4389 discarded, then don't create any stubs. */
4390 if (section->output_section == NULL
4391 || section->output_section->owner != output_bfd)
4392 continue;
4393
4394 /* Get the relocs. */
4395 internal_relocs
4396 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4397 NULL, info->keep_memory);
4398 if (internal_relocs == NULL)
4399 goto error_ret_free_local;
4400
4401 /* Now examine each relocation. */
4402 irela = internal_relocs;
4403 irelaend = irela + section->reloc_count;
4404 for (; irela < irelaend; irela++)
4405 {
4406 unsigned int r_type, r_indx;
4407 enum elf_aarch64_stub_type stub_type;
4408 struct elf_aarch64_stub_hash_entry *stub_entry;
4409 asection *sym_sec;
4410 bfd_vma sym_value;
4411 bfd_vma destination;
4412 struct elf_aarch64_link_hash_entry *hash;
4413 const char *sym_name;
4414 char *stub_name;
4415 const asection *id_sec;
4416 unsigned char st_type;
4417 bfd_size_type len;
4418
4419 r_type = ELFNN_R_TYPE (irela->r_info);
4420 r_indx = ELFNN_R_SYM (irela->r_info);
4421
4422 if (r_type >= (unsigned int) R_AARCH64_end)
4423 {
4424 bfd_set_error (bfd_error_bad_value);
4425 error_ret_free_internal:
4426 if (elf_section_data (section)->relocs == NULL)
4427 free (internal_relocs);
4428 goto error_ret_free_local;
4429 }
4430
4431 /* Only look for stubs on unconditional branch and
4432 branch and link instructions. */
4433 if (r_type != (unsigned int) AARCH64_R (CALL26)
4434 && r_type != (unsigned int) AARCH64_R (JUMP26))
4435 continue;
4436
4437 /* Now determine the call target, its name, value,
4438 section. */
4439 sym_sec = NULL;
4440 sym_value = 0;
4441 destination = 0;
4442 hash = NULL;
4443 sym_name = NULL;
4444 if (r_indx < symtab_hdr->sh_info)
4445 {
4446 /* It's a local symbol. */
4447 Elf_Internal_Sym *sym;
4448 Elf_Internal_Shdr *hdr;
4449
4450 if (local_syms == NULL)
4451 {
4452 local_syms
4453 = (Elf_Internal_Sym *) symtab_hdr->contents;
4454 if (local_syms == NULL)
4455 local_syms
4456 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4457 symtab_hdr->sh_info, 0,
4458 NULL, NULL, NULL);
4459 if (local_syms == NULL)
4460 goto error_ret_free_internal;
4461 }
4462
4463 sym = local_syms + r_indx;
4464 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4465 sym_sec = hdr->bfd_section;
4466 if (!sym_sec)
4467 /* This is an undefined symbol. It can never
4468 be resolved. */
4469 continue;
4470
4471 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4472 sym_value = sym->st_value;
4473 destination = (sym_value + irela->r_addend
4474 + sym_sec->output_offset
4475 + sym_sec->output_section->vma);
4476 st_type = ELF_ST_TYPE (sym->st_info);
4477 sym_name
4478 = bfd_elf_string_from_elf_section (input_bfd,
4479 symtab_hdr->sh_link,
4480 sym->st_name);
4481 }
4482 else
4483 {
4484 int e_indx;
4485
4486 e_indx = r_indx - symtab_hdr->sh_info;
4487 hash = ((struct elf_aarch64_link_hash_entry *)
4488 elf_sym_hashes (input_bfd)[e_indx]);
4489
4490 while (hash->root.root.type == bfd_link_hash_indirect
4491 || hash->root.root.type == bfd_link_hash_warning)
4492 hash = ((struct elf_aarch64_link_hash_entry *)
4493 hash->root.root.u.i.link);
4494
4495 if (hash->root.root.type == bfd_link_hash_defined
4496 || hash->root.root.type == bfd_link_hash_defweak)
4497 {
4498 struct elf_aarch64_link_hash_table *globals =
4499 elf_aarch64_hash_table (info);
4500 sym_sec = hash->root.root.u.def.section;
4501 sym_value = hash->root.root.u.def.value;
4502 /* For a destination in a shared library,
4503 use the PLT stub as target address to
4504 decide whether a branch stub is
4505 needed. */
4506 if (globals->root.splt != NULL && hash != NULL
4507 && hash->root.plt.offset != (bfd_vma) - 1)
4508 {
4509 sym_sec = globals->root.splt;
4510 sym_value = hash->root.plt.offset;
4511 if (sym_sec->output_section != NULL)
4512 destination = (sym_value
4513 + sym_sec->output_offset
4514 + sym_sec->output_section->vma);
4516 }
4517 else if (sym_sec->output_section != NULL)
4518 destination = (sym_value + irela->r_addend
4519 + sym_sec->output_offset
4520 + sym_sec->output_section->vma);
4521 }
4522 else if (hash->root.root.type == bfd_link_hash_undefined
4523 || (hash->root.root.type
4524 == bfd_link_hash_undefweak))
4525 {
4526 /* For a shared library, use the PLT stub as
4527 target address to decide whether a long
4528 branch stub is needed.
4529 For absolute code, they cannot be handled. */
4530 struct elf_aarch64_link_hash_table *globals =
4531 elf_aarch64_hash_table (info);
4532
4533 if (globals->root.splt != NULL && hash != NULL
4534 && hash->root.plt.offset != (bfd_vma) - 1)
4535 {
4536 sym_sec = globals->root.splt;
4537 sym_value = hash->root.plt.offset;
4538 if (sym_sec->output_section != NULL)
4539 destination = (sym_value
4540 + sym_sec->output_offset
4541 + sym_sec->output_section->vma);
4543 }
4544 else
4545 continue;
4546 }
4547 else
4548 {
4549 bfd_set_error (bfd_error_bad_value);
4550 goto error_ret_free_internal;
4551 }
4552 st_type = ELF_ST_TYPE (hash->root.type);
4553 sym_name = hash->root.root.root.string;
4554 }
4555
4556 /* Determine what (if any) linker stub is needed. */
4557 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4558 st_type, destination);
4559 if (stub_type == aarch64_stub_none)
4560 continue;
4561
4562 /* Support for grouping stub sections. */
4563 id_sec = htab->stub_group[section->id].link_sec;
4564
4565 /* Get the name of this stub. */
4566 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4567 irela);
4568 if (!stub_name)
4569 goto error_ret_free_internal;
4570
4571 stub_entry =
4572 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4573 stub_name, FALSE, FALSE);
4574 if (stub_entry != NULL)
4575 {
4576 /* The proper stub has already been created. */
4577 free (stub_name);
4578 /* Always update this stub's target since it may have
4579 changed after layout. */
4580 stub_entry->target_value = sym_value + irela->r_addend;
4581 continue;
4582 }
4583
4584 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4585 (stub_name, section, htab);
4586 if (stub_entry == NULL)
4587 {
4588 free (stub_name);
4589 goto error_ret_free_internal;
4590 }
4591
4592 stub_entry->target_value = sym_value + irela->r_addend;
4593 stub_entry->target_section = sym_sec;
4594 stub_entry->stub_type = stub_type;
4595 stub_entry->h = hash;
4596 stub_entry->st_type = st_type;
4597
4598 if (sym_name == NULL)
4599 sym_name = "unnamed";
4600 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4601 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4602 if (stub_entry->output_name == NULL)
4603 {
4604 free (stub_name);
4605 goto error_ret_free_internal;
4606 }
4607
4608 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4609 sym_name);
4610
4611 stub_changed = TRUE;
4612 }
4613
4614 /* We're done with the internal relocs, free them. */
4615 if (elf_section_data (section)->relocs == NULL)
4616 free (internal_relocs);
4617 }
4618 }
4619
4620 if (!stub_changed)
4621 break;
4622
4623 _bfd_aarch64_resize_stubs (htab);
4624
4625 /* Ask the linker to do its stuff. */
4626 (*htab->layout_sections_again) ();
4627 stub_changed = FALSE;
4628 }
4629
4630 return TRUE;
4631
4632 error_ret_free_local:
4633 return FALSE;
4634 }
4635
4636 /* Build all the stubs associated with the current output file. The
4637 stubs are kept in a hash table attached to the main linker hash
4638 table. This function is called via aarch64_elf_finish in the
4640 linker. */
4641
4642 bfd_boolean
4643 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4644 {
4645 asection *stub_sec;
4646 struct bfd_hash_table *table;
4647 struct elf_aarch64_link_hash_table *htab;
4648
4649 htab = elf_aarch64_hash_table (info);
4650
4651 for (stub_sec = htab->stub_bfd->sections;
4652 stub_sec != NULL; stub_sec = stub_sec->next)
4653 {
4654 bfd_size_type size;
4655
4656 /* Ignore non-stub sections. */
4657 if (!strstr (stub_sec->name, STUB_SUFFIX))
4658 continue;
4659
4660 /* Allocate memory to hold the linker stubs. */
4661 size = stub_sec->size;
4662 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4663 if (stub_sec->contents == NULL && size != 0)
4664 return FALSE;
4665 stub_sec->size = 0;
4666
4667 /* Add a branch around the stub section, and a nop, to keep it 8 byte
4668 aligned, as long branch stubs contain a 64-bit address. */
4669 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4670 bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
4671 stub_sec->size += 8;
4672 }
4673
4674 /* Build the stubs as directed by the stub hash table. */
4675 table = &htab->stub_hash_table;
4676 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4677
4678 return TRUE;
4679 }
4680
4681
4682 /* Add an entry to the code/data map for section SEC. */
4683
4684 static void
4685 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4686 {
4687 struct _aarch64_elf_section_data *sec_data =
4688 elf_aarch64_section_data (sec);
4689 unsigned int newidx;
4690
4691 if (sec_data->map == NULL)
4692 {
4693 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4694 sec_data->mapcount = 0;
4695 sec_data->mapsize = 1;
4696 }
4697
4698 newidx = sec_data->mapcount++;
4699
4700 if (sec_data->mapcount > sec_data->mapsize)
4701 {
4702 sec_data->mapsize *= 2;
4703 sec_data->map = bfd_realloc_or_free
4704 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4705 }
4706
4707 if (sec_data->map)
4708 {
4709 sec_data->map[newidx].vma = vma;
4710 sec_data->map[newidx].type = type;
4711 }
4712 }
4713
4714
4715 /* Initialise maps of insn/data for input BFDs. */
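/* The map entries come from AArch64 ELF mapping symbols: "$x" marks the
   start of a code region and "$d" the start of a data region, so the
   type recorded below (name[1]) is 'x' or 'd'.  */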
4716 void
4717 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4718 {
4719 Elf_Internal_Sym *isymbuf;
4720 Elf_Internal_Shdr *hdr;
4721 unsigned int i, localsyms;
4722
4723 /* Make sure that we are dealing with an AArch64 elf binary. */
4724 if (!is_aarch64_elf (abfd))
4725 return;
4726
4727 if ((abfd->flags & DYNAMIC) != 0)
4728 return;
4729
4730 hdr = &elf_symtab_hdr (abfd);
4731 localsyms = hdr->sh_info;
4732
4733 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4734 should contain the number of local symbols, which should come before any
4735 global symbols. Mapping symbols are always local. */
4736 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4737
4738 /* No internal symbols read? Skip this BFD. */
4739 if (isymbuf == NULL)
4740 return;
4741
4742 for (i = 0; i < localsyms; i++)
4743 {
4744 Elf_Internal_Sym *isym = &isymbuf[i];
4745 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4746 const char *name;
4747
4748 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4749 {
4750 name = bfd_elf_string_from_elf_section (abfd,
4751 hdr->sh_link,
4752 isym->st_name);
4753
4754 if (bfd_is_aarch64_special_symbol_name
4755 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4756 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4757 }
4758 }
4759 }
4760
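/* Select the PLT0 and PLTn entry templates, and the PLTn entry size,
   according to PLT_TYPE: the BTI and/or PAC variants of the small PLT.  */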
4761 static void
4762 setup_plt_values (struct bfd_link_info *link_info,
4763 aarch64_plt_type plt_type)
4764 {
4765 struct elf_aarch64_link_hash_table *globals;
4766 globals = elf_aarch64_hash_table (link_info);
4767
4768 if (plt_type == PLT_BTI_PAC)
4769 {
4770 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4771
4772 /* Only in ET_EXEC do we need PLTn with BTI. */
4773 if (bfd_link_pde (link_info))
4774 {
4775 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
4776 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
4777 }
4778 else
4779 {
4780 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4781 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4782 }
4783 }
4784 else if (plt_type == PLT_BTI)
4785 {
4786 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4787
4788 /* Only in ET_EXEC do we need PLTn with BTI. */
4789 if (bfd_link_pde (link_info))
4790 {
4791 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
4792 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
4793 }
4794 }
4795 else if (plt_type == PLT_PAC)
4796 {
4797 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4798 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4799 }
4800 }
4801
4802 /* Set option values needed during linking. */
4803 void
4804 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4805 struct bfd_link_info *link_info,
4806 int no_enum_warn,
4807 int no_wchar_warn, int pic_veneer,
4808 int fix_erratum_835769,
4809 erratum_84319_opts fix_erratum_843419,
4810 int no_apply_dynamic_relocs,
4811 aarch64_bti_pac_info bp_info)
4812 {
4813 struct elf_aarch64_link_hash_table *globals;
4814
4815 globals = elf_aarch64_hash_table (link_info);
4816 globals->pic_veneer = pic_veneer;
4817 globals->fix_erratum_835769 = fix_erratum_835769;
4818 /* If the default options are used, then ERRAT_ADR will be set by default,
4819 which enables the ADRP->ADR variant of the erratum 843419
4820 workaround. */
4821 globals->fix_erratum_843419 = fix_erratum_843419;
4822 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
4823
4824 BFD_ASSERT (is_aarch64_elf (output_bfd));
4825 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4826 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4827
4828 switch (bp_info.bti_type)
4829 {
4830 case BTI_WARN:
4831 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
4832 elf_aarch64_tdata (output_bfd)->gnu_and_prop
4833 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
4834 break;
4835
4836 default:
4837 break;
4838 }
4839 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
4840 setup_plt_values (link_info, bp_info.plt_type);
4841 }
4842
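/* For a global symbol H, return the run-time address of its GOT entry.
   When the symbol resolves locally the entry is also initialised here
   with VALUE, the low bit of h->got.offset recording that this has been
   done; otherwise *UNRESOLVED_RELOC_P is cleared and the entry is left
   for the dynamic linker to fill in.  */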
4843 static bfd_vma
4844 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4845 struct elf_aarch64_link_hash_table
4846 *globals, struct bfd_link_info *info,
4847 bfd_vma value, bfd *output_bfd,
4848 bfd_boolean *unresolved_reloc_p)
4849 {
4850 bfd_vma off = (bfd_vma) - 1;
4851 asection *basegot = globals->root.sgot;
4852 bfd_boolean dyn = globals->root.dynamic_sections_created;
4853
4854 if (h != NULL)
4855 {
4856 BFD_ASSERT (basegot != NULL);
4857 off = h->got.offset;
4858 BFD_ASSERT (off != (bfd_vma) - 1);
4859 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4860 || (bfd_link_pic (info)
4861 && SYMBOL_REFERENCES_LOCAL (info, h))
4862 || (ELF_ST_VISIBILITY (h->other)
4863 && h->root.type == bfd_link_hash_undefweak))
4864 {
4865 /* This is actually a static link, or it is a -Bsymbolic link
4866 and the symbol is defined locally. We must initialize this
4867 entry in the global offset table. Since the offset must
4868 always be a multiple of 8 (4 in the case of ILP32), we use
4869 the least significant bit to record whether we have
4870 initialized it already.
4871 When doing a dynamic link, we create a .rel(a).got relocation
4872 entry to initialize the value. This is done in the
4873 finish_dynamic_symbol routine. */
4874 if ((off & 1) != 0)
4875 off &= ~1;
4876 else
4877 {
4878 bfd_put_NN (output_bfd, value, basegot->contents + off);
4879 h->got.offset |= 1;
4880 }
4881 }
4882 else
4883 *unresolved_reloc_p = FALSE;
4884
4885 off = off + basegot->output_section->vma + basegot->output_offset;
4886 }
4887
4888 return off;
4889 }
4890
4891 /* Change R_TYPE to a more efficient access model where possible,
4892 return the new reloc type. */
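/* For example (as handled by the switch below), when relaxation is
   permitted the general-dynamic pair
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 / _ADD_LO12_NC becomes the
   initial-exec pair BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 /
   _LDNN_GOTTPREL_LO12_NC when H is non-NULL, or the local-exec
   MOVW TPREL relocations when the symbol is local (H == NULL).  */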
4893
4894 static bfd_reloc_code_real_type
4895 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4896 struct elf_link_hash_entry *h)
4897 {
4898 bfd_boolean is_local = h == NULL;
4899
4900 switch (r_type)
4901 {
4902 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4903 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4904 return (is_local
4905 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4906 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4907
4908 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4909 return (is_local
4910 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4911 : r_type);
4912
4913 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4914 return (is_local
4915 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4916 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4917
4918 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4919 return (is_local
4920 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4921 : BFD_RELOC_AARCH64_NONE);
4922
4923 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4924 return (is_local
4925 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4926 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
4927
4928 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4929 return (is_local
4930 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4931 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
4932
4933 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4934 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4935 return (is_local
4936 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4937 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4938
4939 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4940 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4941
4942 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4943 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4944
4945 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4946 return r_type;
4947
4948 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4949 return (is_local
4950 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4951 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4952
4953 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4954 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
4955 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4956 /* Instructions with these relocations will become NOPs. */
4957 return BFD_RELOC_AARCH64_NONE;
4958
4959 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4960 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4961 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4962 return is_local ? BFD_RELOC_AARCH64_NONE : r_type;
4963
4964 #if ARCH_SIZE == 64
4965 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4966 return is_local
4967 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4968 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
4969
4970 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4971 return is_local
4972 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4973 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
4974 #endif
4975
4976 default:
4977 break;
4978 }
4979
4980 return r_type;
4981 }
4982
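/* Classify relocation R_TYPE by the kind of GOT entry it requires:
   GOT_NORMAL, GOT_TLS_GD, GOT_TLSDESC_GD or GOT_TLS_IE, or GOT_UNKNOWN
   for relocations that do not refer to the GOT.  */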
4983 static unsigned int
4984 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4985 {
4986 switch (r_type)
4987 {
4988 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4989 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4990 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4991 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4992 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
4993 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4994 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4995 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4996 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4997 return GOT_NORMAL;
4998
4999 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5000 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5001 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5002 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5003 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5004 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5005 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5006 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5007 return GOT_TLS_GD;
5008
5009 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5010 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5011 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5012 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5013 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5014 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5015 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
5016 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5017 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5018 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5019 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5020 return GOT_TLSDESC_GD;
5021
5022 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5023 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5024 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5025 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5026 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5027 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5028 return GOT_TLS_IE;
5029
5030 default:
5031 break;
5032 }
5033 return GOT_UNKNOWN;
5034 }
5035
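/* Return TRUE if the TLS access described by relocation R_TYPE against
   global symbol H (or local symbol R_SYMNDX) in INPUT_BFD may be relaxed
   to a cheaper access model.  Roughly: GD-style accesses to a symbol that
   already has an IE-style GOT entry may always be relaxed; other
   relaxations require an executable output and a symbol that is not an
   undefined weak.  */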
5036 static bfd_boolean
5037 aarch64_can_relax_tls (bfd *input_bfd,
5038 struct bfd_link_info *info,
5039 bfd_reloc_code_real_type r_type,
5040 struct elf_link_hash_entry *h,
5041 unsigned long r_symndx)
5042 {
5043 unsigned int symbol_got_type;
5044 unsigned int reloc_got_type;
5045
5046 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
5047 return FALSE;
5048
5049 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
5050 reloc_got_type = aarch64_reloc_got_type (r_type);
5051
5052 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
5053 return TRUE;
5054
5055 if (!bfd_link_executable (info))
5056 return FALSE;
5057
5058 if (h && h->root.type == bfd_link_hash_undefweak)
5059 return FALSE;
5060
5061 return TRUE;
5062 }
5063
5064 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
5065 enumerator. */
5066
5067 static bfd_reloc_code_real_type
5068 aarch64_tls_transition (bfd *input_bfd,
5069 struct bfd_link_info *info,
5070 unsigned int r_type,
5071 struct elf_link_hash_entry *h,
5072 unsigned long r_symndx)
5073 {
5074 bfd_reloc_code_real_type bfd_r_type
5075 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
5076
5077 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
5078 return bfd_r_type;
5079
5080 return aarch64_tls_transition_without_check (bfd_r_type, h);
5081 }
5082
5083 /* Return the base VMA address which should be subtracted from real addresses
5084 when resolving R_AARCH64_TLS_DTPREL relocations. */
5085
5086 static bfd_vma
5087 dtpoff_base (struct bfd_link_info *info)
5088 {
5089 /* If tls_sec is NULL, we should have signalled an error already. */
5090 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
5091 return elf_hash_table (info)->tls_sec->vma;
5092 }
5093
5094 /* Return the base VMA address which should be subtracted from real addresses
5095 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
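/* Put differently (a sketch of the arithmetic, not used directly): the
   TP-relative offset encoded for a symbol S is S's vma minus this base,
   i.e. align_power (TCB_SIZE, tls alignment) plus S's offset from the
   start of the TLS segment.  */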
5096
5097 static bfd_vma
5098 tpoff_base (struct bfd_link_info *info)
5099 {
5100 struct elf_link_hash_table *htab = elf_hash_table (info);
5101
5102 /* If tls_sec is NULL, we should have signalled an error already. */
5103 BFD_ASSERT (htab->tls_sec != NULL);
5104
5105 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
5106 htab->tls_sec->alignment_power);
5107 return htab->tls_sec->vma - base;
5108 }
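
/* Editor's sketch, not part of the upstream sources: AArch64 uses TLS
   variant 1, where the thread pointer points at the TCB and the static TLS
   block follows it, padded so the block keeps its own alignment.  Assuming
   TCB_SIZE == 16 and a TLS segment aligned to 16 bytes (alignment_power 4):

     base           = align_power (16, 4)            = 16
     tpoff_base ()  = tls_sec->vma - 16
     TPREL (sym)    = sym_vma - tpoff_base ()
                    = (sym_vma - tls_sec->vma) + 16

   so a variable at offset 8 into the TLS segment resolves to TP + 24.  The
   numbers are illustrative only.  */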
5109
5110 static bfd_vma *
5111 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5112 unsigned long r_symndx)
5113 {
5114 /* Return a pointer to the stored GOT offset for the symbol
5115 referred to by H, or for the local symbol R_SYMNDX. */
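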
5116 if (h != NULL)
5117 return &h->got.offset;
5118 else
5119 {
5120 /* local symbol */
5121 struct elf_aarch64_local_symbol *l;
5122
5123 l = elf_aarch64_locals (input_bfd);
5124 return &l[r_symndx].got_offset;
5125 }
5126 }
5127
5128 static void
5129 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5130 unsigned long r_symndx)
5131 {
5132 bfd_vma *p;
5133 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
5134 *p |= 1;
5135 }
5136
5137 static int
5138 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
5139 unsigned long r_symndx)
5140 {
5141 bfd_vma value;
5142 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5143 return value & 1;
5144 }
5145
5146 static bfd_vma
5147 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5148 unsigned long r_symndx)
5149 {
5150 bfd_vma value;
5151 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5152 value &= ~1;
5153 return value;
5154 }
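
/* Editor's sketch, not upstream code: GOT offsets are always multiples of
   GOT_ENTRY_SIZE, so bit 0 of the stored offset is free and is used by the
   helpers above as an "already processed" flag (abfd, h and r_symndx stand
   for the caller's values):

     bfd_vma *slot = symbol_got_offset_ref (abfd, h, r_symndx);
     *slot |= 1;                          // mark: GOT entry has been written
     int done = *slot & 1;                // query the mark
     bfd_vma off = *slot & ~(bfd_vma) 1;  // real offset with the mark stripped

   The TLSDESC helpers below use the same trick on their own slot.  */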
5155
5156 static bfd_vma *
5157 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5158 unsigned long r_symndx)
5159 {
5160 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
5161 referred to by H, or for the local symbol R_SYMNDX. */
5162 if (h != NULL)
5163 {
5164 struct elf_aarch64_link_hash_entry *eh;
5165 eh = (struct elf_aarch64_link_hash_entry *) h;
5166 return &eh->tlsdesc_got_jump_table_offset;
5167 }
5168 else
5169 {
5170 /* local symbol */
5171 struct elf_aarch64_local_symbol *l;
5172
5173 l = elf_aarch64_locals (input_bfd);
5174 return &l[r_symndx].tlsdesc_got_jump_table_offset;
5175 }
5176 }
5177
5178 static void
5179 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5180 unsigned long r_symndx)
5181 {
5182 bfd_vma *p;
5183 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5184 *p |= 1;
5185 }
5186
5187 static int
5188 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
5189 struct elf_link_hash_entry *h,
5190 unsigned long r_symndx)
5191 {
5192 bfd_vma value;
5193 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5194 return value & 1;
5195 }
5196
5197 static bfd_vma
5198 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5199 unsigned long r_symndx)
5200 {
5201 bfd_vma value;
5202 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5203 value &= ~1;
5204 return value;
5205 }
5206
5207 /* Data for make_branch_to_erratum_835769_stub(). */
5208
5209 struct erratum_835769_branch_to_stub_data
5210 {
5211 struct bfd_link_info *info;
5212 asection *output_section;
5213 bfd_byte *contents;
5214 };
5215
5216 /* Helper to insert branches to erratum 835769 stubs in the right
5217 places for a particular section. */
5218
5219 static bfd_boolean
5220 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
5221 void *in_arg)
5222 {
5223 struct elf_aarch64_stub_hash_entry *stub_entry;
5224 struct erratum_835769_branch_to_stub_data *data;
5225 bfd_byte *contents;
5226 unsigned long branch_insn = 0;
5227 bfd_vma veneered_insn_loc, veneer_entry_loc;
5228 bfd_signed_vma branch_offset;
5229 unsigned int target;
5230 bfd *abfd;
5231
5232 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5233 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
5234
5235 if (stub_entry->target_section != data->output_section
5236 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
5237 return TRUE;
5238
5239 contents = data->contents;
5240 veneered_insn_loc = stub_entry->target_section->output_section->vma
5241 + stub_entry->target_section->output_offset
5242 + stub_entry->target_value;
5243 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5244 + stub_entry->stub_sec->output_offset
5245 + stub_entry->stub_offset;
5246 branch_offset = veneer_entry_loc - veneered_insn_loc;
5247
5248 abfd = stub_entry->target_section->owner;
5249 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5250 _bfd_error_handler
5251 (_("%pB: error: erratum 835769 stub out "
5252 "of range (input file too large)"), abfd);
5253
5254 target = stub_entry->target_value;
5255 branch_insn = 0x14000000;
5256 branch_offset >>= 2;
5257 branch_offset &= 0x3ffffff;
5258 branch_insn |= branch_offset;
5259 bfd_putl32 (branch_insn, &contents[target]);
5260
5261 return TRUE;
5262 }
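
/* Worked example (editor's note, not upstream code): the branch written above
   is an unconditional B, opcode 0x14000000, with a signed 26-bit word offset
   in its low bits, giving a reach of +/-128 MiB.  For a veneer placed 0x1000
   bytes after the veneered instruction:

     branch_offset = 0x1000;
     branch_offset >>= 2;                        // 0x400 words
     branch_offset &= 0x3ffffff;
     branch_insn = 0x14000000 | branch_offset;   // 0x14000400 == "b .+0x1000"

   aarch64_valid_branch_p is what rejects offsets outside that reach.  */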
5263
5264
5265 static bfd_boolean
5266 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
5267 void *in_arg)
5268 {
5269 struct elf_aarch64_stub_hash_entry *stub_entry
5270 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5271 struct erratum_835769_branch_to_stub_data *data
5272 = (struct erratum_835769_branch_to_stub_data *) in_arg;
5273 struct bfd_link_info *info;
5274 struct elf_aarch64_link_hash_table *htab;
5275 bfd_byte *contents;
5276 asection *section;
5277 bfd *abfd;
5278 bfd_vma place;
5279 uint32_t insn;
5280
5281 info = data->info;
5282 contents = data->contents;
5283 section = data->output_section;
5284
5285 htab = elf_aarch64_hash_table (info);
5286
5287 if (stub_entry->target_section != section
5288 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
5289 return TRUE;
5290
5291 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
5292 || (htab->fix_erratum_843419 & ERRAT_ADR));
5293
5294 /* Only update the stub section if we have one. We should always have one
5295 when the ADRP erratum workaround is enabled; otherwise no stub is
5296 required. */
5297 if (stub_entry->stub_sec)
5298 {
5299 insn = bfd_getl32 (contents + stub_entry->target_value);
5300 bfd_putl32 (insn,
5301 stub_entry->stub_sec->contents + stub_entry->stub_offset);
5302 }
5303
5304 place = (section->output_section->vma + section->output_offset
5305 + stub_entry->adrp_offset);
5306 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
5307
5308 if (!_bfd_aarch64_adrp_p (insn))
5309 abort ();
5310
5311 bfd_signed_vma imm =
5312 (_bfd_aarch64_sign_extend
5313 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
5314 - (place & 0xfff));
5315
5316 if ((htab->fix_erratum_843419 & ERRAT_ADR)
5317 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
5318 {
5319 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
5320 | AARCH64_RT (insn));
5321 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
5322 /* Stub is not needed, don't map it out. */
5323 stub_entry->stub_type = aarch64_stub_none;
5324 }
5325 else if (htab->fix_erratum_843419 & ERRAT_ADRP)
5326 {
5327 bfd_vma veneered_insn_loc;
5328 bfd_vma veneer_entry_loc;
5329 bfd_signed_vma branch_offset;
5330 uint32_t branch_insn;
5331
5332 veneered_insn_loc = stub_entry->target_section->output_section->vma
5333 + stub_entry->target_section->output_offset
5334 + stub_entry->target_value;
5335 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5336 + stub_entry->stub_sec->output_offset
5337 + stub_entry->stub_offset;
5338 branch_offset = veneer_entry_loc - veneered_insn_loc;
5339
5340 abfd = stub_entry->target_section->owner;
5341 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5342 _bfd_error_handler
5343 (_("%pB: error: erratum 843419 stub out "
5344 "of range (input file too large)"), abfd);
5345
5346 branch_insn = 0x14000000;
5347 branch_offset >>= 2;
5348 branch_offset &= 0x3ffffff;
5349 branch_insn |= branch_offset;
5350 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
5351 }
5352 else
5353 {
5354 abfd = stub_entry->target_section->owner;
5355 _bfd_error_handler
5356 (_("%pB: error: erratum 843419 immediate 0x%" BFD_VMA_FMT "x "
5357 "out of range for ADR (input file too large) and "
5358 "--fix-cortex-a53-843419=adr used. Run the linker with "
5359 "--fix-cortex-a53-843419=full instead"), abfd, imm);
5360 bfd_set_error (bfd_error_bad_value);
5361 /* This function is called inside a hashtable traversal and the error
5362 handlers called above turn into non-fatal errors, which means that in
5363 this case ld returns exit code 0 and still produces a broken object file.
5364 To prevent this, issue a hard abort. */
5365 BFD_FAIL ();
5366 }
5367 return TRUE;
5368 }
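
/* Worked example (editor's note, not upstream code): when the ADR variant of
   the 843419 workaround is permitted, the ADRP above is rewritten into an ADR
   that computes the same address.  ADRP yields (place & ~0xfff) + (imm << 12)
   while ADR yields place + imm, so the replacement immediate is

     imm = sign_extend (adrp_imm << 12, 33) - (place & 0xfff)

   For instance, an ADRP at place 0x400f3c with a page immediate of +3 pages
   gives imm = 0x3000 - 0xf3c = 0x20c4, which fits the ADR range checked via
   AARCH64_MIN/MAX_ADRP_IMM, so no stub is needed and stub_type is reset to
   aarch64_stub_none.  The addresses are illustrative only.  */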
5369
5370
5371 static bfd_boolean
5372 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
5373 struct bfd_link_info *link_info,
5374 asection *sec,
5375 bfd_byte *contents)
5376
5377 {
5378 struct elf_aarch64_link_hash_table *globals =
5379 elf_aarch64_hash_table (link_info);
5380
5381 if (globals == NULL)
5382 return FALSE;
5383
5384 /* Fix code to point to erratum 835769 stubs. */
5385 if (globals->fix_erratum_835769)
5386 {
5387 struct erratum_835769_branch_to_stub_data data;
5388
5389 data.info = link_info;
5390 data.output_section = sec;
5391 data.contents = contents;
5392 bfd_hash_traverse (&globals->stub_hash_table,
5393 make_branch_to_erratum_835769_stub, &data);
5394 }
5395
5396 if (globals->fix_erratum_843419)
5397 {
5398 struct erratum_835769_branch_to_stub_data data;
5399
5400 data.info = link_info;
5401 data.output_section = sec;
5402 data.contents = contents;
5403 bfd_hash_traverse (&globals->stub_hash_table,
5404 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
5405 }
5406
5407 return FALSE;
5408 }
5409
5410 /* Return TRUE if RELOC is a relocation against the base of the GOT table. */
5411
5412 static bfd_boolean
5413 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
5414 {
5415 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
5416 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5417 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
5418 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
5419 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
5420 }
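
/* Editor's note (sketch): for the relocations above, the instruction encodes
   an offset from the GOT base rather than a PC-relative value, which is why
   the GOT code below passes the GOT base as the addend, e.g.:

     if (aarch64_relocation_aginst_gp_p (bfd_r_type))
       addend = (globals->root.sgot->output_section->vma
                 + globals->root.sgot->output_offset);

   so that the value is resolved relative to the start of the GOT.  */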
5421
5422 /* Perform a relocation as part of a final link. The input relocation type
5423 should be TLS relaxed. */
5424
5425 static bfd_reloc_status_type
5426 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
5427 bfd *input_bfd,
5428 bfd *output_bfd,
5429 asection *input_section,
5430 bfd_byte *contents,
5431 Elf_Internal_Rela *rel,
5432 bfd_vma value,
5433 struct bfd_link_info *info,
5434 asection *sym_sec,
5435 struct elf_link_hash_entry *h,
5436 bfd_boolean *unresolved_reloc_p,
5437 bfd_boolean save_addend,
5438 bfd_vma *saved_addend,
5439 Elf_Internal_Sym *sym)
5440 {
5441 Elf_Internal_Shdr *symtab_hdr;
5442 unsigned int r_type = howto->type;
5443 bfd_reloc_code_real_type bfd_r_type
5444 = elfNN_aarch64_bfd_reloc_from_howto (howto);
5445 unsigned long r_symndx;
5446 bfd_byte *hit_data = contents + rel->r_offset;
5447 bfd_vma place, off, got_entry_addr = 0;
5448 bfd_signed_vma signed_addend;
5449 struct elf_aarch64_link_hash_table *globals;
5450 bfd_boolean weak_undef_p;
5451 bfd_boolean relative_reloc;
5452 asection *base_got;
5453 bfd_vma orig_value = value;
5454 bfd_boolean resolved_to_zero;
5455 bfd_boolean abs_symbol_p;
5456
5457 globals = elf_aarch64_hash_table (info);
5458
5459 symtab_hdr = &elf_symtab_hdr (input_bfd);
5460
5461 BFD_ASSERT (is_aarch64_elf (input_bfd));
5462
5463 r_symndx = ELFNN_R_SYM (rel->r_info);
5464
5465 place = input_section->output_section->vma
5466 + input_section->output_offset + rel->r_offset;
5467
5468 /* Get addend, accumulating the addend for consecutive relocs
5469 which refer to the same offset. */
5470 signed_addend = saved_addend ? *saved_addend : 0;
5471 signed_addend += rel->r_addend;
5472
5473 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
5474 : bfd_is_und_section (sym_sec));
5475 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
5476
5477
5478 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
5479 it here if it is defined in a non-shared object. */
5480 if (h != NULL
5481 && h->type == STT_GNU_IFUNC
5482 && h->def_regular)
5483 {
5484 asection *plt;
5485 const char *name;
5486 bfd_vma addend = 0;
5487
5488 if ((input_section->flags & SEC_ALLOC) == 0)
5489 {
5490 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
5491 STT_GNU_IFUNC symbol as STT_FUNC. */
5492 if (elf_section_type (input_section) == SHT_NOTE)
5493 goto skip_ifunc;
5494
5495 /* Dynamic relocs are not propagated for SEC_DEBUGGING
5496 sections because such sections are not SEC_ALLOC and
5497 thus ld.so will not process them. */
5498 if ((input_section->flags & SEC_DEBUGGING) != 0)
5499 return bfd_reloc_ok;
5500
5501 if (h->root.root.string)
5502 name = h->root.root.string;
5503 else
5504 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
5505 _bfd_error_handler
5506 /* xgettext:c-format */
5507 (_("%pB(%pA+%#" PRIx64 "): "
5508 "unresolvable %s relocation against symbol `%s'"),
5509 input_bfd, input_section, (uint64_t) rel->r_offset,
5510 howto->name, name);
5511 bfd_set_error (bfd_error_bad_value);
5512 return bfd_reloc_notsupported;
5513 }
5514 else if (h->plt.offset == (bfd_vma) -1)
5515 goto bad_ifunc_reloc;
5516
5517 /* STT_GNU_IFUNC symbol must go through PLT. */
5518 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
5519 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
5520
5521 switch (bfd_r_type)
5522 {
5523 default:
5524 bad_ifunc_reloc:
5525 if (h->root.root.string)
5526 name = h->root.root.string;
5527 else
5528 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
5529 NULL);
5530 _bfd_error_handler
5531 /* xgettext:c-format */
5532 (_("%pB: relocation %s against STT_GNU_IFUNC "
5533 "symbol `%s' isn't handled by %s"), input_bfd,
5534 howto->name, name, __FUNCTION__);
5535 bfd_set_error (bfd_error_bad_value);
5536 return bfd_reloc_notsupported;
5537
5538 case BFD_RELOC_AARCH64_NN:
5539 if (rel->r_addend != 0)
5540 {
5541 if (h->root.root.string)
5542 name = h->root.root.string;
5543 else
5544 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5545 sym, NULL);
5546 _bfd_error_handler
5547 /* xgettext:c-format */
5548 (_("%pB: relocation %s against STT_GNU_IFUNC "
5549 "symbol `%s' has non-zero addend: %" PRId64),
5550 input_bfd, howto->name, name, (int64_t) rel->r_addend);
5551 bfd_set_error (bfd_error_bad_value);
5552 return bfd_reloc_notsupported;
5553 }
5554
5555 /* Generate dynamic relocation only when there is a
5556 non-GOT reference in a shared object. */
5557 if (bfd_link_pic (info) && h->non_got_ref)
5558 {
5559 Elf_Internal_Rela outrel;
5560 asection *sreloc;
5561
5562 /* Need a dynamic relocation to get the real function
5563 address. */
5564 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5565 info,
5566 input_section,
5567 rel->r_offset);
5568 if (outrel.r_offset == (bfd_vma) -1
5569 || outrel.r_offset == (bfd_vma) -2)
5570 abort ();
5571
5572 outrel.r_offset += (input_section->output_section->vma
5573 + input_section->output_offset);
5574
5575 if (h->dynindx == -1
5576 || h->forced_local
5577 || bfd_link_executable (info))
5578 {
5579 /* This symbol is resolved locally. */
5580 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5581 outrel.r_addend = (h->root.u.def.value
5582 + h->root.u.def.section->output_section->vma
5583 + h->root.u.def.section->output_offset);
5584 }
5585 else
5586 {
5587 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5588 outrel.r_addend = 0;
5589 }
5590
5591 sreloc = globals->root.irelifunc;
5592 elf_append_rela (output_bfd, sreloc, &outrel);
5593
5594 /* If this reloc is against an external symbol, we
5595 do not want to fiddle with the addend. Otherwise,
5596 we need to include the symbol value so that it
5597 becomes an addend for the dynamic reloc. For an
5598 internal symbol, we have already updated the addend. */
5599 return bfd_reloc_ok;
5600 }
5601 /* FALLTHROUGH */
5602 case BFD_RELOC_AARCH64_CALL26:
5603 case BFD_RELOC_AARCH64_JUMP26:
5604 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5605 place, value,
5606 signed_addend,
5607 weak_undef_p);
5608 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5609 howto, value);
5610 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5611 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5612 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5613 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5614 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5615 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5616 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5617 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5618 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5619 base_got = globals->root.sgot;
5620 off = h->got.offset;
5621
5622 if (base_got == NULL)
5623 abort ();
5624
5625 if (off == (bfd_vma) -1)
5626 {
5627 bfd_vma plt_index;
5628
5629 /* We can't use h->got.offset here to save state, or
5630 even just remember the offset, as finish_dynamic_symbol
5631 would use that as offset into .got. */
5632
5633 if (globals->root.splt != NULL)
5634 {
5635 plt_index = ((h->plt.offset - globals->plt_header_size) /
5636 globals->plt_entry_size);
5637 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5638 base_got = globals->root.sgotplt;
5639 }
5640 else
5641 {
5642 plt_index = h->plt.offset / globals->plt_entry_size;
5643 off = plt_index * GOT_ENTRY_SIZE;
5644 base_got = globals->root.igotplt;
5645 }
5646
5647 if (h->dynindx == -1
5648 || h->forced_local
5649 || info->symbolic)
5650 {
5651 /* This references the local definition. We must
5652 initialize this entry in the global offset table.
5653 Since the offset must always be a multiple of 8,
5654 we use the least significant bit to record
5655 whether we have initialized it already.
5656
5657 When doing a dynamic link, we create a .rela.got
5658 relocation entry to initialize the value. This
5659 is done in the finish_dynamic_symbol routine. */
5660 if ((off & 1) != 0)
5661 off &= ~1;
5662 else
5663 {
5664 bfd_put_NN (output_bfd, value,
5665 base_got->contents + off);
5666 /* Note that this is harmless as -1 | 1 still is -1. */
5667 h->got.offset |= 1;
5668 }
5669 }
5670 value = (base_got->output_section->vma
5671 + base_got->output_offset + off);
5672 }
5673 else
5674 value = aarch64_calculate_got_entry_vma (h, globals, info,
5675 value, output_bfd,
5676 unresolved_reloc_p);
5677
5678 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5679 addend = (globals->root.sgot->output_section->vma
5680 + globals->root.sgot->output_offset);
5681
5682 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5683 place, value,
5684 addend, weak_undef_p);
5685 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5686 case BFD_RELOC_AARCH64_ADD_LO12:
5687 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5688 break;
5689 }
5690 }
5691
5692 skip_ifunc:
5693 resolved_to_zero = (h != NULL
5694 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
5695
5696 switch (bfd_r_type)
5697 {
5698 case BFD_RELOC_AARCH64_NONE:
5699 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5700 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5701 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5702 *unresolved_reloc_p = FALSE;
5703 return bfd_reloc_ok;
5704
5705 case BFD_RELOC_AARCH64_NN:
5706
5707 /* When generating a shared object or relocatable executable, these
5708 relocations are copied into the output file to be resolved at
5709 run time. */
5710 if (((bfd_link_pic (info)
5711 || globals->root.is_relocatable_executable)
5712 && (input_section->flags & SEC_ALLOC)
5713 && (h == NULL
5714 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5715 && !resolved_to_zero)
5716 || h->root.type != bfd_link_hash_undefweak))
5717 /* Or we are creating an executable; we may need to keep relocations
5718 for symbols satisfied by a dynamic library if we manage to avoid
5719 copy relocs for the symbol. */
5720 || (ELIMINATE_COPY_RELOCS
5721 && !bfd_link_pic (info)
5722 && h != NULL
5723 && (input_section->flags & SEC_ALLOC)
5724 && h->dynindx != -1
5725 && !h->non_got_ref
5726 && ((h->def_dynamic
5727 && !h->def_regular)
5728 || h->root.type == bfd_link_hash_undefweak
5729 || h->root.type == bfd_link_hash_undefined)))
5730 {
5731 Elf_Internal_Rela outrel;
5732 bfd_byte *loc;
5733 bfd_boolean skip, relocate;
5734 asection *sreloc;
5735
5736 *unresolved_reloc_p = FALSE;
5737
5738 skip = FALSE;
5739 relocate = FALSE;
5740
5741 outrel.r_addend = signed_addend;
5742 outrel.r_offset =
5743 _bfd_elf_section_offset (output_bfd, info, input_section,
5744 rel->r_offset);
5745 if (outrel.r_offset == (bfd_vma) - 1)
5746 skip = TRUE;
5747 else if (outrel.r_offset == (bfd_vma) - 2)
5748 {
5749 skip = TRUE;
5750 relocate = TRUE;
5751 }
5752 else if (abs_symbol_p)
5753 {
5754 /* Local absolute symbol. */
5755 skip = (h->forced_local || (h->dynindx == -1));
5756 relocate = skip;
5757 }
5758
5759 outrel.r_offset += (input_section->output_section->vma
5760 + input_section->output_offset);
5761
5762 if (skip)
5763 memset (&outrel, 0, sizeof outrel);
5764 else if (h != NULL
5765 && h->dynindx != -1
5766 && (!bfd_link_pic (info)
5767 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
5768 || !h->def_regular))
5769 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5770 else
5771 {
5772 int symbol;
5773
5774 /* On SVR4-ish systems, the dynamic loader cannot
5775 relocate the text and data segments independently,
5776 so the symbol does not matter. */
5777 symbol = 0;
5778 relocate = globals->no_apply_dynamic_relocs ? FALSE : TRUE;
5779 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5780 outrel.r_addend += value;
5781 }
5782
5783 sreloc = elf_section_data (input_section)->sreloc;
5784 if (sreloc == NULL || sreloc->contents == NULL)
5785 return bfd_reloc_notsupported;
5786
5787 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5788 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5789
5790 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5791 {
5792 /* Sanity check that we have previously allocated
5793 sufficient space in the relocation section for the
5794 number of relocations we actually want to emit. */
5795 abort ();
5796 }
5797
5798 /* If this reloc is against an external symbol, we do not want to
5799 fiddle with the addend. Otherwise, we need to include the symbol
5800 value so that it becomes an addend for the dynamic reloc. */
5801 if (!relocate)
5802 return bfd_reloc_ok;
5803
5804 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5805 contents, rel->r_offset, value,
5806 signed_addend);
5807 }
5808 else
5809 value += signed_addend;
5810 break;
5811
5812 case BFD_RELOC_AARCH64_CALL26:
5813 case BFD_RELOC_AARCH64_JUMP26:
5814 {
5815 asection *splt = globals->root.splt;
5816 bfd_boolean via_plt_p =
5817 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5818
5819 /* A call to an undefined weak symbol is converted to a jump to
5820 the next instruction unless a PLT entry will be created.
5821 The jump to the next instruction is optimized as a NOP.
5822 Do the same for local undefined symbols. */
5823 if (weak_undef_p && ! via_plt_p)
5824 {
5825 bfd_putl32 (INSN_NOP, hit_data);
5826 return bfd_reloc_ok;
5827 }
5828
5829 /* If the call goes through a PLT entry, make sure to
5830 check distance to the right destination address. */
5831 if (via_plt_p)
5832 value = (splt->output_section->vma
5833 + splt->output_offset + h->plt.offset);
5834
5835 /* Check if a stub has to be inserted because the destination
5836 is too far away. */
5837 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
5838
5839 /* If the branch destination is directed to a PLT stub, "value" is
5840 already the final destination; otherwise we must add signed_addend,
5841 which may be non-zero, for example for a call to a local function
5842 symbol that has been turned into "sec_sym + sec_off", with sec_off
5843 kept in signed_addend. */
5844 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
5845 place))
5846 /* The target is out of reach, so redirect the branch to
5847 the local stub for this function. */
5848 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
5849 rel, globals);
5850 if (stub_entry != NULL)
5851 {
5852 value = (stub_entry->stub_offset
5853 + stub_entry->stub_sec->output_offset
5854 + stub_entry->stub_sec->output_section->vma);
5855
5856 /* We have redirected the destination to stub entry address,
5857 so ignore any addend record in the original rela entry. */
5858 signed_addend = 0;
5859 }
5860 }
5861 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5862 place, value,
5863 signed_addend, weak_undef_p);
5864 *unresolved_reloc_p = FALSE;
5865 break;
5866
5867 case BFD_RELOC_AARCH64_16_PCREL:
5868 case BFD_RELOC_AARCH64_32_PCREL:
5869 case BFD_RELOC_AARCH64_64_PCREL:
5870 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5871 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5872 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5873 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
5874 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5875 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5876 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5877 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5878 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5879 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5880 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5881 if (bfd_link_pic (info)
5882 && (input_section->flags & SEC_ALLOC) != 0
5883 && (input_section->flags & SEC_READONLY) != 0
5884 && !SYMBOL_REFERENCES_LOCAL (info, h))
5885 {
5886 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5887
5888 _bfd_error_handler
5889 /* xgettext:c-format */
5890 (_("%pB: relocation %s against symbol `%s' which may bind "
5891 "externally can not be used when making a shared object; "
5892 "recompile with -fPIC"),
5893 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
5894 h->root.root.string);
5895 bfd_set_error (bfd_error_bad_value);
5896 return bfd_reloc_notsupported;
5897 }
5898 /* Fall through. */
5899
5900 case BFD_RELOC_AARCH64_16:
5901 #if ARCH_SIZE == 64
5902 case BFD_RELOC_AARCH64_32:
5903 #endif
5904 case BFD_RELOC_AARCH64_ADD_LO12:
5905 case BFD_RELOC_AARCH64_BRANCH19:
5906 case BFD_RELOC_AARCH64_LDST128_LO12:
5907 case BFD_RELOC_AARCH64_LDST16_LO12:
5908 case BFD_RELOC_AARCH64_LDST32_LO12:
5909 case BFD_RELOC_AARCH64_LDST64_LO12:
5910 case BFD_RELOC_AARCH64_LDST8_LO12:
5911 case BFD_RELOC_AARCH64_MOVW_G0:
5912 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5913 case BFD_RELOC_AARCH64_MOVW_G0_S:
5914 case BFD_RELOC_AARCH64_MOVW_G1:
5915 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5916 case BFD_RELOC_AARCH64_MOVW_G1_S:
5917 case BFD_RELOC_AARCH64_MOVW_G2:
5918 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5919 case BFD_RELOC_AARCH64_MOVW_G2_S:
5920 case BFD_RELOC_AARCH64_MOVW_G3:
5921 case BFD_RELOC_AARCH64_TSTBR14:
5922 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5923 place, value,
5924 signed_addend, weak_undef_p);
5925 break;
5926
5927 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5928 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5929 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5930 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5931 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5932 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5933 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5934 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5935 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5936 if (globals->root.sgot == NULL)
5937 BFD_ASSERT (h != NULL);
5938
5939 relative_reloc = FALSE;
5940 if (h != NULL)
5941 {
5942 bfd_vma addend = 0;
5943
5944 /* If a symbol is neither dynamic nor undefined weak, bind it
5945 locally and generate a RELATIVE relocation when linking PIC.
5946
5947 NOTE: one symbol may be referenced by several relocations; we
5948 should generate only one RELATIVE relocation for that symbol.
5949 Therefore, check the GOT offset mark first. */
5950 if (h->dynindx == -1
5951 && !h->forced_local
5952 && h->root.type != bfd_link_hash_undefweak
5953 && bfd_link_pic (info)
5954 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5955 relative_reloc = TRUE;
5956
5957 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5958 output_bfd,
5959 unresolved_reloc_p);
5960 /* Record the GOT entry address which will be used when generating
5961 RELATIVE relocation. */
5962 if (relative_reloc)
5963 got_entry_addr = value;
5964
5965 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5966 addend = (globals->root.sgot->output_section->vma
5967 + globals->root.sgot->output_offset);
5968 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5969 place, value,
5970 addend, weak_undef_p);
5971 }
5972 else
5973 {
5974 bfd_vma addend = 0;
5975 struct elf_aarch64_local_symbol *locals
5976 = elf_aarch64_locals (input_bfd);
5977
5978 if (locals == NULL)
5979 {
5980 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5981 _bfd_error_handler
5982 /* xgettext:c-format */
5983 (_("%pB: local symbol descriptor table is NULL when applying "
5984 "relocation %s against local symbol"),
5985 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5986 abort ();
5987 }
5988
5989 off = symbol_got_offset (input_bfd, h, r_symndx);
5990 base_got = globals->root.sgot;
5991 got_entry_addr = (base_got->output_section->vma
5992 + base_got->output_offset + off);
5993
5994 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5995 {
5996 bfd_put_64 (output_bfd, value, base_got->contents + off);
5997
5998 /* For a local symbol the absolute relocation has already been applied
5999 at static link time. For a shared library, however, the content of
6000 the GOT entry must be adjusted by the shared object's runtime base
6001 address, so we need to generate an R_AARCH64_RELATIVE reloc for the
6002 dynamic linker. */
6003 if (bfd_link_pic (info))
6004 relative_reloc = TRUE;
6005
6006 symbol_got_offset_mark (input_bfd, h, r_symndx);
6007 }
6008
6009 /* Update the relocation value to the GOT entry address, as we have
6010 transformed the direct data access into an indirect access through the GOT. */
6011 value = got_entry_addr;
6012
6013 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6014 addend = base_got->output_section->vma + base_got->output_offset;
6015
6016 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6017 place, value,
6018 addend, weak_undef_p);
6019 }
6020
6021 if (relative_reloc)
6022 {
6023 asection *s;
6024 Elf_Internal_Rela outrel;
6025
6026 s = globals->root.srelgot;
6027 if (s == NULL)
6028 abort ();
6029
6030 outrel.r_offset = got_entry_addr;
6031 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6032 outrel.r_addend = orig_value;
6033 elf_append_rela (output_bfd, s, &outrel);
6034 }
6035 break;
6036
6037 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6038 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6039 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6040 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6041 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6042 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6043 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6044 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6045 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6046 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6047 if (globals->root.sgot == NULL)
6048 return bfd_reloc_notsupported;
6049
6050 value = (symbol_got_offset (input_bfd, h, r_symndx)
6051 + globals->root.sgot->output_section->vma
6052 + globals->root.sgot->output_offset);
6053
6054 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6055 place, value,
6056 0, weak_undef_p);
6057 *unresolved_reloc_p = FALSE;
6058 break;
6059
6060 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6061 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6062 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6063 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6064 if (globals->root.sgot == NULL)
6065 return bfd_reloc_notsupported;
6066
6067 value = symbol_got_offset (input_bfd, h, r_symndx);
6068 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6069 place, value,
6070 0, weak_undef_p);
6071 *unresolved_reloc_p = FALSE;
6072 break;
6073
6074 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6075 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6076 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6077 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6078 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6079 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6080 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6081 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6082 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6083 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6084 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6085 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6086 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6087 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6088 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6089 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6090 {
6091 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6092 {
6093 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6094 _bfd_error_handler
6095 /* xgettext:c-format */
6096 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6097 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6098 h->root.root.string);
6099 bfd_set_error (bfd_error_bad_value);
6100 return bfd_reloc_notsupported;
6101 }
6102
6103 bfd_vma def_value
6104 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
6105 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6106 place, value,
6107 def_value, weak_undef_p);
6108 break;
6109 }
6110
6111 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6112 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6113 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6114 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
6115 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6116 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
6117 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6118 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
6119 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6120 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
6121 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6122 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6123 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6124 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6125 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6126 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6127 {
6128 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6129 {
6130 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6131 _bfd_error_handler
6132 /* xgettext:c-format */
6133 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6134 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6135 h->root.root.string);
6136 bfd_set_error (bfd_error_bad_value);
6137 return bfd_reloc_notsupported;
6138 }
6139
6140 bfd_vma def_value
6141 = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
6142 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6143 place, value,
6144 def_value, weak_undef_p);
6145 *unresolved_reloc_p = FALSE;
6146 break;
6147 }
6148
6149 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6150 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6151 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6152 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6153 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
6154 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6155 if (globals->root.sgot == NULL)
6156 return bfd_reloc_notsupported;
6157 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6158 + globals->root.sgotplt->output_section->vma
6159 + globals->root.sgotplt->output_offset
6160 + globals->sgotplt_jump_table_size);
6161
6162 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6163 place, value,
6164 0, weak_undef_p);
6165 *unresolved_reloc_p = FALSE;
6166 break;
6167
6168 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6169 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6170 if (globals->root.sgot == NULL)
6171 return bfd_reloc_notsupported;
6172
6173 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6174 + globals->root.sgotplt->output_section->vma
6175 + globals->root.sgotplt->output_offset
6176 + globals->sgotplt_jump_table_size);
6177
6178 value -= (globals->root.sgot->output_section->vma
6179 + globals->root.sgot->output_offset);
6180
6181 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6182 place, value,
6183 0, weak_undef_p);
6184 *unresolved_reloc_p = FALSE;
6185 break;
6186
6187 default:
6188 return bfd_reloc_notsupported;
6189 }
6190
6191 if (saved_addend)
6192 *saved_addend = value;
6193
6194 /* Only apply the final relocation in a sequence. */
6195 if (save_addend)
6196 return bfd_reloc_continue;
6197
6198 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
6199 howto, value);
6200 }
6201
6202 /* LP64 and ILP32 operate on x- and w-registers respectively.
6203 The following definitions take into account the difference between
6204 the corresponding machine encodings. R means an x-register if the
6205 target arch is LP64, and a w-register if the target is ILP32. */
6206
6207 #if ARCH_SIZE == 64
6208 # define add_R0_R0 (0x91000000)
6209 # define add_R0_R0_R1 (0x8b000020)
6210 # define add_R0_R1 (0x91400020)
6211 # define ldr_R0 (0x58000000)
6212 # define ldr_R0_mask(i) (i & 0xffffffe0)
6213 # define ldr_R0_x0 (0xf9400000)
6214 # define ldr_hw_R0 (0xf2a00000)
6215 # define movk_R0 (0xf2800000)
6216 # define movz_R0 (0xd2a00000)
6217 # define movz_hw_R0 (0xd2c00000)
6218 #else /*ARCH_SIZE == 32 */
6219 # define add_R0_R0 (0x11000000)
6220 # define add_R0_R0_R1 (0x0b000020)
6221 # define add_R0_R1 (0x11400020)
6222 # define ldr_R0 (0x18000000)
6223 # define ldr_R0_mask(i) (i & 0xbfffffe0)
6224 # define ldr_R0_x0 (0xb9400000)
6225 # define ldr_hw_R0 (0x72a00000)
6226 # define movk_R0 (0x72800000)
6227 # define movz_R0 (0x52a00000)
6228 # define movz_hw_R0 (0x52c00000)
6229 #endif
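
/* Illustrative example (editor's note, not upstream code): the mask helpers
   above rewrite an existing load so that it targets register 0 of the
   appropriate width.  For LP64, ldr_R0_mask clears only the Rt field
   (bits 0-4); for ILP32 it also clears bit 30, turning a 64-bit load into a
   32-bit one.  Assuming LP64:

     uint32_t insn = 0xf9400021;       // ldr x1, [x1]
     insn = ldr_R0_mask (insn);        // 0xf9400020 == ldr x0, [x1]

   which is exactly the rewrite used by the TLSDESC "ldr" relaxations below.  */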
6230
6231 /* Structure to hold the payload for _bfd_aarch64_erratum_843419_clear_stub;
6232 it is used to identify the stub information to reset. */
6233
6234 struct erratum_843419_branch_to_stub_clear_data
6235 {
6236 bfd_vma adrp_offset;
6237 asection *output_section;
6238 };
6239
6240 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
6241 section inside IN_ARG match. The clearing is done by setting the
6242 stub_type to none. */
6243
6244 static bfd_boolean
6245 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
6246 void *in_arg)
6247 {
6248 struct elf_aarch64_stub_hash_entry *stub_entry
6249 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6250 struct erratum_843419_branch_to_stub_clear_data *data
6251 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
6252
6253 if (stub_entry->target_section != data->output_section
6254 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
6255 || stub_entry->adrp_offset != data->adrp_offset)
6256 return TRUE;
6257
6258 /* Change the stub type instead of removing the entry: removing it from the
6259 hash table would be slower, and we have already reserved the memory for the
6260 entry, so there wouldn't be much gain. Changing the stub type also keeps
6261 a record of what was there before. */
6262 stub_entry->stub_type = aarch64_stub_none;
6263
6264 /* We're done and there could have been only one matching stub at that
6265 particular offset, so abort further traversal. */
6266 return FALSE;
6267 }
6268
6269 /* TLS relaxation may relax an adrp sequence that matches the erratum 843419
6270 sequence. In this case the erratum no longer applies and we need to remove
6271 the entry from the pending stub generation. This clears the matching adrp
6272 insn at ADRP_OFFSET in INPUT_SECTION from the stub table defined in GLOBALS. */
6273
6274 static void
6275 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
6276 bfd_vma adrp_offset, asection *input_section)
6277 {
6278 if (globals->fix_erratum_843419 & ERRAT_ADRP)
6279 {
6280 struct erratum_843419_branch_to_stub_clear_data data;
6281 data.adrp_offset = adrp_offset;
6282 data.output_section = input_section;
6283
6284 bfd_hash_traverse (&globals->stub_hash_table,
6285 _bfd_aarch64_erratum_843419_clear_stub, &data);
6286 }
6287 }
6288
6289 /* Handle TLS relaxations. Relaxing is possible for symbols that use
6290 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
6291 link.
6292
6293 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
6294 is to then call final_link_relocate. Return other values in the
6295 case of error. */
6296
6297 static bfd_reloc_status_type
6298 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
6299 bfd *input_bfd, asection *input_section,
6300 bfd_byte *contents, Elf_Internal_Rela *rel,
6301 struct elf_link_hash_entry *h)
6302 {
6303 bfd_boolean is_local = h == NULL;
6304 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
6305 unsigned long insn;
6306
6307 BFD_ASSERT (globals && input_bfd && contents && rel);
6308
6309 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6310 {
6311 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6312 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6313 if (is_local)
6314 {
6315 /* GD->LE relaxation:
6316 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
6317 or
6318 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
6319
6320 Where R is x for LP64, and w for ILP32. */
6321 bfd_putl32 (movz_R0, contents + rel->r_offset);
6322 /* We have relaxed the adrp into a mov, we may have to clear any
6323 pending erratum fixes. */
6324 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6325 return bfd_reloc_continue;
6326 }
6327 else
6328 {
6329 /* GD->IE relaxation:
6330 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
6331 or
6332 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
6333 */
6334 return bfd_reloc_continue;
6335 }
6336
6337 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6338 BFD_ASSERT (0);
6339 break;
6340
6341 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6342 if (is_local)
6343 {
6344 /* Tiny TLSDESC->LE relaxation:
6345 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
6346 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
6347 .tlsdesccall var
6348 blr x1 => nop
6349
6350 Where R is x for LP64, and w for ILP32. */
6351 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6352 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6353
6354 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6355 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6356 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6357
6358 bfd_putl32 (movz_R0, contents + rel->r_offset);
6359 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
6360 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6361 return bfd_reloc_continue;
6362 }
6363 else
6364 {
6365 /* Tiny TLSDESC->IE relaxation:
6366 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
6367 adr x0, :tlsdesc:var => nop
6368 .tlsdesccall var
6369 blr x1 => nop
6370 */
6371 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6372 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6373
6374 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6375 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6376
6377 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6378 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6379 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6380 return bfd_reloc_continue;
6381 }
6382
6383 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6384 if (is_local)
6385 {
6386 /* Tiny GD->LE relaxation:
6387 adr x0, :tlsgd:var => mrs x1, tpidr_el0
6388 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
6389 nop => add R0, R0, #:tprel_lo12_nc:x
6390
6391 Where R is x for LP64, and w for ILP32. */
6392
6393 /* First kill the tls_get_addr reloc on the bl instruction. */
6394 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6395
6396 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
6397 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
6398 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
6399
6400 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6401 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
6402 rel[1].r_offset = rel->r_offset + 8;
6403
6404 /* Move the current relocation to the second instruction in
6405 the sequence. */
6406 rel->r_offset += 4;
6407 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6408 AARCH64_R (TLSLE_ADD_TPREL_HI12));
6409 return bfd_reloc_continue;
6410 }
6411 else
6412 {
6413 /* Tiny GD->IE relaxation:
6414 adr x0, :tlsgd:var => ldr R0, :gottprel:var
6415 bl __tls_get_addr => mrs x1, tpidr_el0
6416 nop => add R0, R0, R1
6417
6418 Where R is x for LP64, and w for ILP32. */
6419
6420 /* First kill the tls_get_addr reloc on the bl instruction. */
6421 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6422 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6423
6424 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6425 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6426 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6427 return bfd_reloc_continue;
6428 }
6429
6430 #if ARCH_SIZE == 64
6431 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6432 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
6433 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
6434 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
6435
6436 if (is_local)
6437 {
6438 /* Large GD->LE relaxation:
6439 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
6440 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
6441 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
6442 bl __tls_get_addr => mrs x1, tpidr_el0
6443 nop => add x0, x0, x1
6444 */
6445 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6446 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6447 rel[2].r_offset = rel->r_offset + 8;
6448
6449 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
6450 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
6451 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
6452 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6453 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6454 }
6455 else
6456 {
6457 /* Large GD->IE relaxation:
6458 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
6459 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
6460 add x0, gp, x0 => ldr x0, [gp, x0]
6461 bl __tls_get_addr => mrs x1, tpidr_el0
6462 nop => add x0, x0, x1
6463 */
6464 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6465 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
6466 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
6467 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6468 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6469 }
6470 return bfd_reloc_continue;
6471
6472 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6473 return bfd_reloc_continue;
6474 #endif
6475
6476 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6477 return bfd_reloc_continue;
6478
6479 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6480 if (is_local)
6481 {
6482 /* GD->LE relaxation:
6483 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
6484
6485 Where R is x for lp64 mode, and w for ILP32 mode. */
6486 bfd_putl32 (movk_R0, contents + rel->r_offset);
6487 return bfd_reloc_continue;
6488 }
6489 else
6490 {
6491 /* GD->IE relaxation:
6492 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
6493
6494 Where R is x for lp64 mode, and w for ILP32 mode. */
6495 insn = bfd_getl32 (contents + rel->r_offset);
6496 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6497 return bfd_reloc_continue;
6498 }
6499
6500 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6501 if (is_local)
6502 {
6503 /* GD->LE relaxation
6504 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
6505 bl __tls_get_addr => mrs x1, tpidr_el0
6506 nop => add R0, R1, R0
6507
6508 Where R is x for lp64 mode, and w for ILP32 mode. */
6509
6510 /* First kill the tls_get_addr reloc on the bl instruction. */
6511 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6512 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6513
6514 bfd_putl32 (movk_R0, contents + rel->r_offset);
6515 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6516 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6517 return bfd_reloc_continue;
6518 }
6519 else
6520 {
6521 /* GD->IE relaxation
6522 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
6523 BL __tls_get_addr => mrs x1, tpidr_el0
6524 R_AARCH64_CALL26
6525 NOP => add R0, R1, R0
6526
6527 Where R is x for lp64 mode, and w for ilp32 mode. */
6528
6529 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6530
6531 /* Remove the relocation on the BL instruction. */
6532 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6533
6534 /* We choose to fixup the BL and NOP instructions using the
6535 offset from the second relocation to allow flexibility in
6536 scheduling instructions between the ADD and BL. */
6537 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
6538 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
6539 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
6540 return bfd_reloc_continue;
6541 }
6542
6543 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6544 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6545 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6546 /* GD->IE/LE relaxation:
6547 add x0, x0, #:tlsdesc_lo12:var => nop
6548 blr xd => nop
6549 */
6550 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
6551 return bfd_reloc_ok;
6552
6553 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6554 if (is_local)
6555 {
6556 /* GD->LE relaxation:
6557 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
6558
6559 Where R is x for lp64 mode, and w for ILP32 mode. */
6560 bfd_putl32 (movk_R0, contents + rel->r_offset);
6561 return bfd_reloc_continue;
6562 }
6563 else
6564 {
6565 /* GD->IE relaxation:
6566 ldr xd, [gp, xn] => ldr R0, [gp, xn]
6567
6568 Where R is x for lp64 mode, and w for ILP32 mode. */
6569 insn = bfd_getl32 (contents + rel->r_offset);
6570 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6571 return bfd_reloc_ok;
6572 }
6573
6574 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6575 /* GD->LE relaxation:
6576 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
6577 GD->IE relaxation:
6578 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
6579
6580 Where R is x for lp64 mode, and w for ILP32 mode. */
6581 if (is_local)
6582 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
6583 return bfd_reloc_continue;
6584
6585 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6586 if (is_local)
6587 {
6588 /* GD->LE relaxation:
6589 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
6590
6591 Where R is x for lp64 mode, and w for ILP32 mode. */
6592 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
6593 return bfd_reloc_continue;
6594 }
6595 else
6596 {
6597 /* GD->IE relaxation:
6598 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
6599
6600 Where R is x for lp64 mode, and w for ILP32 mode. */
6601 insn = bfd_getl32 (contents + rel->r_offset);
6602 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6603 return bfd_reloc_continue;
6604 }
6605
6606 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6607 /* IE->LE relaxation:
6608 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
6609
6610 Where R is x for lp64 mode, and w for ILP32 mode. */
6611 if (is_local)
6612 {
6613 insn = bfd_getl32 (contents + rel->r_offset);
6614 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6615 /* We have relaxed the adrp into a mov, we may have to clear any
6616 pending erratum fixes. */
6617 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6618 }
6619 return bfd_reloc_continue;
6620
6621 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6622 /* IE->LE relaxation:
6623 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
6624
6625 Where R is x for lp64 mode, and w for ILP32 mode. */
6626 if (is_local)
6627 {
6628 insn = bfd_getl32 (contents + rel->r_offset);
6629 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
6630 }
6631 return bfd_reloc_continue;
6632
6633 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6634 /* LD->LE relaxation (tiny):
6635 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6636 bl __tls_get_addr => add R0, R0, TCB_SIZE
6637
6638 Where R is x for lp64 mode, and w for ilp32 mode. */
6639 if (is_local)
6640 {
6641 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6642 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6643 /* No need of CALL26 relocation for tls_get_addr. */
6644 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6645 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6646 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6647 contents + rel->r_offset + 4);
6648 return bfd_reloc_ok;
6649 }
6650 return bfd_reloc_continue;
6651
6652 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6653 /* LD->LE relaxation (small):
6654 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6655 */
6656 if (is_local)
6657 {
6658 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6659 return bfd_reloc_ok;
6660 }
6661 return bfd_reloc_continue;
6662
6663 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6664 /* LD->LE relaxation (small):
6665 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6666 bl __tls_get_addr => nop
6667
6668 Where R is x for lp64 mode, and w for ilp32 mode. */
6669 if (is_local)
6670 {
6671 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6672 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6673 /* No need of CALL26 relocation for tls_get_addr. */
6674 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6675 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6676 contents + rel->r_offset + 0);
6677 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6678 return bfd_reloc_ok;
6679 }
6680 return bfd_reloc_continue;
6681
6682 default:
6683 return bfd_reloc_continue;
6684 }
6685
6686 return bfd_reloc_ok;
6687 }
6688
6689 /* Relocate an AArch64 ELF section. */
6690
6691 static bfd_boolean
6692 elfNN_aarch64_relocate_section (bfd *output_bfd,
6693 struct bfd_link_info *info,
6694 bfd *input_bfd,
6695 asection *input_section,
6696 bfd_byte *contents,
6697 Elf_Internal_Rela *relocs,
6698 Elf_Internal_Sym *local_syms,
6699 asection **local_sections)
6700 {
6701 Elf_Internal_Shdr *symtab_hdr;
6702 struct elf_link_hash_entry **sym_hashes;
6703 Elf_Internal_Rela *rel;
6704 Elf_Internal_Rela *relend;
6705 const char *name;
6706 struct elf_aarch64_link_hash_table *globals;
6707 bfd_boolean save_addend = FALSE;
6708 bfd_vma addend = 0;
6709
6710 globals = elf_aarch64_hash_table (info);
6711
6712 symtab_hdr = &elf_symtab_hdr (input_bfd);
6713 sym_hashes = elf_sym_hashes (input_bfd);
6714
6715 rel = relocs;
6716 relend = relocs + input_section->reloc_count;
6717 for (; rel < relend; rel++)
6718 {
6719 unsigned int r_type;
6720 bfd_reloc_code_real_type bfd_r_type;
6721 bfd_reloc_code_real_type relaxed_bfd_r_type;
6722 reloc_howto_type *howto;
6723 unsigned long r_symndx;
6724 Elf_Internal_Sym *sym;
6725 asection *sec;
6726 struct elf_link_hash_entry *h;
6727 bfd_vma relocation;
6728 bfd_reloc_status_type r;
6729 arelent bfd_reloc;
6730 char sym_type;
6731 bfd_boolean unresolved_reloc = FALSE;
6732 char *error_message = NULL;
6733
6734 r_symndx = ELFNN_R_SYM (rel->r_info);
6735 r_type = ELFNN_R_TYPE (rel->r_info);
6736
6737 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
6738 howto = bfd_reloc.howto;
6739
6740 if (howto == NULL)
6741 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
6742
6743 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6744
6745 h = NULL;
6746 sym = NULL;
6747 sec = NULL;
6748
6749 if (r_symndx < symtab_hdr->sh_info)
6750 {
6751 sym = local_syms + r_symndx;
6752 sym_type = ELFNN_ST_TYPE (sym->st_info);
6753 sec = local_sections[r_symndx];
6754
6755 /* An object file might have a reference to a local
6756 undefined symbol. This is a daft object file, but we
6757 should at least do something about it. */
6758 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6759 && bfd_is_und_section (sec)
6760 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6761 (*info->callbacks->undefined_symbol)
6762 (info, bfd_elf_string_from_elf_section
6763 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6764 input_bfd, input_section, rel->r_offset, TRUE);
6765
6766 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6767
6768 /* Relocate against local STT_GNU_IFUNC symbol. */
6769 if (!bfd_link_relocatable (info)
6770 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6771 {
6772 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6773 rel, FALSE);
6774 if (h == NULL)
6775 abort ();
6776
6777 /* Set STT_GNU_IFUNC symbol value. */
6778 h->root.u.def.value = sym->st_value;
6779 h->root.u.def.section = sec;
6780 }
6781 }
6782 else
6783 {
6784 bfd_boolean warned, ignored;
6785
6786 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6787 r_symndx, symtab_hdr, sym_hashes,
6788 h, sec, relocation,
6789 unresolved_reloc, warned, ignored);
6790
6791 sym_type = h->type;
6792 }
6793
6794 if (sec != NULL && discarded_section (sec))
6795 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6796 rel, 1, relend, howto, 0, contents);
6797
6798 if (bfd_link_relocatable (info))
6799 continue;
6800
6801 if (h != NULL)
6802 name = h->root.root.string;
6803 else
6804 {
6805 name = (bfd_elf_string_from_elf_section
6806 (input_bfd, symtab_hdr->sh_link, sym->st_name));
6807 if (name == NULL || *name == '\0')
6808 name = bfd_section_name (sec);
6809 }
6810
6811 if (r_symndx != 0
6812 && r_type != R_AARCH64_NONE
6813 && r_type != R_AARCH64_NULL
6814 && (h == NULL
6815 || h->root.type == bfd_link_hash_defined
6816 || h->root.type == bfd_link_hash_defweak)
6817 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
6818 {
6819 _bfd_error_handler
6820 ((sym_type == STT_TLS
6821 /* xgettext:c-format */
6822 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
6823 /* xgettext:c-format */
6824 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
6825 input_bfd,
6826 input_section, (uint64_t) rel->r_offset, howto->name, name);
6827 }
6828
6829 /* We relax only if we can see that there can be a valid transition
6830 from one reloc type to another.
6831 We call elfNN_aarch64_final_link_relocate unless we're completely
6832 done, i.e., the relaxation produced the final output we want. */
6833
6834 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
6835 h, r_symndx);
6836 if (relaxed_bfd_r_type != bfd_r_type)
6837 {
6838 bfd_r_type = relaxed_bfd_r_type;
6839 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
6840 BFD_ASSERT (howto != NULL);
6841 r_type = howto->type;
6842 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
6843 contents, rel, h);
6844 unresolved_reloc = 0;
6845 }
6846 else
6847 r = bfd_reloc_continue;
6848
6849 /* There may be multiple consecutive relocations for the
6850 same offset. In that case we are supposed to treat the
6851 output of each relocation as the addend for the next. */
6852 if (rel + 1 < relend
6853 && rel->r_offset == rel[1].r_offset
6854 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
6855 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
6856 save_addend = TRUE;
6857 else
6858 save_addend = FALSE;
6859
6860 if (r == bfd_reloc_continue)
6861 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
6862 input_section, contents, rel,
6863 relocation, info, sec,
6864 h, &unresolved_reloc,
6865 save_addend, &addend, sym);
6866
6867 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6868 {
6869 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6870 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6871 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6872 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6873 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6874 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6875 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6876 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6877 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6878 {
6879 bfd_boolean need_relocs = FALSE;
6880 bfd_byte *loc;
6881 int indx;
6882 bfd_vma off;
6883
6884 off = symbol_got_offset (input_bfd, h, r_symndx);
6885 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6886
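/* A GD/LD access uses a pair of GOT entries: the first holds the
module ID (DTPMOD) and the second the offset within that module's
TLS block (DTPREL). Dynamic relocations are needed unless both can
be filled in at static link time; an undefined weak symbol with
non-default visibility resolves to zero and needs none. */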
6887 need_relocs =
6888 (!bfd_link_executable (info) || indx != 0) &&
6889 (h == NULL
6890 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6891 || h->root.type != bfd_link_hash_undefweak);
6892
6893 BFD_ASSERT (globals->root.srelgot != NULL);
6894
6895 if (need_relocs)
6896 {
6897 Elf_Internal_Rela rela;
6898 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
6899 rela.r_addend = 0;
6900 rela.r_offset = globals->root.sgot->output_section->vma +
6901 globals->root.sgot->output_offset + off;
6902
6903
6904 loc = globals->root.srelgot->contents;
6905 loc += globals->root.srelgot->reloc_count++
6906 * RELOC_SIZE (htab);
6907 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6908
6909 bfd_reloc_code_real_type real_type =
6910 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
6911
6912 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
6913 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
6914 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
6915 {
6916 /* For local dynamic, don't generate DTPREL in any case.
6917 Initialize the DTPREL slot to zero, so we get the module
6918 base address when invoking the runtime TLS resolver. */
6919 bfd_put_NN (output_bfd, 0,
6920 globals->root.sgot->contents + off
6921 + GOT_ENTRY_SIZE);
6922 }
6923 else if (indx == 0)
6924 {
6925 bfd_put_NN (output_bfd,
6926 relocation - dtpoff_base (info),
6927 globals->root.sgot->contents + off
6928 + GOT_ENTRY_SIZE);
6929 }
6930 else
6931 {
6932 /* This TLS symbol is global. We emit a
6933 relocation to fixup the tls offset at load
6934 time. */
6935 rela.r_info =
6936 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
6937 rela.r_addend = 0;
6938 rela.r_offset =
6939 (globals->root.sgot->output_section->vma
6940 + globals->root.sgot->output_offset + off
6941 + GOT_ENTRY_SIZE);
6942
6943 loc = globals->root.srelgot->contents;
6944 loc += globals->root.srelgot->reloc_count++
6945 * RELOC_SIZE (globals);
6946 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6947 bfd_put_NN (output_bfd, (bfd_vma) 0,
6948 globals->root.sgot->contents + off
6949 + GOT_ENTRY_SIZE);
6950 }
6951 }
6952 else
6953 {
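/* No dynamic relocations needed: the module ID is 1 (the main
executable) and the DTPREL offset is filled in directly. */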
6954 bfd_put_NN (output_bfd, (bfd_vma) 1,
6955 globals->root.sgot->contents + off);
6956 bfd_put_NN (output_bfd,
6957 relocation - dtpoff_base (info),
6958 globals->root.sgot->contents + off
6959 + GOT_ENTRY_SIZE);
6960 }
6961
6962 symbol_got_offset_mark (input_bfd, h, r_symndx);
6963 }
6964 break;
6965
6966 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6967 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6968 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6969 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6970 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6971 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6972 {
6973 bfd_boolean need_relocs = FALSE;
6974 bfd_byte *loc;
6975 int indx;
6976 bfd_vma off;
6977
6978 off = symbol_got_offset (input_bfd, h, r_symndx);
6979
6980 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6981
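/* Same criterion as for the GD/LD entries above: a TPREL dynamic
relocation is only needed when the value cannot be fixed at
static link time. */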
6982 need_relocs =
6983 (!bfd_link_executable (info) || indx != 0) &&
6984 (h == NULL
6985 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6986 || h->root.type != bfd_link_hash_undefweak);
6987
6988 BFD_ASSERT (globals->root.srelgot != NULL);
6989
6990 if (need_relocs)
6991 {
6992 Elf_Internal_Rela rela;
6993
6994 if (indx == 0)
6995 rela.r_addend = relocation - dtpoff_base (info);
6996 else
6997 rela.r_addend = 0;
6998
6999 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
7000 rela.r_offset = globals->root.sgot->output_section->vma +
7001 globals->root.sgot->output_offset + off;
7002
7003 loc = globals->root.srelgot->contents;
7004 loc += globals->root.srelgot->reloc_count++
7005 * RELOC_SIZE (htab);
7006
7007 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7008
7009 bfd_put_NN (output_bfd, rela.r_addend,
7010 globals->root.sgot->contents + off);
7011 }
7012 else
7013 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
7014 globals->root.sgot->contents + off);
7015
7016 symbol_got_offset_mark (input_bfd, h, r_symndx);
7017 }
7018 break;
7019
7020 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7021 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7022 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7023 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
7024 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7025 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7026 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7027 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
7028 {
7029 bfd_boolean need_relocs = FALSE;
7030 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
7031 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
7032
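/* A TLSDESC entry always gets an R_AARCH64_TLSDESC dynamic
relocation, except for an undefined weak symbol with non-default
visibility, which statically resolves to zero. */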
7033 need_relocs = (h == NULL
7034 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7035 || h->root.type != bfd_link_hash_undefweak);
7036
7037 BFD_ASSERT (globals->root.srelgot != NULL);
7038 BFD_ASSERT (globals->root.sgot != NULL);
7039
7040 if (need_relocs)
7041 {
7042 bfd_byte *loc;
7043 Elf_Internal_Rela rela;
7044 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
7045
7046 rela.r_addend = 0;
7047 rela.r_offset = (globals->root.sgotplt->output_section->vma
7048 + globals->root.sgotplt->output_offset
7049 + off + globals->sgotplt_jump_table_size);
7050
7051 if (indx == 0)
7052 rela.r_addend = relocation - dtpoff_base (info);
7053
7054 /* Allocate the next available slot in the PLT reloc
7055 section to hold our R_AARCH64_TLSDESC, the next
7056 available slot is determined from reloc_count,
7057 which we step. But note, reloc_count was
7058 artificially moved down while allocating slots for
7059 real PLT relocs such that all of the PLT relocs
7060 will fit above the initial reloc_count and the
7061 extra stuff will fit below. */
7062 loc = globals->root.srelplt->contents;
7063 loc += globals->root.srelplt->reloc_count++
7064 * RELOC_SIZE (globals);
7065
7066 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7067
7068 bfd_put_NN (output_bfd, (bfd_vma) 0,
7069 globals->root.sgotplt->contents + off +
7070 globals->sgotplt_jump_table_size);
7071 bfd_put_NN (output_bfd, (bfd_vma) 0,
7072 globals->root.sgotplt->contents + off +
7073 globals->sgotplt_jump_table_size +
7074 GOT_ENTRY_SIZE);
7075 }
7076
7077 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
7078 }
7079 break;
7080 default:
7081 break;
7082 }
7083
7084 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
7085 because such sections are not SEC_ALLOC and thus ld.so will
7086 not process them. */
7087 if (unresolved_reloc
7088 && !((input_section->flags & SEC_DEBUGGING) != 0
7089 && h->def_dynamic)
7090 && _bfd_elf_section_offset (output_bfd, info, input_section,
7091 +rel->r_offset) != (bfd_vma) - 1)
7092 {
7093 _bfd_error_handler
7094 /* xgettext:c-format */
7095 (_("%pB(%pA+%#" PRIx64 "): "
7096 "unresolvable %s relocation against symbol `%s'"),
7097 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
7098 h->root.root.string);
7099 return FALSE;
7100 }
7101
7102 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
7103 {
7104 bfd_reloc_code_real_type real_r_type
7105 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7106
7107 switch (r)
7108 {
7109 case bfd_reloc_overflow:
7110 (*info->callbacks->reloc_overflow)
7111 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
7112 input_bfd, input_section, rel->r_offset);
7113 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
7114 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
7115 {
7116 (*info->callbacks->warning)
7117 (info,
7118 _("too many GOT entries for -fpic, "
7119 "please recompile with -fPIC"),
7120 name, input_bfd, input_section, rel->r_offset);
7121 return FALSE;
7122 }
7123 /* Overflow can occur when a variable is referenced with a type
7124 that has a larger alignment than the type with which it was
7125 declared, e.g.:
7126 file1.c: extern int foo; int a (void) { return foo; }
7127 file2.c: char bar, foo, baz;
7128 If the variable is placed into a data section at an offset
7129 that is incompatible with the larger alignment requirement
7130 overflow will occur. (Strictly speaking this is not overflow
7131 but rather an alignment problem, but the bfd_reloc_ error
7132 enum does not have a value to cover that situation).
7133
7134 Try to catch this situation here and provide a more helpful
7135 error message to the user. */
7136 if (addend & ((1 << howto->rightshift) - 1)
7137 /* FIXME: Are we testing all of the appropriate reloc
7138 types here ? */
7139 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
7140 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
7141 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
7142 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
7143 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
7144 {
7145 info->callbacks->warning
7146 (info, _("one possible cause of this error is that the \
7147 symbol is being referenced in the indicated code as if it had a larger \
7148 alignment than was declared where it was defined"),
7149 name, input_bfd, input_section, rel->r_offset);
7150 }
7151 break;
7152
7153 case bfd_reloc_undefined:
7154 (*info->callbacks->undefined_symbol)
7155 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
7156 break;
7157
7158 case bfd_reloc_outofrange:
7159 error_message = _("out of range");
7160 goto common_error;
7161
7162 case bfd_reloc_notsupported:
7163 error_message = _("unsupported relocation");
7164 goto common_error;
7165
7166 case bfd_reloc_dangerous:
7167 /* error_message should already be set. */
7168 goto common_error;
7169
7170 default:
7171 error_message = _("unknown error");
7172 /* Fall through. */
7173
7174 common_error:
7175 BFD_ASSERT (error_message != NULL);
7176 (*info->callbacks->reloc_dangerous)
7177 (info, error_message, input_bfd, input_section, rel->r_offset);
7178 break;
7179 }
7180 }
7181
7182 if (!save_addend)
7183 addend = 0;
7184 }
7185
7186 return TRUE;
7187 }
7188
7189 /* Set the right machine number. */
7190
7191 static bfd_boolean
7192 elfNN_aarch64_object_p (bfd *abfd)
7193 {
7194 #if ARCH_SIZE == 32
7195 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7196 #else
7197 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7198 #endif
7199 return TRUE;
7200 }
7201
7202 /* Function to keep AArch64 specific flags in the ELF header. */
7203
7204 static bfd_boolean
7205 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7206 {
7207 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7208 {
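/* The flags have already been initialised to a different value;
leave them as they are. */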
7209 }
7210 else
7211 {
7212 elf_elfheader (abfd)->e_flags = flags;
7213 elf_flags_init (abfd) = TRUE;
7214 }
7215
7216 return TRUE;
7217 }
7218
7219 /* Merge backend specific data from an object file to the output
7220 object file when linking. */
7221
7222 static bfd_boolean
7223 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7224 {
7225 bfd *obfd = info->output_bfd;
7226 flagword out_flags;
7227 flagword in_flags;
7228 bfd_boolean flags_compatible = TRUE;
7229 asection *sec;
7230
7231 /* Check if we have the same endianness. */
7232 if (!_bfd_generic_verify_endian_match (ibfd, info))
7233 return FALSE;
7234
7235 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7236 return TRUE;
7237
7238 /* The input BFD must have had its flags initialised. */
7239 /* The following seems bogus to me -- The flags are initialized in
7240 the assembler but I don't think an elf_flags_init field is
7241 written into the object. */
7242 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7243
7244 in_flags = elf_elfheader (ibfd)->e_flags;
7245 out_flags = elf_elfheader (obfd)->e_flags;
7246
7247 if (!elf_flags_init (obfd))
7248 {
7249 /* If the input is the default architecture and had the default
7250 flags then do not bother setting the flags for the output
7251 architecture, instead allow future merges to do this. If no
7252 future merges ever set these flags then they will retain their
7253 uninitialised values, which, surprise surprise, correspond
7254 to the default values. */
7255 if (bfd_get_arch_info (ibfd)->the_default
7256 && elf_elfheader (ibfd)->e_flags == 0)
7257 return TRUE;
7258
7259 elf_flags_init (obfd) = TRUE;
7260 elf_elfheader (obfd)->e_flags = in_flags;
7261
7262 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7263 && bfd_get_arch_info (obfd)->the_default)
7264 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7265 bfd_get_mach (ibfd));
7266
7267 return TRUE;
7268 }
7269
7270 /* Identical flags must be compatible. */
7271 if (in_flags == out_flags)
7272 return TRUE;
7273
7274 /* Check to see if the input BFD actually contains any sections. If
7275 not, its flags may not have been initialised either, but it
7276 cannot actually cause any incompatibility. Do not short-circuit
7277 dynamic objects; their section list may be emptied by
7278 elf_link_add_object_symbols.
7279
7280 Also check to see if there are no code sections in the input.
7281 In this case there is no need to check for code specific flags.
7282 XXX - do we need to worry about floating-point format compatibility
7283 in data sections? */
7284 if (!(ibfd->flags & DYNAMIC))
7285 {
7286 bfd_boolean null_input_bfd = TRUE;
7287 bfd_boolean only_data_sections = TRUE;
7288
7289 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
7290 {
7291 if ((bfd_section_flags (sec)
7292 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7293 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7294 only_data_sections = FALSE;
7295
7296 null_input_bfd = FALSE;
7297 break;
7298 }
7299
7300 if (null_input_bfd || only_data_sections)
7301 return TRUE;
7302 }
7303
7304 return flags_compatible;
7305 }
7306
7307 /* Display the flags field. */
7308
7309 static bfd_boolean
7310 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
7311 {
7312 FILE *file = (FILE *) ptr;
7313 unsigned long flags;
7314
7315 BFD_ASSERT (abfd != NULL && ptr != NULL);
7316
7317 /* Print normal ELF private data. */
7318 _bfd_elf_print_private_bfd_data (abfd, ptr);
7319
7320 flags = elf_elfheader (abfd)->e_flags;
7321 /* Ignore init flag - it may not be set, despite the flags field
7322 containing valid data. */
7323
7324 /* xgettext:c-format */
7325 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
7326
7327 if (flags)
7328 fprintf (file, _("<Unrecognised flag bits set>"));
7329
7330 fputc ('\n', file);
7331
7332 return TRUE;
7333 }
7334
7335 /* Find dynamic relocs for H that apply to read-only sections. */
7336
7337 static asection *
7338 readonly_dynrelocs (struct elf_link_hash_entry *h)
7339 {
7340 struct elf_dyn_relocs *p;
7341
7342 for (p = elf_aarch64_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
7343 {
7344 asection *s = p->sec->output_section;
7345
7346 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7347 return p->sec;
7348 }
7349 return NULL;
7350 }
7351
7352 /* Return true if we need copy relocation against EH. */
7353
7354 static bfd_boolean
7355 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
7356 {
7357 struct elf_dyn_relocs *p;
7358 asection *s;
7359
7360 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7361 {
7362 /* If there is any pc-relative reference, we need to keep the copy
7363 relocation, to avoid propagating the relocation into the runtime,
7364 which current glibc does not support. */
7365 if (p->pc_count)
7366 return TRUE;
7367
7368 s = p->sec->output_section;
7369 /* Need copy relocation if it's against read-only section. */
7370 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7371 return TRUE;
7372 }
7373
7374 return FALSE;
7375 }
7376
7377 /* Adjust a symbol defined by a dynamic object and referenced by a
7378 regular object. The current definition is in some section of the
7379 dynamic object, but we're not including those sections. We have to
7380 change the definition to something the rest of the link can
7381 understand. */
7382
7383 static bfd_boolean
7384 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
7385 struct elf_link_hash_entry *h)
7386 {
7387 struct elf_aarch64_link_hash_table *htab;
7388 asection *s, *srel;
7389
7390 /* If this is a function, put it in the procedure linkage table. We
7391 will fill in the contents of the procedure linkage table later,
7392 when we know the address of the .got section. */
7393 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
7394 {
7395 if (h->plt.refcount <= 0
7396 || (h->type != STT_GNU_IFUNC
7397 && (SYMBOL_CALLS_LOCAL (info, h)
7398 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
7399 && h->root.type == bfd_link_hash_undefweak))))
7400 {
7401 /* This case can occur if we saw a CALL26 reloc in
7402 an input file, but the symbol wasn't referred to
7403 by a dynamic object or all references were
7404 garbage collected. In that case we don't need a PLT
7405 entry and can resolve the branch directly. */
7406 h->plt.offset = (bfd_vma) - 1;
7407 h->needs_plt = 0;
7408 }
7409
7410 return TRUE;
7411 }
7412 else
7413 /* Otherwise, reset to -1. */
7414 h->plt.offset = (bfd_vma) - 1;
7415
7416
7417 /* If this is a weak symbol, and there is a real definition, the
7418 processor independent code will have arranged for us to see the
7419 real definition first, and we can just use the same value. */
7420 if (h->is_weakalias)
7421 {
7422 struct elf_link_hash_entry *def = weakdef (h);
7423 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
7424 h->root.u.def.section = def->root.u.def.section;
7425 h->root.u.def.value = def->root.u.def.value;
7426 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
7427 h->non_got_ref = def->non_got_ref;
7428 return TRUE;
7429 }
7430
7431 /* If we are creating a shared library, we must presume that the
7432 only references to the symbol are via the global offset table.
7433 For such cases we need not do anything here; the relocations will
7434 be handled correctly by relocate_section. */
7435 if (bfd_link_pic (info))
7436 return TRUE;
7437
7438 /* If there are no references to this symbol that do not use the
7439 GOT, we don't need to generate a copy reloc. */
7440 if (!h->non_got_ref)
7441 return TRUE;
7442
7443 /* If -z nocopyreloc was given, we won't generate them either. */
7444 if (info->nocopyreloc)
7445 {
7446 h->non_got_ref = 0;
7447 return TRUE;
7448 }
7449
7450 if (ELIMINATE_COPY_RELOCS)
7451 {
7452 struct elf_aarch64_link_hash_entry *eh;
7453 /* If we don't find any dynamic relocs in read-only sections, then
7454 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
7455 eh = (struct elf_aarch64_link_hash_entry *) h;
7456 if (!need_copy_relocation_p (eh))
7457 {
7458 h->non_got_ref = 0;
7459 return TRUE;
7460 }
7461 }
7462
7463 /* We must allocate the symbol in our .dynbss section, which will
7464 become part of the .bss section of the executable. There will be
7465 an entry for this symbol in the .dynsym section. The dynamic
7466 object will contain position independent code, so all references
7467 from the dynamic object to this symbol will go through the global
7468 offset table. The dynamic linker will use the .dynsym entry to
7469 determine the address it must put in the global offset table, so
7470 both the dynamic object and the regular object will refer to the
7471 same memory location for the variable. */
7472
7473 htab = elf_aarch64_hash_table (info);
7474
7475 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
7476 to copy the initial value out of the dynamic object and into the
7477 runtime process image. */
7478 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
7479 {
7480 s = htab->root.sdynrelro;
7481 srel = htab->root.sreldynrelro;
7482 }
7483 else
7484 {
7485 s = htab->root.sdynbss;
7486 srel = htab->root.srelbss;
7487 }
7488 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
7489 {
7490 srel->size += RELOC_SIZE (htab);
7491 h->needs_copy = 1;
7492 }
7493
7494 return _bfd_elf_adjust_dynamic_copy (info, h, s);
7495
7496 }
7497
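/* Make sure the per-BFD array of local symbol bookkeeping entries
exists, allocating NUMBER zero-initialised elements on first use. */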
7498 static bfd_boolean
7499 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
7500 {
7501 struct elf_aarch64_local_symbol *locals;
7502 locals = elf_aarch64_locals (abfd);
7503 if (locals == NULL)
7504 {
7505 locals = (struct elf_aarch64_local_symbol *)
7506 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
7507 if (locals == NULL)
7508 return FALSE;
7509 elf_aarch64_locals (abfd) = locals;
7510 }
7511 return TRUE;
7512 }
7513
7514 /* Create the .got section to hold the global offset table. */
7515
7516 static bfd_boolean
7517 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
7518 {
7519 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
7520 flagword flags;
7521 asection *s;
7522 struct elf_link_hash_entry *h;
7523 struct elf_link_hash_table *htab = elf_hash_table (info);
7524
7525 /* This function may be called more than once. */
7526 if (htab->sgot != NULL)
7527 return TRUE;
7528
7529 flags = bed->dynamic_sec_flags;
7530
7531 s = bfd_make_section_anyway_with_flags (abfd,
7532 (bed->rela_plts_and_copies_p
7533 ? ".rela.got" : ".rel.got"),
7534 (bed->dynamic_sec_flags
7535 | SEC_READONLY));
7536 if (s == NULL
7537 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7538 return FALSE;
7539 htab->srelgot = s;
7540
7541 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7542 if (s == NULL
7543 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7544 return FALSE;
7545 htab->sgot = s;
7546 htab->sgot->size += GOT_ENTRY_SIZE;
7547
7548 if (bed->want_got_sym)
7549 {
7550 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7551 (or .got.plt) section. We don't do this in the linker script
7552 because we don't want to define the symbol if we are not creating
7553 a global offset table. */
7554 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7555 "_GLOBAL_OFFSET_TABLE_");
7556 elf_hash_table (info)->hgot = h;
7557 if (h == NULL)
7558 return FALSE;
7559 }
7560
7561 if (bed->want_got_plt)
7562 {
7563 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7564 if (s == NULL
7565 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7566 return FALSE;
7567 htab->sgotplt = s;
7568 }
7569
7570 /* The first bit of the global offset table is the header. */
7571 s->size += bed->got_header_size;
7572
7573 return TRUE;
7574 }
7575
7576 /* Look through the relocs for a section during the first phase. */
7577
7578 static bfd_boolean
7579 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7580 asection *sec, const Elf_Internal_Rela *relocs)
7581 {
7582 Elf_Internal_Shdr *symtab_hdr;
7583 struct elf_link_hash_entry **sym_hashes;
7584 const Elf_Internal_Rela *rel;
7585 const Elf_Internal_Rela *rel_end;
7586 asection *sreloc;
7587
7588 struct elf_aarch64_link_hash_table *htab;
7589
7590 if (bfd_link_relocatable (info))
7591 return TRUE;
7592
7593 BFD_ASSERT (is_aarch64_elf (abfd));
7594
7595 htab = elf_aarch64_hash_table (info);
7596 sreloc = NULL;
7597
7598 symtab_hdr = &elf_symtab_hdr (abfd);
7599 sym_hashes = elf_sym_hashes (abfd);
7600
7601 rel_end = relocs + sec->reloc_count;
7602 for (rel = relocs; rel < rel_end; rel++)
7603 {
7604 struct elf_link_hash_entry *h;
7605 unsigned int r_symndx;
7606 unsigned int r_type;
7607 bfd_reloc_code_real_type bfd_r_type;
7608 Elf_Internal_Sym *isym;
7609
7610 r_symndx = ELFNN_R_SYM (rel->r_info);
7611 r_type = ELFNN_R_TYPE (rel->r_info);
7612
7613 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7614 {
7615 /* xgettext:c-format */
7616 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
7617 return FALSE;
7618 }
7619
7620 if (r_symndx < symtab_hdr->sh_info)
7621 {
7622 /* A local symbol. */
7623 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7624 abfd, r_symndx);
7625 if (isym == NULL)
7626 return FALSE;
7627
7628 /* Check relocation against local STT_GNU_IFUNC symbol. */
7629 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7630 {
7631 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7632 TRUE);
7633 if (h == NULL)
7634 return FALSE;
7635
7636 /* Fake a STT_GNU_IFUNC symbol. */
7637 h->type = STT_GNU_IFUNC;
7638 h->def_regular = 1;
7639 h->ref_regular = 1;
7640 h->forced_local = 1;
7641 h->root.type = bfd_link_hash_defined;
7642 }
7643 else
7644 h = NULL;
7645 }
7646 else
7647 {
7648 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7649 while (h->root.type == bfd_link_hash_indirect
7650 || h->root.type == bfd_link_hash_warning)
7651 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7652 }
7653
7654 /* Could be done earlier, if h were already available. */
7655 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7656
7657 if (h != NULL)
7658 {
7659 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7660 This shows up in particular in an R_AARCH64_PREL64 in large model
7661 when calculating the pc-relative address of the .got section, which is
7662 used to initialize the gp register. */
7663 if (h->root.root.string
7664 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7665 {
7666 if (htab->root.dynobj == NULL)
7667 htab->root.dynobj = abfd;
7668
7669 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7670 return FALSE;
7671
7672 BFD_ASSERT (h == htab->root.hgot);
7673 }
7674
7675 /* Create the ifunc sections for static executables. If we
7676 never see an indirect function symbol and are not building
7677 a static executable, those sections will be empty and
7678 won't appear in the output. */
7679 switch (bfd_r_type)
7680 {
7681 default:
7682 break;
7683
7684 case BFD_RELOC_AARCH64_ADD_LO12:
7685 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7686 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7687 case BFD_RELOC_AARCH64_CALL26:
7688 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7689 case BFD_RELOC_AARCH64_JUMP26:
7690 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7691 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7692 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7693 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7694 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7695 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7696 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7697 case BFD_RELOC_AARCH64_NN:
7698 if (htab->root.dynobj == NULL)
7699 htab->root.dynobj = abfd;
7700 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7701 return FALSE;
7702 break;
7703 }
7704
7705 /* It is referenced by a non-shared object. */
7706 h->ref_regular = 1;
7707 }
7708
7709 switch (bfd_r_type)
7710 {
7711 case BFD_RELOC_AARCH64_16:
7712 #if ARCH_SIZE == 64
7713 case BFD_RELOC_AARCH64_32:
7714 #endif
7715 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
7716 {
7717 if (h != NULL
7718 /* This is an absolute symbol. It represents a value instead
7719 of an address. */
7720 && (bfd_is_abs_symbol (&h->root)
7721 /* This is an undefined symbol. */
7722 || h->root.type == bfd_link_hash_undefined))
7723 break;
7724
7725 /* For local symbols and for global symbols defined in a non-ABS
7726 section, it is assumed that the value is an address. */
7727 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7728 _bfd_error_handler
7729 /* xgettext:c-format */
7730 (_("%pB: relocation %s against `%s' can not be used when making "
7731 "a shared object"),
7732 abfd, elfNN_aarch64_howto_table[howto_index].name,
7733 (h) ? h->root.root.string : "a local symbol");
7734 bfd_set_error (bfd_error_bad_value);
7735 return FALSE;
7736 }
7737 else
7738 break;
7739
7740 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7741 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7742 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7743 case BFD_RELOC_AARCH64_MOVW_G3:
7744 if (bfd_link_pic (info))
7745 {
7746 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7747 _bfd_error_handler
7748 /* xgettext:c-format */
7749 (_("%pB: relocation %s against `%s' can not be used when making "
7750 "a shared object; recompile with -fPIC"),
7751 abfd, elfNN_aarch64_howto_table[howto_index].name,
7752 (h) ? h->root.root.string : "a local symbol");
7753 bfd_set_error (bfd_error_bad_value);
7754 return FALSE;
7755 }
7756 /* Fall through. */
7757
7758 case BFD_RELOC_AARCH64_16_PCREL:
7759 case BFD_RELOC_AARCH64_32_PCREL:
7760 case BFD_RELOC_AARCH64_64_PCREL:
7761 case BFD_RELOC_AARCH64_ADD_LO12:
7762 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7763 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7764 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7765 case BFD_RELOC_AARCH64_LDST128_LO12:
7766 case BFD_RELOC_AARCH64_LDST16_LO12:
7767 case BFD_RELOC_AARCH64_LDST32_LO12:
7768 case BFD_RELOC_AARCH64_LDST64_LO12:
7769 case BFD_RELOC_AARCH64_LDST8_LO12:
7770 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7771 if (h == NULL || bfd_link_pic (info))
7772 break;
7773 /* Fall through. */
7774
7775 case BFD_RELOC_AARCH64_NN:
7776
7777 /* We don't need to handle relocs into sections not going into
7778 the "real" output. */
7779 if ((sec->flags & SEC_ALLOC) == 0)
7780 break;
7781
7782 if (h != NULL)
7783 {
7784 if (!bfd_link_pic (info))
7785 h->non_got_ref = 1;
7786
7787 h->plt.refcount += 1;
7788 h->pointer_equality_needed = 1;
7789 }
7790
7791 /* No need to do anything if we're not creating a shared
7792 object. */
7793 if (!(bfd_link_pic (info)
7794 /* If on the other hand, we are creating an executable, we
7795 may need to keep relocations for symbols satisfied by a
7796 dynamic library if we manage to avoid copy relocs for the
7797 symbol.
7798
7799 NOTE: Currently, copy reloc elimination is not supported for
7800 pc-relative relocation types, because glibc has no dynamic
7801 relocation support for them. We still record the dynamic
7802 symbol reference for them. This is
7803 because one symbol may be referenced by both absolute
7804 relocation (for example, BFD_RELOC_AARCH64_NN) and
7805 pc-relative relocation. We need full symbol reference
7806 information to make correct decision later in
7807 elfNN_aarch64_adjust_dynamic_symbol. */
7808 || (ELIMINATE_COPY_RELOCS
7809 && !bfd_link_pic (info)
7810 && h != NULL
7811 && (h->root.type == bfd_link_hash_defweak
7812 || !h->def_regular))))
7813 break;
7814
7815 {
7816 struct elf_dyn_relocs *p;
7817 struct elf_dyn_relocs **head;
7818 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7819
7820 /* We must copy these reloc types into the output file.
7821 Create a reloc section in dynobj and make room for
7822 this reloc. */
7823 if (sreloc == NULL)
7824 {
7825 if (htab->root.dynobj == NULL)
7826 htab->root.dynobj = abfd;
7827
7828 sreloc = _bfd_elf_make_dynamic_reloc_section
7829 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
7830
7831 if (sreloc == NULL)
7832 return FALSE;
7833 }
7834
7835 /* If this is a global symbol, we count the number of
7836 relocations we need for this symbol. */
7837 if (h != NULL)
7838 {
7839 struct elf_aarch64_link_hash_entry *eh;
7840 eh = (struct elf_aarch64_link_hash_entry *) h;
7841 head = &eh->dyn_relocs;
7842 }
7843 else
7844 {
7845 /* Track dynamic relocs needed for local syms too.
7846 We really need local syms available to do this
7847 easily. Oh well. */
7848
7849 asection *s;
7850 void **vpp;
7851
7852 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7853 abfd, r_symndx);
7854 if (isym == NULL)
7855 return FALSE;
7856
7857 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
7858 if (s == NULL)
7859 s = sec;
7860
7861 /* Beware of type punned pointers vs strict aliasing
7862 rules. */
7863 vpp = &(elf_section_data (s)->local_dynrel);
7864 head = (struct elf_dyn_relocs **) vpp;
7865 }
7866
7867 p = *head;
7868 if (p == NULL || p->sec != sec)
7869 {
7870 size_t amt = sizeof *p;
7871 p = ((struct elf_dyn_relocs *)
7872 bfd_zalloc (htab->root.dynobj, amt));
7873 if (p == NULL)
7874 return FALSE;
7875 p->next = *head;
7876 *head = p;
7877 p->sec = sec;
7878 }
7879
7880 p->count += 1;
7881
7882 if (elfNN_aarch64_howto_table[howto_index].pc_relative)
7883 p->pc_count += 1;
7884 }
7885 break;
7886
7887 /* RR: We probably want to keep a consistency check that
7888 there are no dangling GOT_PAGE relocs. */
7889 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7890 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7891 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7892 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7893 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7894 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7895 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7896 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7897 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7898 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7899 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7900 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7901 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7902 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7903 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7904 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7905 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7906 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7907 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7908 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7909 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7910 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7911 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7912 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7913 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7914 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7915 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7916 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7917 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7918 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7919 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7920 {
7921 unsigned got_type;
7922 unsigned old_got_type;
7923
7924 got_type = aarch64_reloc_got_type (bfd_r_type);
7925
7926 if (h)
7927 {
7928 h->got.refcount += 1;
7929 old_got_type = elf_aarch64_hash_entry (h)->got_type;
7930 }
7931 else
7932 {
7933 struct elf_aarch64_local_symbol *locals;
7934
7935 if (!elfNN_aarch64_allocate_local_symbols
7936 (abfd, symtab_hdr->sh_info))
7937 return FALSE;
7938
7939 locals = elf_aarch64_locals (abfd);
7940 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7941 locals[r_symndx].got_refcount += 1;
7942 old_got_type = locals[r_symndx].got_type;
7943 }
7944
7945 /* If a variable is accessed with both general dynamic TLS
7946 methods, two slots may be created. */
7947 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
7948 got_type |= old_got_type;
7949
7950 /* We will already have issued an error message if there
7951 is a TLS/non-TLS mismatch, based on the symbol type.
7952 So just combine any TLS types needed. */
7953 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
7954 && got_type != GOT_NORMAL)
7955 got_type |= old_got_type;
7956
7957 /* If the symbol is accessed by both IE and GD methods, we
7958 are able to relax. Turn off the GD flag, without
7959 disturbing any other kind of TLS type that may be
7960 involved. */
7961 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
7962 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
7963
7964 if (old_got_type != got_type)
7965 {
7966 if (h != NULL)
7967 elf_aarch64_hash_entry (h)->got_type = got_type;
7968 else
7969 {
7970 struct elf_aarch64_local_symbol *locals;
7971 locals = elf_aarch64_locals (abfd);
7972 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7973 locals[r_symndx].got_type = got_type;
7974 }
7975 }
7976
7977 if (htab->root.dynobj == NULL)
7978 htab->root.dynobj = abfd;
7979 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7980 return FALSE;
7981 break;
7982 }
7983
7984 case BFD_RELOC_AARCH64_CALL26:
7985 case BFD_RELOC_AARCH64_JUMP26:
7986 /* If this is a local symbol then we resolve it
7987 directly without creating a PLT entry. */
7988 if (h == NULL)
7989 continue;
7990
7991 h->needs_plt = 1;
7992 if (h->plt.refcount <= 0)
7993 h->plt.refcount = 1;
7994 else
7995 h->plt.refcount += 1;
7996 break;
7997
7998 default:
7999 break;
8000 }
8001 }
8002
8003 return TRUE;
8004 }
8005
8006 /* Treat mapping symbols as special target symbols. */
8007
8008 static bfd_boolean
8009 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
8010 asymbol *sym)
8011 {
8012 return bfd_is_aarch64_special_symbol_name (sym->name,
8013 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
8014 }
8015
8016 /* If the ELF symbol SYM might be a function in SEC, return the
8017 function size and set *CODE_OFF to the function's entry point,
8018 otherwise return zero. */
8019
8020 static bfd_size_type
8021 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
8022 bfd_vma *code_off)
8023 {
8024 bfd_size_type size;
8025
8026 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
8027 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
8028 || sym->section != sec)
8029 return 0;
8030
8031 if (!(sym->flags & BSF_SYNTHETIC))
8032 switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
8033 {
8034 case STT_FUNC:
8035 case STT_NOTYPE:
8036 break;
8037 default:
8038 return 0;
8039 }
8040
8041 if ((sym->flags & BSF_LOCAL)
8042 && bfd_is_aarch64_special_symbol_name (sym->name,
8043 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
8044 return 0;
8045
8046 *code_off = sym->value;
8047 size = 0;
8048 if (!(sym->flags & BSF_SYNTHETIC))
8049 size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
8050 if (size == 0)
8051 size = 1;
8052 return size;
8053 }
8054
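/* Look up the caller of an inlined function, using the DWARF2
line information cached in the BFD. */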
8055 static bfd_boolean
8056 elfNN_aarch64_find_inliner_info (bfd *abfd,
8057 const char **filename_ptr,
8058 const char **functionname_ptr,
8059 unsigned int *line_ptr)
8060 {
8061 bfd_boolean found;
8062 found = _bfd_dwarf2_find_inliner_info
8063 (abfd, filename_ptr,
8064 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
8065 return found;
8066 }
8067
8068
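/* Fill in the ELF file header, recording the AArch64 ELF ABI
version in e_ident[EI_ABIVERSION]. */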
8069 static bfd_boolean
8070 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
8071 {
8072 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
8073
8074 if (!_bfd_elf_init_file_header (abfd, link_info))
8075 return FALSE;
8076
8077 i_ehdrp = elf_elfheader (abfd);
8078 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
8079 return TRUE;
8080 }
8081
8082 static enum elf_reloc_type_class
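/* Classify a dynamic relocation so the dynamic linker can group
relocations of the same kind; relocations against STT_GNU_IFUNC
symbols are reported as reloc_class_ifunc. */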
8083 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info,
8084 const asection *rel_sec ATTRIBUTE_UNUSED,
8085 const Elf_Internal_Rela *rela)
8086 {
8087 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
8088
8089 if (htab->root.dynsym != NULL
8090 && htab->root.dynsym->contents != NULL)
8091 {
8092 /* Check relocation against STT_GNU_IFUNC symbol if there are
8093 dynamic symbols. */
8094 bfd *abfd = info->output_bfd;
8095 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
8096 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
8097 if (r_symndx != STN_UNDEF)
8098 {
8099 Elf_Internal_Sym sym;
8100 if (!bed->s->swap_symbol_in (abfd,
8101 (htab->root.dynsym->contents
8102 + r_symndx * bed->s->sizeof_sym),
8103 0, &sym))
8104 {
8105 /* xgettext:c-format */
8106 _bfd_error_handler (_("%pB symbol number %lu references"
8107 " nonexistent SHT_SYMTAB_SHNDX section"),
8108 abfd, r_symndx);
8109 /* Ideally an error class should be returned here. */
8110 }
8111 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
8112 return reloc_class_ifunc;
8113 }
8114 }
8115
8116 switch ((int) ELFNN_R_TYPE (rela->r_info))
8117 {
8118 case AARCH64_R (IRELATIVE):
8119 return reloc_class_ifunc;
8120 case AARCH64_R (RELATIVE):
8121 return reloc_class_relative;
8122 case AARCH64_R (JUMP_SLOT):
8123 return reloc_class_plt;
8124 case AARCH64_R (COPY):
8125 return reloc_class_copy;
8126 default:
8127 return reloc_class_normal;
8128 }
8129 }
8130
8131 /* Handle an AArch64 specific section when reading an object file. This is
8132 called when bfd_section_from_shdr finds a section with an unknown
8133 type. */
8134
8135 static bfd_boolean
8136 elfNN_aarch64_section_from_shdr (bfd *abfd,
8137 Elf_Internal_Shdr *hdr,
8138 const char *name, int shindex)
8139 {
8140 /* There ought to be a place to keep ELF backend specific flags, but
8141 at the moment there isn't one. We just keep track of the
8142 sections by their name, instead. Fortunately, the ABI gives
8143 names for all the AArch64 specific sections, so we will probably get
8144 away with this. */
8145 switch (hdr->sh_type)
8146 {
8147 case SHT_AARCH64_ATTRIBUTES:
8148 break;
8149
8150 default:
8151 return FALSE;
8152 }
8153
8154 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
8155 return FALSE;
8156
8157 return TRUE;
8158 }
8159
8160 /* A structure used to record a list of sections, independently
8161 of the next and prev fields in the asection structure. */
8162 typedef struct section_list
8163 {
8164 asection *sec;
8165 struct section_list *next;
8166 struct section_list *prev;
8167 }
8168 section_list;
8169
8170 /* Unfortunately we need to keep a list of sections for which
8171 an _aarch64_elf_section_data structure has been allocated. This
8172 is because it is possible for functions like elfNN_aarch64_write_section
8173 to be called on a section which has had an elf_data_structure
8174 allocated for it (and so the used_by_bfd field is valid) but
8175 for which the AArch64 extended version of this structure - the
8176 _aarch64_elf_section_data structure - has not been allocated. */
8177 static section_list *sections_with_aarch64_elf_section_data = NULL;
8178
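/* Add SEC to the list of sections carrying AArch64 specific section
data; if the allocation fails the section is simply not recorded. */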
8179 static void
8180 record_section_with_aarch64_elf_section_data (asection *sec)
8181 {
8182 struct section_list *entry;
8183
8184 entry = bfd_malloc (sizeof (*entry));
8185 if (entry == NULL)
8186 return;
8187 entry->sec = sec;
8188 entry->next = sections_with_aarch64_elf_section_data;
8189 entry->prev = NULL;
8190 if (entry->next != NULL)
8191 entry->next->prev = entry;
8192 sections_with_aarch64_elf_section_data = entry;
8193 }
8194
8195 static struct section_list *
8196 find_aarch64_elf_section_entry (asection *sec)
8197 {
8198 struct section_list *entry;
8199 static struct section_list *last_entry = NULL;
8200
8201 /* This is a short cut for the typical case where the sections are added
8202 to the sections_with_aarch64_elf_section_data list in forward order and
8203 then looked up here in backwards order. This makes a real difference
8204 to the ld-srec/sec64k.exp linker test. */
8205 entry = sections_with_aarch64_elf_section_data;
8206 if (last_entry != NULL)
8207 {
8208 if (last_entry->sec == sec)
8209 entry = last_entry;
8210 else if (last_entry->next != NULL && last_entry->next->sec == sec)
8211 entry = last_entry->next;
8212 }
8213
8214 for (; entry; entry = entry->next)
8215 if (entry->sec == sec)
8216 break;
8217
8218 if (entry)
8219 /* Record the entry prior to this one - it is the entry we are
8220 most likely to want to locate next time. Also this way if we
8221 have been called from
8222 unrecord_section_with_aarch64_elf_section_data () we will not
8223 be caching a pointer that is about to be freed. */
8224 last_entry = entry->prev;
8225
8226 return entry;
8227 }
8228
8229 static void
8230 unrecord_section_with_aarch64_elf_section_data (asection *sec)
8231 {
8232 struct section_list *entry;
8233
8234 entry = find_aarch64_elf_section_entry (sec);
8235
8236 if (entry)
8237 {
8238 if (entry->prev != NULL)
8239 entry->prev->next = entry->next;
8240 if (entry->next != NULL)
8241 entry->next->prev = entry->prev;
8242 if (entry == sections_with_aarch64_elf_section_data)
8243 sections_with_aarch64_elf_section_data = entry->next;
8244 free (entry);
8245 }
8246 }
8247
8248
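/* Context handed to the mapping-symbol output routines below. */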
8249 typedef struct
8250 {
8251 void *finfo;
8252 struct bfd_link_info *info;
8253 asection *sec;
8254 int sec_shndx;
8255 int (*func) (void *, const char *, Elf_Internal_Sym *,
8256 asection *, struct elf_link_hash_entry *);
8257 } output_arch_syminfo;
8258
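/* The kinds of mapping symbol that we emit: $x marks A64 code,
$d marks data. */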
8259 enum map_symbol_type
8260 {
8261 AARCH64_MAP_INSN,
8262 AARCH64_MAP_DATA
8263 };
8264
8265
8266 /* Output a single mapping symbol. */
8267
8268 static bfd_boolean
8269 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
8270 enum map_symbol_type type, bfd_vma offset)
8271 {
8272 static const char *names[2] = { "$x", "$d" };
8273 Elf_Internal_Sym sym;
8274
8275 sym.st_value = (osi->sec->output_section->vma
8276 + osi->sec->output_offset + offset);
8277 sym.st_size = 0;
8278 sym.st_other = 0;
8279 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
8280 sym.st_shndx = osi->sec_shndx;
8281 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
8282 }
8283
8284 /* Output a single local symbol for a generated stub. */
8285
8286 static bfd_boolean
8287 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
8288 bfd_vma offset, bfd_vma size)
8289 {
8290 Elf_Internal_Sym sym;
8291
8292 sym.st_value = (osi->sec->output_section->vma
8293 + osi->sec->output_offset + offset);
8294 sym.st_size = size;
8295 sym.st_other = 0;
8296 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
8297 sym.st_shndx = osi->sec_shndx;
8298 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
8299 }
8300
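/* Called via bfd_hash_traverse: output the stub name symbol and the
mapping symbols for one stub, provided it lives in the section
currently being processed. */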
8301 static bfd_boolean
8302 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
8303 {
8304 struct elf_aarch64_stub_hash_entry *stub_entry;
8305 asection *stub_sec;
8306 bfd_vma addr;
8307 char *stub_name;
8308 output_arch_syminfo *osi;
8309
8310 /* Massage our args to the form they really have. */
8311 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
8312 osi = (output_arch_syminfo *) in_arg;
8313
8314 stub_sec = stub_entry->stub_sec;
8315
8316 /* Ensure this stub is attached to the current section being
8317 processed. */
8318 if (stub_sec != osi->sec)
8319 return TRUE;
8320
8321 addr = (bfd_vma) stub_entry->stub_offset;
8322
8323 stub_name = stub_entry->output_name;
8324
8325 switch (stub_entry->stub_type)
8326 {
8327 case aarch64_stub_adrp_branch:
8328 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8329 sizeof (aarch64_adrp_branch_stub)))
8330 return FALSE;
8331 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8332 return FALSE;
8333 break;
8334 case aarch64_stub_long_branch:
8335 if (!elfNN_aarch64_output_stub_sym
8336 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
8337 return FALSE;
8338 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8339 return FALSE;
8340 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
8341 return FALSE;
8342 break;
8343 case aarch64_stub_erratum_835769_veneer:
8344 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8345 sizeof (aarch64_erratum_835769_stub)))
8346 return FALSE;
8347 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8348 return FALSE;
8349 break;
8350 case aarch64_stub_erratum_843419_veneer:
8351 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8352 sizeof (aarch64_erratum_843419_stub)))
8353 return FALSE;
8354 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8355 return FALSE;
8356 break;
8357 case aarch64_stub_none:
8358 break;
8359
8360 default:
8361 abort ();
8362 }
8363
8364 return TRUE;
8365 }
8366
8367 /* Output mapping symbols for linker generated sections. */
8368
8369 static bfd_boolean
8370 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
8371 struct bfd_link_info *info,
8372 void *finfo,
8373 int (*func) (void *, const char *,
8374 Elf_Internal_Sym *,
8375 asection *,
8376 struct elf_link_hash_entry
8377 *))
8378 {
8379 output_arch_syminfo osi;
8380 struct elf_aarch64_link_hash_table *htab;
8381
8382 htab = elf_aarch64_hash_table (info);
8383
8384 osi.finfo = finfo;
8385 osi.info = info;
8386 osi.func = func;
8387
8388 /* Long-call stubs. */
8389 if (htab->stub_bfd && htab->stub_bfd->sections)
8390 {
8391 asection *stub_sec;
8392
8393 for (stub_sec = htab->stub_bfd->sections;
8394 stub_sec != NULL; stub_sec = stub_sec->next)
8395 {
8396 /* Ignore non-stub sections. */
8397 if (!strstr (stub_sec->name, STUB_SUFFIX))
8398 continue;
8399
8400 osi.sec = stub_sec;
8401
8402 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8403 (output_bfd, osi.sec->output_section);
8404
8405 /* The first instruction in a stub is always a branch. */
8406 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
8407 return FALSE;
8408
8409 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
8410 &osi);
8411 }
8412 }
8413
8414 /* Finally, output mapping symbols for the PLT. */
8415 if (!htab->root.splt || htab->root.splt->size == 0)
8416 return TRUE;
8417
8418 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8419 (output_bfd, htab->root.splt->output_section);
8420 osi.sec = htab->root.splt;
8421
8422 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
8423
8424 return TRUE;
8425
8426 }
8427
8428 /* Allocate target specific section data. */
8429
8430 static bfd_boolean
8431 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
8432 {
8433 if (!sec->used_by_bfd)
8434 {
8435 _aarch64_elf_section_data *sdata;
8436 size_t amt = sizeof (*sdata);
8437
8438 sdata = bfd_zalloc (abfd, amt);
8439 if (sdata == NULL)
8440 return FALSE;
8441 sec->used_by_bfd = sdata;
8442 }
8443
8444 record_section_with_aarch64_elf_section_data (sec);
8445
8446 return _bfd_elf_new_section_hook (abfd, sec);
8447 }
8448
8449
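/* bfd_map_over_sections callback used by the close/cleanup hooks
below to drop each section from the recorded-section list. */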
8450 static void
8451 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
8452 asection *sec,
8453 void *ignore ATTRIBUTE_UNUSED)
8454 {
8455 unrecord_section_with_aarch64_elf_section_data (sec);
8456 }
8457
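/* Forget any cached per-section data before the BFD is closed. */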
8458 static bfd_boolean
8459 elfNN_aarch64_close_and_cleanup (bfd *abfd)
8460 {
8461 if (abfd->sections)
8462 bfd_map_over_sections (abfd,
8463 unrecord_section_via_map_over_sections, NULL);
8464
8465 return _bfd_elf_close_and_cleanup (abfd);
8466 }
8467
8468 static bfd_boolean
8469 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
8470 {
8471 if (abfd->sections)
8472 bfd_map_over_sections (abfd,
8473 unrecord_section_via_map_over_sections, NULL);
8474
8475 return _bfd_free_cached_info (abfd);
8476 }
8477
8478 /* Create dynamic sections. This is different from the ARM backend in that
8479 the got, plt, gotplt and their relocation sections are all created in the
8480 standard part of the bfd elf backend. */
8481
8482 static bfd_boolean
8483 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
8484 struct bfd_link_info *info)
8485 {
8486 /* We need to create the .got section. */
8487 if (!aarch64_elf_create_got_section (dynobj, info))
8488 return FALSE;
8489
8490 return _bfd_elf_create_dynamic_sections (dynobj, info);
8491 }
8492
8493
8494 /* Allocate space in .plt, .got and associated reloc sections for
8495 dynamic relocs. */
8496
8497 static bfd_boolean
8498 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8499 {
8500 struct bfd_link_info *info;
8501 struct elf_aarch64_link_hash_table *htab;
8502 struct elf_aarch64_link_hash_entry *eh;
8503 struct elf_dyn_relocs *p;
8504
8505 /* An example of a bfd_link_hash_indirect symbol is a versioned
8506 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8507 -> __gxx_personality_v0(bfd_link_hash_defined)
8508
8509 There is no need to process bfd_link_hash_indirect symbols here
8510 because we will also be presented with the concrete instance of
8511 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8512 called to copy all relevant data from the generic to the concrete
8513 symbol instance. */
8514 if (h->root.type == bfd_link_hash_indirect)
8515 return TRUE;
8516
8517 if (h->root.type == bfd_link_hash_warning)
8518 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8519
8520 info = (struct bfd_link_info *) inf;
8521 htab = elf_aarch64_hash_table (info);
8522
8523 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
8524 here if it is defined and referenced in a non-shared object. */
8525 if (h->type == STT_GNU_IFUNC
8526 && h->def_regular)
8527 return TRUE;
8528 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8529 {
8530 /* Make sure this symbol is output as a dynamic symbol.
8531 Undefined weak syms won't yet be marked as dynamic. */
8532 if (h->dynindx == -1 && !h->forced_local
8533 && h->root.type == bfd_link_hash_undefweak)
8534 {
8535 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8536 return FALSE;
8537 }
8538
8539 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8540 {
8541 asection *s = htab->root.splt;
8542
8543 /* If this is the first .plt entry, make room for the special
8544 first entry. */
8545 if (s->size == 0)
8546 s->size += htab->plt_header_size;
8547
8548 h->plt.offset = s->size;
8549
8550 /* If this symbol is not defined in a regular file, and we are
8551 not generating a shared library, then set the symbol to this
8552 location in the .plt. This is required to make function
8553 pointers compare as equal between the normal executable and
8554 the shared library. */
8555 if (!bfd_link_pic (info) && !h->def_regular)
8556 {
8557 h->root.u.def.section = s;
8558 h->root.u.def.value = h->plt.offset;
8559 }
8560
8561 /* Make room for this entry. For now we only create the
8562 small model PLT entries. We later need to find a way
8563 of relaxing into these from the large model PLT entries. */
8564 s->size += htab->plt_entry_size;
8565
8566 /* We also need to make an entry in the .got.plt section, which
8567 will be placed in the .got section by the linker script. */
8568 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8569
8570 /* We also need to make an entry in the .rela.plt section. */
8571 htab->root.srelplt->size += RELOC_SIZE (htab);
8572
8573 /* We need to ensure that all GOT entries that serve the PLT
8574 are consecutive with the special GOT slots [0] [1] and
8575 [2]. Any additional relocations, such as
8576 R_AARCH64_TLSDESC, must be placed after the PLT related
8577 entries. We abuse the reloc_count such that during
8578 sizing we adjust reloc_count to indicate the number of
8579 PLT related reserved entries. In subsequent phases when
8580 filling in the contents of the reloc entries, PLT related
8581 entries are placed by computing their PLT index (0
8582 .. reloc_count), while other non-PLT relocs are placed
8583 at the slot indicated by reloc_count, and reloc_count is then
8584 updated. */
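/* Illustrative sketch of the resulting .got.plt layout (slots are
   GOT_ENTRY_SIZE bytes each, i.e. 8 bytes for LP64):

     [0] [1] [2]                   the three reserved slots mentioned above
     [3] .. [3 + reloc_count - 1]  one slot per PLT entry, in PLT order
     [3 + reloc_count] ..          TLSDESC double slots and any other
                                   non-PLT entries placed afterwards.  */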
8585
8586 htab->root.srelplt->reloc_count++;
8587
8588 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
8589 variant PCS symbols are present. */
8590 if (h->other & STO_AARCH64_VARIANT_PCS)
8591 htab->variant_pcs = 1;
8592
8593 }
8594 else
8595 {
8596 h->plt.offset = (bfd_vma) - 1;
8597 h->needs_plt = 0;
8598 }
8599 }
8600 else
8601 {
8602 h->plt.offset = (bfd_vma) - 1;
8603 h->needs_plt = 0;
8604 }
8605
8606 eh = (struct elf_aarch64_link_hash_entry *) h;
8607 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8608
8609 if (h->got.refcount > 0)
8610 {
8611 bfd_boolean dyn;
8612 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8613
8614 h->got.offset = (bfd_vma) - 1;
8615
8616 dyn = htab->root.dynamic_sections_created;
8617
8618 /* Make sure this symbol is output as a dynamic symbol.
8619 Undefined weak syms won't yet be marked as dynamic. */
8620 if (dyn && h->dynindx == -1 && !h->forced_local
8621 && h->root.type == bfd_link_hash_undefweak)
8622 {
8623 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8624 return FALSE;
8625 }
8626
8627 if (got_type == GOT_UNKNOWN)
8628 {
8629 }
8630 else if (got_type == GOT_NORMAL)
8631 {
8632 h->got.offset = htab->root.sgot->size;
8633 htab->root.sgot->size += GOT_ENTRY_SIZE;
8634 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8635 || h->root.type != bfd_link_hash_undefweak)
8636 && (bfd_link_pic (info)
8637 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
8638 /* Undefined weak symbol in static PIE resolves to 0 without
8639 any dynamic relocations. */
8640 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8641 {
8642 htab->root.srelgot->size += RELOC_SIZE (htab);
8643 }
8644 }
8645 else
8646 {
8647 int indx;
8648 if (got_type & GOT_TLSDESC_GD)
8649 {
8650 eh->tlsdesc_got_jump_table_offset =
8651 (htab->root.sgotplt->size
8652 - aarch64_compute_jump_table_size (htab));
8653 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8654 h->got.offset = (bfd_vma) - 2;
8655 }
8656
8657 if (got_type & GOT_TLS_GD)
8658 {
8659 h->got.offset = htab->root.sgot->size;
8660 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8661 }
8662
8663 if (got_type & GOT_TLS_IE)
8664 {
8665 h->got.offset = htab->root.sgot->size;
8666 htab->root.sgot->size += GOT_ENTRY_SIZE;
8667 }
8668
8669 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8670 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8671 || h->root.type != bfd_link_hash_undefweak)
8672 && (!bfd_link_executable (info)
8673 || indx != 0
8674 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8675 {
8676 if (got_type & GOT_TLSDESC_GD)
8677 {
8678 htab->root.srelplt->size += RELOC_SIZE (htab);
8679 /* Note reloc_count not incremented here! We have
8680 already adjusted reloc_count for this relocation
8681 type. */
8682
8683 /* TLSDESC PLT is now needed, but not yet determined. */
8684 htab->tlsdesc_plt = (bfd_vma) - 1;
8685 }
8686
8687 if (got_type & GOT_TLS_GD)
8688 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8689
8690 if (got_type & GOT_TLS_IE)
8691 htab->root.srelgot->size += RELOC_SIZE (htab);
8692 }
8693 }
8694 }
8695 else
8696 {
8697 h->got.offset = (bfd_vma) - 1;
8698 }
8699
8700 if (eh->dyn_relocs == NULL)
8701 return TRUE;
8702
8703 /* In the shared -Bsymbolic case, discard space allocated for
8704 dynamic pc-relative relocs against symbols which turn out to be
8705 defined in regular objects. For the normal shared case, discard
8706 space for pc-relative relocs that have become local due to symbol
8707 visibility changes. */
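/* For instance, a PC-relative dynamic reloc that was reserved against a
   protected function defined by this link itself becomes unnecessary once
   the symbol is known to bind locally: the displacement is fixed at static
   link time, so the reservation is dropped below.  */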
8708
8709 if (bfd_link_pic (info))
8710 {
8711 /* Relocs that use pc_count are those that appear on a call
8712 insn, or certain REL relocs that can be generated via assembly.
8713 We want calls to protected symbols to resolve directly to the
8714 function rather than going via the plt. If people want
8715 function pointer comparisons to work as expected then they
8716 should avoid writing weird assembly. */
8717 if (SYMBOL_CALLS_LOCAL (info, h))
8718 {
8719 struct elf_dyn_relocs **pp;
8720
8721 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
8722 {
8723 p->count -= p->pc_count;
8724 p->pc_count = 0;
8725 if (p->count == 0)
8726 *pp = p->next;
8727 else
8728 pp = &p->next;
8729 }
8730 }
8731
8732 /* Also discard relocs on undefined weak syms with non-default
8733 visibility. */
8734 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
8735 {
8736 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
8737 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8738 eh->dyn_relocs = NULL;
8739
8740 /* Make sure undefined weak symbols are output as dynamic
8741 symbols in PIEs. */
8742 else if (h->dynindx == -1
8743 && !h->forced_local
8744 && h->root.type == bfd_link_hash_undefweak
8745 && !bfd_elf_link_record_dynamic_symbol (info, h))
8746 return FALSE;
8747 }
8748
8749 }
8750 else if (ELIMINATE_COPY_RELOCS)
8751 {
8752 /* For the non-shared case, discard space for relocs against
8753 symbols which turn out to need copy relocs or are not
8754 dynamic. */
8755
8756 if (!h->non_got_ref
8757 && ((h->def_dynamic
8758 && !h->def_regular)
8759 || (htab->root.dynamic_sections_created
8760 && (h->root.type == bfd_link_hash_undefweak
8761 || h->root.type == bfd_link_hash_undefined))))
8762 {
8763 /* Make sure this symbol is output as a dynamic symbol.
8764 Undefined weak syms won't yet be marked as dynamic. */
8765 if (h->dynindx == -1
8766 && !h->forced_local
8767 && h->root.type == bfd_link_hash_undefweak
8768 && !bfd_elf_link_record_dynamic_symbol (info, h))
8769 return FALSE;
8770
8771 /* If that succeeded, we know we'll be keeping all the
8772 relocs. */
8773 if (h->dynindx != -1)
8774 goto keep;
8775 }
8776
8777 eh->dyn_relocs = NULL;
8778
8779 keep:;
8780 }
8781
8782 /* Finally, allocate space. */
8783 for (p = eh->dyn_relocs; p != NULL; p = p->next)
8784 {
8785 asection *sreloc;
8786
8787 sreloc = elf_section_data (p->sec)->sreloc;
8788
8789 BFD_ASSERT (sreloc != NULL);
8790
8791 sreloc->size += p->count * RELOC_SIZE (htab);
8792 }
8793
8794 return TRUE;
8795 }
8796
8797 /* Allocate space in .plt, .got and associated reloc sections for
8798 ifunc dynamic relocs. */
8799
8800 static bfd_boolean
8801 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
8802 void *inf)
8803 {
8804 struct bfd_link_info *info;
8805 struct elf_aarch64_link_hash_table *htab;
8806 struct elf_aarch64_link_hash_entry *eh;
8807
8808 /* An example of a bfd_link_hash_indirect symbol is a versioned
8809 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8810 -> __gxx_personality_v0(bfd_link_hash_defined)
8811
8812 There is no need to process bfd_link_hash_indirect symbols here
8813 because we will also be presented with the concrete instance of
8814 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8815 called to copy all relevant data from the generic to the concrete
8816 symbol instance. */
8817 if (h->root.type == bfd_link_hash_indirect)
8818 return TRUE;
8819
8820 if (h->root.type == bfd_link_hash_warning)
8821 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8822
8823 info = (struct bfd_link_info *) inf;
8824 htab = elf_aarch64_hash_table (info);
8825
8826 eh = (struct elf_aarch64_link_hash_entry *) h;
8827
8828 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
8829 here if it is defined and referenced in a non-shared object. */
8830 if (h->type == STT_GNU_IFUNC
8831 && h->def_regular)
8832 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
8833 &eh->dyn_relocs,
8834 NULL,
8835 htab->plt_entry_size,
8836 htab->plt_header_size,
8837 GOT_ENTRY_SIZE,
8838 FALSE);
8839 return TRUE;
8840 }
8841
8842 /* Allocate space in .plt, .got and associated reloc sections for
8843 local ifunc dynamic relocs. */
8844
8845 static bfd_boolean
8846 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
8847 {
8848 struct elf_link_hash_entry *h
8849 = (struct elf_link_hash_entry *) *slot;
8850
8851 if (h->type != STT_GNU_IFUNC
8852 || !h->def_regular
8853 || !h->ref_regular
8854 || !h->forced_local
8855 || h->root.type != bfd_link_hash_defined)
8856 abort ();
8857
8858 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
8859 }
8860
8861 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
8862 read-only sections. */
8863
8864 static bfd_boolean
8865 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
8866 {
8867 asection *sec;
8868
8869 if (h->root.type == bfd_link_hash_indirect)
8870 return TRUE;
8871
8872 sec = readonly_dynrelocs (h);
8873 if (sec != NULL)
8874 {
8875 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
8876
8877 info->flags |= DF_TEXTREL;
8878 info->callbacks->minfo
8879 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
8880 sec->owner, h->root.root.string, sec);
8881
8882 /* Not an error, just cut short the traversal. */
8883 return FALSE;
8884 }
8885 return TRUE;
8886 }
8887
8888 /* This is the most important function of all. Innocuously named
8889 though! */
8890
8891 static bfd_boolean
8892 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
8893 struct bfd_link_info *info)
8894 {
8895 struct elf_aarch64_link_hash_table *htab;
8896 bfd *dynobj;
8897 asection *s;
8898 bfd_boolean relocs;
8899 bfd *ibfd;
8900
8901 htab = elf_aarch64_hash_table ((info));
8902 dynobj = htab->root.dynobj;
8903
8904 BFD_ASSERT (dynobj != NULL);
8905
8906 if (htab->root.dynamic_sections_created)
8907 {
8908 if (bfd_link_executable (info) && !info->nointerp)
8909 {
8910 s = bfd_get_linker_section (dynobj, ".interp");
8911 if (s == NULL)
8912 abort ();
8913 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
8914 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
8915 }
8916 }
8917
8918 /* Set up .got offsets for local syms, and space for local dynamic
8919 relocs. */
8920 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
8921 {
8922 struct elf_aarch64_local_symbol *locals = NULL;
8923 Elf_Internal_Shdr *symtab_hdr;
8924 asection *srel;
8925 unsigned int i;
8926
8927 if (!is_aarch64_elf (ibfd))
8928 continue;
8929
8930 for (s = ibfd->sections; s != NULL; s = s->next)
8931 {
8932 struct elf_dyn_relocs *p;
8933
8934 for (p = (struct elf_dyn_relocs *)
8935 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
8936 {
8937 if (!bfd_is_abs_section (p->sec)
8938 && bfd_is_abs_section (p->sec->output_section))
8939 {
8940 /* Input section has been discarded, either because
8941 it is a copy of a linkonce section or due to
8942 linker script /DISCARD/, so we'll be discarding
8943 the relocs too. */
8944 }
8945 else if (p->count != 0)
8946 {
8947 srel = elf_section_data (p->sec)->sreloc;
8948 srel->size += p->count * RELOC_SIZE (htab);
8949 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
8950 info->flags |= DF_TEXTREL;
8951 }
8952 }
8953 }
8954
8955 locals = elf_aarch64_locals (ibfd);
8956 if (!locals)
8957 continue;
8958
8959 symtab_hdr = &elf_symtab_hdr (ibfd);
8960 srel = htab->root.srelgot;
8961 for (i = 0; i < symtab_hdr->sh_info; i++)
8962 {
8963 locals[i].got_offset = (bfd_vma) - 1;
8964 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8965 if (locals[i].got_refcount > 0)
8966 {
8967 unsigned got_type = locals[i].got_type;
8968 if (got_type & GOT_TLSDESC_GD)
8969 {
8970 locals[i].tlsdesc_got_jump_table_offset =
8971 (htab->root.sgotplt->size
8972 - aarch64_compute_jump_table_size (htab));
8973 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8974 locals[i].got_offset = (bfd_vma) - 2;
8975 }
8976
8977 if (got_type & GOT_TLS_GD)
8978 {
8979 locals[i].got_offset = htab->root.sgot->size;
8980 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8981 }
8982
8983 if (got_type & GOT_TLS_IE
8984 || got_type & GOT_NORMAL)
8985 {
8986 locals[i].got_offset = htab->root.sgot->size;
8987 htab->root.sgot->size += GOT_ENTRY_SIZE;
8988 }
8989
8990 if (got_type == GOT_UNKNOWN)
8991 {
8992 }
8993
8994 if (bfd_link_pic (info))
8995 {
8996 if (got_type & GOT_TLSDESC_GD)
8997 {
8998 htab->root.srelplt->size += RELOC_SIZE (htab);
8999 /* Note RELOC_COUNT not incremented here! */
9000 htab->tlsdesc_plt = (bfd_vma) - 1;
9001 }
9002
9003 if (got_type & GOT_TLS_GD)
9004 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9005
9006 if (got_type & GOT_TLS_IE
9007 || got_type & GOT_NORMAL)
9008 htab->root.srelgot->size += RELOC_SIZE (htab);
9009 }
9010 }
9011 else
9012 {
9013 locals[i].got_refcount = (bfd_vma) - 1;
9014 }
9015 }
9016 }
9017
9018
9019 /* Allocate global sym .plt and .got entries, and space for global
9020 sym dynamic relocs. */
9021 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9022 info);
9023
9024 /* Allocate global ifunc sym .plt and .got entries, and space for global
9025 ifunc sym dynamic relocs. */
9026 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9027 info);
9028
9029 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9030 htab_traverse (htab->loc_hash_table,
9031 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9032 info);
9033
9034 /* For every jump slot reserved in the sgotplt, reloc_count is
9035 incremented. However, when we reserve space for TLS descriptors,
9036 it's not incremented, so in order to compute the space reserved
9037 for them, it suffices to multiply the reloc count by the jump
9038 slot size. */
9039
9040 if (htab->root.srelplt)
9041 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
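/* A sketch of the arithmetic, assuming aarch64_compute_jump_table_size
   returns srelplt->reloc_count * GOT_ENTRY_SIZE: with 8-byte GOT entries
   and 4 PLT jump slots recorded in reloc_count, sgotplt_jump_table_size
   is 32 bytes, the portion of .got.plt occupied by the PLT jump slots
   (not counting the reserved header slots); the TLSDESC double slots
   reserved above sit beyond that region.  */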
9042
9043 if (htab->tlsdesc_plt)
9044 {
9045 if (htab->root.splt->size == 0)
9046 htab->root.splt->size += htab->plt_header_size;
9047
9048 /* If we're not using lazy TLS relocations, don't generate the
9049 GOT and PLT entry required. */
9050 if (!(info->flags & DF_BIND_NOW))
9051 {
9052 htab->tlsdesc_plt = htab->root.splt->size;
9053 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9054
9055 htab->dt_tlsdesc_got = htab->root.sgot->size;
9056 htab->root.sgot->size += GOT_ENTRY_SIZE;
9057 }
9058 }
9059
9060 /* Init mapping symbol information to use later to distinguish between
9061 code and data while scanning for errata. */
9062 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9063 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9064 {
9065 if (!is_aarch64_elf (ibfd))
9066 continue;
9067 bfd_elfNN_aarch64_init_maps (ibfd);
9068 }
9069
9070 /* We now have determined the sizes of the various dynamic sections.
9071 Allocate memory for them. */
9072 relocs = FALSE;
9073 for (s = dynobj->sections; s != NULL; s = s->next)
9074 {
9075 if ((s->flags & SEC_LINKER_CREATED) == 0)
9076 continue;
9077
9078 if (s == htab->root.splt
9079 || s == htab->root.sgot
9080 || s == htab->root.sgotplt
9081 || s == htab->root.iplt
9082 || s == htab->root.igotplt
9083 || s == htab->root.sdynbss
9084 || s == htab->root.sdynrelro)
9085 {
9086 /* Strip this section if we don't need it; see the
9087 comment below. */
9088 }
9089 else if (CONST_STRNEQ (bfd_section_name (s), ".rela"))
9090 {
9091 if (s->size != 0 && s != htab->root.srelplt)
9092 relocs = TRUE;
9093
9094 /* We use the reloc_count field as a counter if we need
9095 to copy relocs into the output file. */
9096 if (s != htab->root.srelplt)
9097 s->reloc_count = 0;
9098 }
9099 else
9100 {
9101 /* It's not one of our sections, so don't allocate space. */
9102 continue;
9103 }
9104
9105 if (s->size == 0)
9106 {
9107 /* If we don't need this section, strip it from the
9108 output file. This is mostly to handle .rela.bss and
9109 .rela.plt. We must create both sections in
9110 create_dynamic_sections, because they must be created
9111 before the linker maps input sections to output
9112 sections. The linker does that before
9113 adjust_dynamic_symbol is called, and it is that
9114 function which decides whether anything needs to go
9115 into these sections. */
9116 s->flags |= SEC_EXCLUDE;
9117 continue;
9118 }
9119
9120 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9121 continue;
9122
9123 /* Allocate memory for the section contents. We use bfd_zalloc
9124 here in case unused entries are not reclaimed before the
9125 section's contents are written out. This should not happen,
9126 but this way if it does, we get a R_AARCH64_NONE reloc instead
9127 of garbage. */
9128 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9129 if (s->contents == NULL)
9130 return FALSE;
9131 }
9132
9133 if (htab->root.dynamic_sections_created)
9134 {
9135 /* Add some entries to the .dynamic section. We fill in the
9136 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9137 must add the entries now so that we get the correct size for
9138 the .dynamic section. The DT_DEBUG entry is filled in by the
9139 dynamic linker and used by the debugger. */
9140 #define add_dynamic_entry(TAG, VAL) \
9141 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9142
9143 if (bfd_link_executable (info))
9144 {
9145 if (!add_dynamic_entry (DT_DEBUG, 0))
9146 return FALSE;
9147 }
9148
9149 if (htab->root.splt->size != 0)
9150 {
9151 if (!add_dynamic_entry (DT_PLTGOT, 0)
9152 || !add_dynamic_entry (DT_PLTRELSZ, 0)
9153 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
9154 || !add_dynamic_entry (DT_JMPREL, 0))
9155 return FALSE;
9156
9157 if (htab->variant_pcs
9158 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9159 return FALSE;
9160
9161 if (htab->tlsdesc_plt
9162 && !(info->flags & DF_BIND_NOW)
9163 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
9164 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
9165 return FALSE;
9166
9167 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9168 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9169 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9170 return FALSE;
9171
9172 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9173 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9174 return FALSE;
9175
9176 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9177 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9178 return FALSE;
9179 }
9180
9181 if (relocs)
9182 {
9183 if (!add_dynamic_entry (DT_RELA, 0)
9184 || !add_dynamic_entry (DT_RELASZ, 0)
9185 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
9186 return FALSE;
9187
9188 /* If any dynamic relocs apply to a read-only section,
9189 then we need a DT_TEXTREL entry. */
9190 if ((info->flags & DF_TEXTREL) == 0)
9191 elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
9192
9193 if ((info->flags & DF_TEXTREL) != 0)
9194 {
9195 if (!add_dynamic_entry (DT_TEXTREL, 0))
9196 return FALSE;
9197 }
9198 }
9199 }
9200 #undef add_dynamic_entry
9201
9202 return TRUE;
9203 }
9204
9205 static inline void
9206 elf_aarch64_update_plt_entry (bfd *output_bfd,
9207 bfd_reloc_code_real_type r_type,
9208 bfd_byte *plt_entry, bfd_vma value)
9209 {
9210 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9211
9212 /* FIXME: We should check the return value from this function call. */
9213 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9214 }
9215
9216 static void
9217 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9218 struct elf_aarch64_link_hash_table
9219 *htab, bfd *output_bfd,
9220 struct bfd_link_info *info)
9221 {
9222 bfd_byte *plt_entry;
9223 bfd_vma plt_index;
9224 bfd_vma got_offset;
9225 bfd_vma gotplt_entry_address;
9226 bfd_vma plt_entry_address;
9227 Elf_Internal_Rela rela;
9228 bfd_byte *loc;
9229 asection *plt, *gotplt, *relplt;
9230
9231 /* When building a static executable, use .iplt, .igot.plt and
9232 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9233 if (htab->root.splt != NULL)
9234 {
9235 plt = htab->root.splt;
9236 gotplt = htab->root.sgotplt;
9237 relplt = htab->root.srelplt;
9238 }
9239 else
9240 {
9241 plt = htab->root.iplt;
9242 gotplt = htab->root.igotplt;
9243 relplt = htab->root.irelplt;
9244 }
9245
9246 /* Get the index in the procedure linkage table which
9247 corresponds to this symbol. This is the index of this symbol
9248 in all the symbols for which we are making plt entries. The
9249 first entry in the procedure linkage table is reserved.
9250
9251 Get the offset into the .got table of the entry that
9252 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9253 bytes. The first three are reserved for the dynamic linker.
9254
9255 For static executables, we don't reserve anything. */
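/* A minimal worked example, assuming the plain ELF64 small-model sizes
   (plt_header_size == 32, plt_entry_size == 16, GOT_ENTRY_SIZE == 8):
   the first symbol given a PLT entry has h->plt.offset == 32, so

     plt_index  = (32 - 32) / 16 = 0
     got_offset = (0 + 3) * 8    = 24

   i.e. it uses the fourth .got.plt slot, immediately after the three
   reserved ones.  */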
9256
9257 if (plt == htab->root.splt)
9258 {
9259 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9260 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9261 }
9262 else
9263 {
9264 plt_index = h->plt.offset / htab->plt_entry_size;
9265 got_offset = plt_index * GOT_ENTRY_SIZE;
9266 }
9267
9268 plt_entry = plt->contents + h->plt.offset;
9269 plt_entry_address = plt->output_section->vma
9270 + plt->output_offset + h->plt.offset;
9271 gotplt_entry_address = gotplt->output_section->vma +
9272 gotplt->output_offset + got_offset;
9273
9274 /* Copy in the boiler-plate for the PLTn entry. */
9275 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
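/* For reference, the small-model boiler-plate copied above is of the
   shape (a sketch of the plain ELF64 flavour; BTI-enabled variants
   prepend a BTI landing pad, handled just below):

     adrp x16, PLT_GOT + n * 8
     ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
     add  x16, x16, #:lo12:PLT_GOT + n * 8
     br   x17

   The three update calls that follow patch the ADRP, LDR and ADD
   immediates with the real .got.plt slot address.  */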
9276
9277 /* First instruction in BTI enabled PLT stub is a BTI
9278 instruction so skip it. */
9279 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9280 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9281 plt_entry = plt_entry + 4;
9282
9283 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9284 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9285 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9286 plt_entry,
9287 PG (gotplt_entry_address) -
9288 PG (plt_entry_address));
9289
9290 /* Fill in the lo12 bits for the load from the pltgot. */
9291 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9292 plt_entry + 4,
9293 PG_OFFSET (gotplt_entry_address));
9294
9295 /* Fill in the lo12 bits for the add from the pltgot entry. */
9296 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9297 plt_entry + 8,
9298 PG_OFFSET (gotplt_entry_address));
9299
9300 /* All the GOTPLT entries are essentially initialized to PLT0. */
9301 bfd_put_NN (output_bfd,
9302 plt->output_section->vma + plt->output_offset,
9303 gotplt->contents + got_offset);
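/* (This initial value is what makes lazy binding work: until the loader
   resolves the symbol, a jump through this .got.plt slot lands back on
   PLT0, which enters the resolver.)  */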
9304
9305 rela.r_offset = gotplt_entry_address;
9306
9307 if (h->dynindx == -1
9308 || ((bfd_link_executable (info)
9309 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9310 && h->def_regular
9311 && h->type == STT_GNU_IFUNC))
9312 {
9313 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9314 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9315 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9316 rela.r_addend = (h->root.u.def.value
9317 + h->root.u.def.section->output_section->vma
9318 + h->root.u.def.section->output_offset);
9319 }
9320 else
9321 {
9322 /* Fill in the entry in the .rela.plt section. */
9323 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9324 rela.r_addend = 0;
9325 }
9326
9327 /* Compute the relocation entry to be used based on the PLT index and do
9328 not adjust reloc_count. The reloc_count has already been adjusted
9329 to account for this entry. */
9330 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9331 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9332 }
9333
9334 /* Size sections even though they're not dynamic. We use this hook to set
9335 up _TLS_MODULE_BASE_, if needed. */
9336
9337 static bfd_boolean
9338 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9339 struct bfd_link_info *info)
9340 {
9341 asection *tls_sec;
9342
9343 if (bfd_link_relocatable (info))
9344 return TRUE;
9345
9346 tls_sec = elf_hash_table (info)->tls_sec;
9347
9348 if (tls_sec)
9349 {
9350 struct elf_link_hash_entry *tlsbase;
9351
9352 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9353 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
9354
9355 if (tlsbase)
9356 {
9357 struct bfd_link_hash_entry *h = NULL;
9358 const struct elf_backend_data *bed =
9359 get_elf_backend_data (output_bfd);
9360
9361 if (!(_bfd_generic_link_add_one_symbol
9362 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9363 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
9364 return FALSE;
9365
9366 tlsbase->type = STT_TLS;
9367 tlsbase = (struct elf_link_hash_entry *) h;
9368 tlsbase->def_regular = 1;
9369 tlsbase->other = STV_HIDDEN;
9370 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
9371 }
9372 }
9373
9374 return TRUE;
9375 }
9376
9377 /* Finish up dynamic symbol handling. We set the contents of various
9378 dynamic sections here. */
9379
9380 static bfd_boolean
9381 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9382 struct bfd_link_info *info,
9383 struct elf_link_hash_entry *h,
9384 Elf_Internal_Sym *sym)
9385 {
9386 struct elf_aarch64_link_hash_table *htab;
9387 htab = elf_aarch64_hash_table (info);
9388
9389 if (h->plt.offset != (bfd_vma) - 1)
9390 {
9391 asection *plt, *gotplt, *relplt;
9392
9393 /* This symbol has an entry in the procedure linkage table. Set
9394 it up. */
9395
9396 /* When building a static executable, use .iplt, .igot.plt and
9397 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9398 if (htab->root.splt != NULL)
9399 {
9400 plt = htab->root.splt;
9401 gotplt = htab->root.sgotplt;
9402 relplt = htab->root.srelplt;
9403 }
9404 else
9405 {
9406 plt = htab->root.iplt;
9407 gotplt = htab->root.igotplt;
9408 relplt = htab->root.irelplt;
9409 }
9410
9411 /* This symbol has an entry in the procedure linkage table. Set
9412 it up. */
9413 if ((h->dynindx == -1
9414 && !((h->forced_local || bfd_link_executable (info))
9415 && h->def_regular
9416 && h->type == STT_GNU_IFUNC))
9417 || plt == NULL
9418 || gotplt == NULL
9419 || relplt == NULL)
9420 return FALSE;
9421
9422 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9423 if (!h->def_regular)
9424 {
9425 /* Mark the symbol as undefined, rather than as defined in
9426 the .plt section. */
9427 sym->st_shndx = SHN_UNDEF;
9428 /* If the symbol is weak we need to clear the value.
9429 Otherwise, the PLT entry would provide a definition for
9430 the symbol even if the symbol wasn't defined anywhere,
9431 and so the symbol would never be NULL. Leave the value if
9432 there were any relocations where pointer equality matters
9433 (this is a clue for the dynamic linker, to make function
9434 pointer comparisons work between an application and shared
9435 library). */
9436 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9437 sym->st_value = 0;
9438 }
9439 }
9440
9441 if (h->got.offset != (bfd_vma) - 1
9442 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9443 /* Undefined weak symbol in static PIE resolves to 0 without
9444 any dynamic relocations. */
9445 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9446 {
9447 Elf_Internal_Rela rela;
9448 bfd_byte *loc;
9449
9450 /* This symbol has an entry in the global offset table. Set it
9451 up. */
9452 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9453 abort ();
9454
9455 rela.r_offset = (htab->root.sgot->output_section->vma
9456 + htab->root.sgot->output_offset
9457 + (h->got.offset & ~(bfd_vma) 1));
9458
9459 if (h->def_regular
9460 && h->type == STT_GNU_IFUNC)
9461 {
9462 if (bfd_link_pic (info))
9463 {
9464 /* Generate R_AARCH64_GLOB_DAT. */
9465 goto do_glob_dat;
9466 }
9467 else
9468 {
9469 asection *plt;
9470
9471 if (!h->pointer_equality_needed)
9472 abort ();
9473
9474 /* For non-shared object, we can't use .got.plt, which
9475 contains the real function address if we need pointer
9476 equality. We load the GOT entry with the PLT entry. */
9477 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9478 bfd_put_NN (output_bfd, (plt->output_section->vma
9479 + plt->output_offset
9480 + h->plt.offset),
9481 htab->root.sgot->contents
9482 + (h->got.offset & ~(bfd_vma) 1));
9483 return TRUE;
9484 }
9485 }
9486 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9487 {
9488 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9489 return FALSE;
9490
9491 BFD_ASSERT ((h->got.offset & 1) != 0);
9492 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9493 rela.r_addend = (h->root.u.def.value
9494 + h->root.u.def.section->output_section->vma
9495 + h->root.u.def.section->output_offset);
9496 }
9497 else
9498 {
9499 do_glob_dat:
9500 BFD_ASSERT ((h->got.offset & 1) == 0);
9501 bfd_put_NN (output_bfd, (bfd_vma) 0,
9502 htab->root.sgot->contents + h->got.offset);
9503 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9504 rela.r_addend = 0;
9505 }
9506
9507 loc = htab->root.srelgot->contents;
9508 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9509 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9510 }
9511
9512 if (h->needs_copy)
9513 {
9514 Elf_Internal_Rela rela;
9515 asection *s;
9516 bfd_byte *loc;
9517
9518 /* This symbol needs a copy reloc. Set it up. */
9519 if (h->dynindx == -1
9520 || (h->root.type != bfd_link_hash_defined
9521 && h->root.type != bfd_link_hash_defweak)
9522 || htab->root.srelbss == NULL)
9523 abort ();
9524
9525 rela.r_offset = (h->root.u.def.value
9526 + h->root.u.def.section->output_section->vma
9527 + h->root.u.def.section->output_offset);
9528 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9529 rela.r_addend = 0;
9530 if (h->root.u.def.section == htab->root.sdynrelro)
9531 s = htab->root.sreldynrelro;
9532 else
9533 s = htab->root.srelbss;
9534 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9535 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9536 }
9537
9538 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9539 be NULL for local symbols. */
9540 if (sym != NULL
9541 && (h == elf_hash_table (info)->hdynamic
9542 || h == elf_hash_table (info)->hgot))
9543 sym->st_shndx = SHN_ABS;
9544
9545 return TRUE;
9546 }
9547
9548 /* Finish up local dynamic symbol handling. We set the contents of
9549 various dynamic sections here. */
9550
9551 static bfd_boolean
9552 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9553 {
9554 struct elf_link_hash_entry *h
9555 = (struct elf_link_hash_entry *) *slot;
9556 struct bfd_link_info *info
9557 = (struct bfd_link_info *) inf;
9558
9559 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9560 info, h, NULL);
9561 }
9562
9563 static void
9564 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9565 struct elf_aarch64_link_hash_table
9566 *htab)
9567 {
9568 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
9569 small and large PLTs and at the moment just generates
9570 the small PLT. */
9571
9572 /* PLT0 of the small PLT looks like this in ELF64 -
9573 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9574 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9575 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9576 // symbol resolver
9577 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9578 // GOTPLT entry for this.
9579 br x17
9580 PLT0 will be slightly different in ELF32 due to different got entry
9581 size. */
9582 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9583 bfd_vma plt_base;
9584
9585
9586 memcpy (htab->root.splt->contents, htab->plt0_entry,
9587 htab->plt_header_size);
9588 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
9589 htab->plt_header_size;
9590
9591 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9592 + htab->root.sgotplt->output_offset
9593 + GOT_ENTRY_SIZE * 2);
9594
9595 plt_base = htab->root.splt->output_section->vma +
9596 htab->root.splt->output_offset;
9597
9598 /* First instruction in BTI enabled PLT stub is a BTI
9599 instruction so skip it. */
9600 bfd_byte *plt0_entry = htab->root.splt->contents;
9601 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9602 plt0_entry = plt0_entry + 4;
9603
9604 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9605 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9606 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9607 plt0_entry + 4,
9608 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9609
9610 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9611 plt0_entry + 8,
9612 PG_OFFSET (plt_got_2nd_ent));
9613
9614 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9615 plt0_entry + 12,
9616 PG_OFFSET (plt_got_2nd_ent));
9617 }
9618
9619 static bfd_boolean
9620 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9621 struct bfd_link_info *info)
9622 {
9623 struct elf_aarch64_link_hash_table *htab;
9624 bfd *dynobj;
9625 asection *sdyn;
9626
9627 htab = elf_aarch64_hash_table (info);
9628 dynobj = htab->root.dynobj;
9629 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9630
9631 if (htab->root.dynamic_sections_created)
9632 {
9633 ElfNN_External_Dyn *dyncon, *dynconend;
9634
9635 if (sdyn == NULL || htab->root.sgot == NULL)
9636 abort ();
9637
9638 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9639 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9640 for (; dyncon < dynconend; dyncon++)
9641 {
9642 Elf_Internal_Dyn dyn;
9643 asection *s;
9644
9645 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9646
9647 switch (dyn.d_tag)
9648 {
9649 default:
9650 continue;
9651
9652 case DT_PLTGOT:
9653 s = htab->root.sgotplt;
9654 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9655 break;
9656
9657 case DT_JMPREL:
9658 s = htab->root.srelplt;
9659 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9660 break;
9661
9662 case DT_PLTRELSZ:
9663 s = htab->root.srelplt;
9664 dyn.d_un.d_val = s->size;
9665 break;
9666
9667 case DT_TLSDESC_PLT:
9668 s = htab->root.splt;
9669 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9670 + htab->tlsdesc_plt;
9671 break;
9672
9673 case DT_TLSDESC_GOT:
9674 s = htab->root.sgot;
9675 BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
9676 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9677 + htab->dt_tlsdesc_got;
9678 break;
9679 }
9680
9681 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9682 }
9683
9684 }
9685
9686 /* Fill in the special first entry in the procedure linkage table. */
9687 if (htab->root.splt && htab->root.splt->size > 0)
9688 {
9689 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9690
9691 elf_section_data (htab->root.splt->output_section)->
9692 this_hdr.sh_entsize = htab->plt_entry_size;
9693
9694
9695 if (htab->tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9696 {
9697 BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
9698 bfd_put_NN (output_bfd, (bfd_vma) 0,
9699 htab->root.sgot->contents + htab->dt_tlsdesc_got);
9700
9701 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9702 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9703
9704 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9705 if (type == PLT_BTI || type == PLT_BTI_PAC)
9706 {
9707 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9708 }
9709
9710 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
9711 entry, htab->tlsdesc_plt_entry_size);
9712
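/* The TLSDESC resolver stub patched below is, roughly (a sketch; the
   BTI variant prepends a landing pad, which is skipped further down):

     stp  x2, x3, [sp, #-16]!
     adrp x2, DT_TLSDESC_GOT
     adrp x3, .got.plt
     ldr  x2, [x2, #:lo12:DT_TLSDESC_GOT]
     add  x3, x3, #:lo12:.got.plt
     br   x2

   which is why the four updates target offsets 4, 8, 12 and 16 within
   the entry.  */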
9713 {
9714 bfd_vma adrp1_addr =
9715 htab->root.splt->output_section->vma
9716 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
9717
9718 bfd_vma adrp2_addr = adrp1_addr + 4;
9719
9720 bfd_vma got_addr =
9721 htab->root.sgot->output_section->vma
9722 + htab->root.sgot->output_offset;
9723
9724 bfd_vma pltgot_addr =
9725 htab->root.sgotplt->output_section->vma
9726 + htab->root.sgotplt->output_offset;
9727
9728 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
9729
9730 bfd_byte *plt_entry =
9731 htab->root.splt->contents + htab->tlsdesc_plt;
9732
9733 /* First instruction in BTI enabled PLT stub is a BTI
9734 instruction so skip it. */
9735 if (type & PLT_BTI)
9736 {
9737 plt_entry = plt_entry + 4;
9738 adrp1_addr = adrp1_addr + 4;
9739 adrp2_addr = adrp2_addr + 4;
9740 }
9741
9742 /* adrp x2, DT_TLSDESC_GOT */
9743 elf_aarch64_update_plt_entry (output_bfd,
9744 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9745 plt_entry + 4,
9746 (PG (dt_tlsdesc_got)
9747 - PG (adrp1_addr)));
9748
9749 /* adrp x3, 0 */
9750 elf_aarch64_update_plt_entry (output_bfd,
9751 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9752 plt_entry + 8,
9753 (PG (pltgot_addr)
9754 - PG (adrp2_addr)));
9755
9756 /* ldr x2, [x2, #0] */
9757 elf_aarch64_update_plt_entry (output_bfd,
9758 BFD_RELOC_AARCH64_LDSTNN_LO12,
9759 plt_entry + 12,
9760 PG_OFFSET (dt_tlsdesc_got));
9761
9762 /* add x3, x3, 0 */
9763 elf_aarch64_update_plt_entry (output_bfd,
9764 BFD_RELOC_AARCH64_ADD_LO12,
9765 plt_entry + 16,
9766 PG_OFFSET (pltgot_addr));
9767 }
9768 }
9769 }
9770
9771 if (htab->root.sgotplt)
9772 {
9773 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
9774 {
9775 _bfd_error_handler
9776 (_("discarded output section: `%pA'"), htab->root.sgotplt);
9777 return FALSE;
9778 }
9779
9780 /* Fill in the first three entries in the global offset table. */
9781 if (htab->root.sgotplt->size > 0)
9782 {
9783 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
9784
9785 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
9786 bfd_put_NN (output_bfd,
9787 (bfd_vma) 0,
9788 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
9789 bfd_put_NN (output_bfd,
9790 (bfd_vma) 0,
9791 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
9792 }
9793
9794 if (htab->root.sgot)
9795 {
9796 if (htab->root.sgot->size > 0)
9797 {
9798 bfd_vma addr =
9799 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
9800 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
9801 }
9802 }
9803
9804 elf_section_data (htab->root.sgotplt->output_section)->
9805 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
9806 }
9807
9808 if (htab->root.sgot && htab->root.sgot->size > 0)
9809 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
9810 = GOT_ENTRY_SIZE;
9811
9812 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
9813 htab_traverse (htab->loc_hash_table,
9814 elfNN_aarch64_finish_local_dynamic_symbol,
9815 info);
9816
9817 return TRUE;
9818 }
9819
9820 /* Check if BTI or PAC enabled PLTs are needed. Returns the type needed. */
9821 static aarch64_plt_type
9822 get_plt_type (bfd *abfd)
9823 {
9824 aarch64_plt_type ret = PLT_NORMAL;
9825 bfd_byte *contents, *extdyn, *extdynend;
9826 asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
9827 if (!sec || !bfd_malloc_and_get_section (abfd, sec, &contents))
9828 return ret;
9829 extdyn = contents;
9830 extdynend = contents + sec->size;
9831 for (; extdyn < extdynend; extdyn += sizeof (ElfNN_External_Dyn))
9832 {
9833 Elf_Internal_Dyn dyn;
9834 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
9835
9836 /* Let's check the processor specific dynamic array tags. */
9837 bfd_vma tag = dyn.d_tag;
9838 if (tag < DT_LOPROC || tag > DT_HIPROC)
9839 continue;
9840
9841 switch (tag)
9842 {
9843 case DT_AARCH64_BTI_PLT:
9844 ret |= PLT_BTI;
9845 break;
9846
9847 case DT_AARCH64_PAC_PLT:
9848 ret |= PLT_PAC;
9849 break;
9850
9851 default: break;
9852 }
9853 }
9854 free (contents);
9855 return ret;
9856 }
9857
9858 static long
9859 elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
9860 long symcount,
9861 asymbol **syms,
9862 long dynsymcount,
9863 asymbol **dynsyms,
9864 asymbol **ret)
9865 {
9866 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
9867 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
9868 dynsymcount, dynsyms, ret);
9869 }
9870
9871 /* Return address for Ith PLT stub in section PLT, for relocation REL
9872 or (bfd_vma) -1 if it should not be included. */
9873
9874 static bfd_vma
9875 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
9876 const arelent *rel ATTRIBUTE_UNUSED)
9877 {
9878 size_t plt0_size = PLT_ENTRY_SIZE;
9879 size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
9880
9881 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
9882 {
9883 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9884 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
9885 else
9886 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9887 }
9888 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
9889 {
9890 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9891 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
9892 }
9893 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
9894 {
9895 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9896 }
9897
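/* A worked example, assuming PLT_ENTRY_SIZE is 32 and
   PLT_SMALL_ENTRY_SIZE is 16 bytes: for a normal PLT the stub for
   reloc index 2 is reported at plt->vma + 32 + 2 * 16.  */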
9898 return plt->vma + plt0_size + i * pltn_size;
9899 }
9900
9901 /* Returns TRUE if NAME is an AArch64 mapping symbol.
9902 The ARM ELF standard defines $x (for A64 code) and $d (for data).
9903 It also allows a period-initiated suffix to be added to the symbol, i.e.:
9904 "$[adtx]\.[:sym_char]+". */
9905
9906 static bfd_boolean
9907 is_aarch64_mapping_symbol (const char * name)
9908 {
9909 return name != NULL /* Paranoia. */
9910 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
9911 the mapping symbols could have acquired a prefix.
9912 We do not support this here, since such symbols no
9913 longer conform to the ARM ELF ABI. */
9914 && (name[1] == 'd' || name[1] == 'x')
9915 && (name[2] == 0 || name[2] == '.');
9916 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
9917 any characters that follow the period are legal characters for the body
9918 of a symbol's name. For now we just assume that this is the case. */
9919 }
9920
9921 /* Make sure that mapping symbols in object files are not removed via the
9922 "strip --strip-unneeded" tool. These symbols might needed in order to
9923 correctly generate linked files. Once an object file has been linked,
9924 it should be safe to remove them. */
9925
9926 static void
9927 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
9928 {
9929 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
9930 && sym->section != bfd_abs_section_ptr
9931 && is_aarch64_mapping_symbol (sym->name))
9932 sym->flags |= BSF_KEEP;
9933 }
9934
9935 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
9936 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
9937 for the effect of GNU properties of the output_bfd. */
9938 static bfd *
9939 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
9940 {
9941 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9942 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
9943 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
9944 elf_aarch64_tdata (info->output_bfd)->plt_type
9945 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
9946 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
9947 return pbfd;
9948 }
9949
9950 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
9951 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
9952 for the effect of GNU properties of the output_bfd. */
9953 static bfd_boolean
9954 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
9955 bfd *abfd, bfd *bbfd,
9956 elf_property *aprop,
9957 elf_property *bprop)
9958 {
9959 uint32_t prop
9960 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9961
9962 /* If the output has been marked with BTI via a command-line argument, give a
9963 warning if necessary. */
9964 /* Properties are merged per type, hence only check for warnings when merging
9965 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
9966 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
9967 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
9968 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
9969 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
9970 {
9971 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9972 || !aprop)
9973 {
9974 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9975 "all inputs do not have BTI in NOTE section."),
9976 abfd);
9977 }
9978 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9979 || !bprop)
9980 {
9981 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9982 "all inputs do not have BTI in NOTE section."),
9983 bbfd);
9984 }
9985 }
9986
9987 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
9988 bprop, prop);
9989 }
9990
9991 /* We use this so we can override certain functions
9992 (though currently we don't). */
9993
9994 const struct elf_size_info elfNN_aarch64_size_info =
9995 {
9996 sizeof (ElfNN_External_Ehdr),
9997 sizeof (ElfNN_External_Phdr),
9998 sizeof (ElfNN_External_Shdr),
9999 sizeof (ElfNN_External_Rel),
10000 sizeof (ElfNN_External_Rela),
10001 sizeof (ElfNN_External_Sym),
10002 sizeof (ElfNN_External_Dyn),
10003 sizeof (Elf_External_Note),
10004 4, /* Hash table entry size. */
10005 1, /* Internal relocs per external relocs. */
10006 ARCH_SIZE, /* Arch size. */
10007 LOG_FILE_ALIGN, /* Log_file_align. */
10008 ELFCLASSNN, EV_CURRENT,
10009 bfd_elfNN_write_out_phdrs,
10010 bfd_elfNN_write_shdrs_and_ehdr,
10011 bfd_elfNN_checksum_contents,
10012 bfd_elfNN_write_relocs,
10013 bfd_elfNN_swap_symbol_in,
10014 bfd_elfNN_swap_symbol_out,
10015 bfd_elfNN_slurp_reloc_table,
10016 bfd_elfNN_slurp_symbol_table,
10017 bfd_elfNN_swap_dyn_in,
10018 bfd_elfNN_swap_dyn_out,
10019 bfd_elfNN_swap_reloc_in,
10020 bfd_elfNN_swap_reloc_out,
10021 bfd_elfNN_swap_reloca_in,
10022 bfd_elfNN_swap_reloca_out
10023 };
10024
10025 #define ELF_ARCH bfd_arch_aarch64
10026 #define ELF_MACHINE_CODE EM_AARCH64
10027 #define ELF_MAXPAGESIZE 0x10000
10028 #define ELF_MINPAGESIZE 0x1000
10029 #define ELF_COMMONPAGESIZE 0x1000
10030
10031 #define bfd_elfNN_close_and_cleanup \
10032 elfNN_aarch64_close_and_cleanup
10033
10034 #define bfd_elfNN_bfd_free_cached_info \
10035 elfNN_aarch64_bfd_free_cached_info
10036
10037 #define bfd_elfNN_bfd_is_target_special_symbol \
10038 elfNN_aarch64_is_target_special_symbol
10039
10040 #define bfd_elfNN_bfd_link_hash_table_create \
10041 elfNN_aarch64_link_hash_table_create
10042
10043 #define bfd_elfNN_bfd_merge_private_bfd_data \
10044 elfNN_aarch64_merge_private_bfd_data
10045
10046 #define bfd_elfNN_bfd_print_private_bfd_data \
10047 elfNN_aarch64_print_private_bfd_data
10048
10049 #define bfd_elfNN_bfd_reloc_type_lookup \
10050 elfNN_aarch64_reloc_type_lookup
10051
10052 #define bfd_elfNN_bfd_reloc_name_lookup \
10053 elfNN_aarch64_reloc_name_lookup
10054
10055 #define bfd_elfNN_bfd_set_private_flags \
10056 elfNN_aarch64_set_private_flags
10057
10058 #define bfd_elfNN_find_inliner_info \
10059 elfNN_aarch64_find_inliner_info
10060
10061 #define bfd_elfNN_get_synthetic_symtab \
10062 elfNN_aarch64_get_synthetic_symtab
10063
10064 #define bfd_elfNN_mkobject \
10065 elfNN_aarch64_mkobject
10066
10067 #define bfd_elfNN_new_section_hook \
10068 elfNN_aarch64_new_section_hook
10069
10070 #define elf_backend_adjust_dynamic_symbol \
10071 elfNN_aarch64_adjust_dynamic_symbol
10072
10073 #define elf_backend_always_size_sections \
10074 elfNN_aarch64_always_size_sections
10075
10076 #define elf_backend_check_relocs \
10077 elfNN_aarch64_check_relocs
10078
10079 #define elf_backend_copy_indirect_symbol \
10080 elfNN_aarch64_copy_indirect_symbol
10081
10082 #define elf_backend_merge_symbol_attribute \
10083 elfNN_aarch64_merge_symbol_attribute
10084
10085 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
10086 to them in our hash. */
10087 #define elf_backend_create_dynamic_sections \
10088 elfNN_aarch64_create_dynamic_sections
10089
10090 #define elf_backend_init_index_section \
10091 _bfd_elf_init_2_index_sections
10092
10093 #define elf_backend_finish_dynamic_sections \
10094 elfNN_aarch64_finish_dynamic_sections
10095
10096 #define elf_backend_finish_dynamic_symbol \
10097 elfNN_aarch64_finish_dynamic_symbol
10098
10099 #define elf_backend_object_p \
10100 elfNN_aarch64_object_p
10101
10102 #define elf_backend_output_arch_local_syms \
10103 elfNN_aarch64_output_arch_local_syms
10104
10105 #define elf_backend_maybe_function_sym \
10106 elfNN_aarch64_maybe_function_sym
10107
10108 #define elf_backend_plt_sym_val \
10109 elfNN_aarch64_plt_sym_val
10110
10111 #define elf_backend_init_file_header \
10112 elfNN_aarch64_init_file_header
10113
10114 #define elf_backend_relocate_section \
10115 elfNN_aarch64_relocate_section
10116
10117 #define elf_backend_reloc_type_class \
10118 elfNN_aarch64_reloc_type_class
10119
10120 #define elf_backend_section_from_shdr \
10121 elfNN_aarch64_section_from_shdr
10122
10123 #define elf_backend_size_dynamic_sections \
10124 elfNN_aarch64_size_dynamic_sections
10125
10126 #define elf_backend_size_info \
10127 elfNN_aarch64_size_info
10128
10129 #define elf_backend_write_section \
10130 elfNN_aarch64_write_section
10131
10132 #define elf_backend_symbol_processing \
10133 elfNN_aarch64_backend_symbol_processing
10134
10135 #define elf_backend_setup_gnu_properties \
10136 elfNN_aarch64_link_setup_gnu_properties
10137
10138 #define elf_backend_merge_gnu_properties \
10139 elfNN_aarch64_merge_gnu_properties
10140
10141 #define elf_backend_can_refcount 1
10142 #define elf_backend_can_gc_sections 1
10143 #define elf_backend_plt_readonly 1
10144 #define elf_backend_want_got_plt 1
10145 #define elf_backend_want_plt_sym 0
10146 #define elf_backend_want_dynrelro 1
10147 #define elf_backend_may_use_rel_p 0
10148 #define elf_backend_may_use_rela_p 1
10149 #define elf_backend_default_use_rela_p 1
10150 #define elf_backend_rela_normal 1
10151 #define elf_backend_dtrel_excludes_plt 1
10152 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
10153 #define elf_backend_default_execstack 0
10154 #define elf_backend_extern_protected_data 1
10155 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
10156
10157 #undef elf_backend_obj_attrs_section
10158 #define elf_backend_obj_attrs_section ".ARM.attributes"
10159
10160 #include "elfNN-target.h"
10161
10162 /* CloudABI support. */
10163
10164 #undef TARGET_LITTLE_SYM
10165 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
10166 #undef TARGET_LITTLE_NAME
10167 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
10168 #undef TARGET_BIG_SYM
10169 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
10170 #undef TARGET_BIG_NAME
10171 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
10172
10173 #undef ELF_OSABI
10174 #define ELF_OSABI ELFOSABI_CLOUDABI
10175
10176 #undef elfNN_bed
10177 #define elfNN_bed elfNN_aarch64_cloudabi_bed
10178
10179 #include "elfNN-target.h"