[AArch64, ILP32] 1/6 Rename elf64-aarch64.c to elfnn-aarch64.c
[deliverable/binutils-gdb.git] / bfd / elfnn-aarch64.c
1/* ELF support for AArch64.
 2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
 23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
 43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62 global and local TLS symbols. Note that this is different from
 63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
 74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
 101 spotted. The local symbol data structures are created once, when
 102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
 117 Iterate over all input BFDs, look in the local symbol data structures
 118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elf64_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
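/* Illustrative sketch, not part of the original implementation notes:
   the "LSB as a flag" convention described above means that code which
   consumes a GOT offset typically does something along these lines
   (the names are hypothetical, for illustration only):

       off = sym_got_offset;
       if ((off & 1) != 0)
         off &= ~(bfd_vma) 1;        -- relocations already emitted, mask
       else
         sym_got_offset |= 1;        -- emit them now and set the flag
*/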
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
 174 ((R_TYPE) == R_AARCH64_TLSDESC_LD_PREL19 \
 175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
 176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE21 \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return size of a relocation entry. HTAB is the bfd's
189 elf64_aarch64_link_hash_entry. */
190#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
191
192/* GOT Entry size - 8 bytes. */
193#define GOT_ENTRY_SIZE (8)
194#define PLT_ENTRY_SIZE (32)
195#define PLT_SMALL_ENTRY_SIZE (16)
196#define PLT_TLSDESC_ENTRY_SIZE (32)
197
198/* Take the PAGE component of an address or offset. */
199#define PG(x) ((x) & ~ 0xfff)
200#define PG_OFFSET(x) ((x) & 0xfff)
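/* For example (illustration only): PG (0x12345) is 0x12000 and
   PG_OFFSET (0x12345) is 0x345, i.e. the 4KB page base and the offset
   within that page.  */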
201
202/* Encoding of the nop instruction.  */
203#define INSN_NOP 0xd503201f
204
205#define aarch64_compute_jump_table_size(htab) \
206 (((htab)->root.srelplt == NULL) ? 0 \
207 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
208
209/* The first entry in a procedure linkage table looks like this.
 210 If the distance between the PLTGOT and the PLT is < 4GB, use
211 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
212 in x16 and needs to work out PLTGOT[1] by using an address of
213 [x16,#-8]. */
214static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
215{
216 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
217 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
218 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
219 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
220 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
221 0x1f, 0x20, 0x03, 0xd5, /* nop */
222 0x1f, 0x20, 0x03, 0xd5, /* nop */
223 0x1f, 0x20, 0x03, 0xd5, /* nop */
224};
225
226/* A per-function entry in a procedure linkage table looks like this.
 227 If the distance between the PLTGOT and the PLT is < 4GB, use
228 these PLT entries. */
229static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
230{
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
232 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
233 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
234 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
235};
236
237static const bfd_byte
238elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
239{
240 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
241 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
242 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
243 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
244 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
245 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
246 0x1f, 0x20, 0x03, 0xd5, /* nop */
247 0x1f, 0x20, 0x03, 0xd5, /* nop */
248};
249
250#define elf_info_to_howto elf64_aarch64_info_to_howto
251#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
252
253#define AARCH64_ELF_ABI_VERSION 0
254
255/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
256#define ALL_ONES (~ (bfd_vma) 0)
257
258static reloc_howto_type elf64_aarch64_howto_none =
259 HOWTO (R_AARCH64_NONE, /* type */
260 0, /* rightshift */
261 0, /* size (0 = byte, 1 = short, 2 = long) */
262 0, /* bitsize */
263 FALSE, /* pc_relative */
264 0, /* bitpos */
265 complain_overflow_dont,/* complain_on_overflow */
266 bfd_elf_generic_reloc, /* special_function */
267 "R_AARCH64_NONE", /* name */
268 FALSE, /* partial_inplace */
269 0, /* src_mask */
270 0, /* dst_mask */
271 FALSE); /* pcrel_offset */
272
273static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
274{
275 HOWTO (R_AARCH64_COPY, /* type */
276 0, /* rightshift */
277 2, /* size (0 = byte, 1 = short, 2 = long) */
278 64, /* bitsize */
279 FALSE, /* pc_relative */
280 0, /* bitpos */
281 complain_overflow_bitfield, /* complain_on_overflow */
282 bfd_elf_generic_reloc, /* special_function */
283 "R_AARCH64_COPY", /* name */
284 TRUE, /* partial_inplace */
285 0xffffffff, /* src_mask */
286 0xffffffff, /* dst_mask */
287 FALSE), /* pcrel_offset */
288
289 HOWTO (R_AARCH64_GLOB_DAT, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_GLOB_DAT", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_JUMP_SLOT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_RELATIVE, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_RELATIVE", /* name */
326 TRUE, /* partial_inplace */
327 ALL_ONES, /* src_mask */
328 ALL_ONES, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_dont, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_TLS_DTPMOD64", /* name */
340 FALSE, /* partial_inplace */
341 0, /* src_mask */
342 ALL_ONES, /* dst_mask */
 343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPREL64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pcrel_offset */
358
359 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_TPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLSDESC, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLSDESC", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387};
388
389/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
390 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
391 in that slot. */
392
393static reloc_howto_type elf64_aarch64_howto_table[] =
394{
395 /* Basic data relocations. */
396
397 HOWTO (R_AARCH64_NULL, /* type */
398 0, /* rightshift */
399 0, /* size (0 = byte, 1 = short, 2 = long) */
400 0, /* bitsize */
401 FALSE, /* pc_relative */
402 0, /* bitpos */
403 complain_overflow_dont, /* complain_on_overflow */
404 bfd_elf_generic_reloc, /* special_function */
405 "R_AARCH64_NULL", /* name */
406 FALSE, /* partial_inplace */
407 0, /* src_mask */
408 0, /* dst_mask */
409 FALSE), /* pcrel_offset */
410
411 /* .xword: (S+A) */
412 HOWTO (R_AARCH64_ABS64, /* type */
413 0, /* rightshift */
414 4, /* size (4 = long long) */
415 64, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_unsigned, /* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_AARCH64_ABS64", /* name */
421 FALSE, /* partial_inplace */
422 ALL_ONES, /* src_mask */
423 ALL_ONES, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 /* .word: (S+A) */
427 HOWTO (R_AARCH64_ABS32, /* type */
428 0, /* rightshift */
429 2, /* size (0 = byte, 1 = short, 2 = long) */
430 32, /* bitsize */
431 FALSE, /* pc_relative */
432 0, /* bitpos */
433 complain_overflow_unsigned, /* complain_on_overflow */
434 bfd_elf_generic_reloc, /* special_function */
435 "R_AARCH64_ABS32", /* name */
436 FALSE, /* partial_inplace */
437 0xffffffff, /* src_mask */
438 0xffffffff, /* dst_mask */
439 FALSE), /* pcrel_offset */
440
441 /* .half: (S+A) */
442 HOWTO (R_AARCH64_ABS16, /* type */
443 0, /* rightshift */
444 1, /* size (0 = byte, 1 = short, 2 = long) */
445 16, /* bitsize */
446 FALSE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_unsigned, /* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_AARCH64_ABS16", /* name */
451 FALSE, /* partial_inplace */
452 0xffff, /* src_mask */
453 0xffff, /* dst_mask */
454 FALSE), /* pcrel_offset */
455
456 /* .xword: (S+A-P) */
457 HOWTO (R_AARCH64_PREL64, /* type */
458 0, /* rightshift */
459 4, /* size (4 = long long) */
460 64, /* bitsize */
461 TRUE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_signed, /* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_AARCH64_PREL64", /* name */
466 FALSE, /* partial_inplace */
467 ALL_ONES, /* src_mask */
468 ALL_ONES, /* dst_mask */
469 TRUE), /* pcrel_offset */
470
471 /* .word: (S+A-P) */
472 HOWTO (R_AARCH64_PREL32, /* type */
473 0, /* rightshift */
474 2, /* size (0 = byte, 1 = short, 2 = long) */
475 32, /* bitsize */
476 TRUE, /* pc_relative */
477 0, /* bitpos */
478 complain_overflow_signed, /* complain_on_overflow */
479 bfd_elf_generic_reloc, /* special_function */
480 "R_AARCH64_PREL32", /* name */
481 FALSE, /* partial_inplace */
482 0xffffffff, /* src_mask */
483 0xffffffff, /* dst_mask */
484 TRUE), /* pcrel_offset */
485
486 /* .half: (S+A-P) */
487 HOWTO (R_AARCH64_PREL16, /* type */
488 0, /* rightshift */
489 1, /* size (0 = byte, 1 = short, 2 = long) */
490 16, /* bitsize */
491 TRUE, /* pc_relative */
492 0, /* bitpos */
493 complain_overflow_signed, /* complain_on_overflow */
494 bfd_elf_generic_reloc, /* special_function */
495 "R_AARCH64_PREL16", /* name */
496 FALSE, /* partial_inplace */
497 0xffff, /* src_mask */
498 0xffff, /* dst_mask */
499 TRUE), /* pcrel_offset */
500
501 /* Group relocations to create a 16, 32, 48 or 64 bit
502 unsigned data or abs address inline. */
503
504 /* MOVZ: ((S+A) >> 0) & 0xffff */
505 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
506 0, /* rightshift */
507 2, /* size (0 = byte, 1 = short, 2 = long) */
508 16, /* bitsize */
509 FALSE, /* pc_relative */
510 0, /* bitpos */
511 complain_overflow_unsigned, /* complain_on_overflow */
512 bfd_elf_generic_reloc, /* special_function */
513 "R_AARCH64_MOVW_UABS_G0", /* name */
514 FALSE, /* partial_inplace */
515 0xffff, /* src_mask */
516 0xffff, /* dst_mask */
517 FALSE), /* pcrel_offset */
518
519 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
520 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
521 0, /* rightshift */
522 2, /* size (0 = byte, 1 = short, 2 = long) */
523 16, /* bitsize */
524 FALSE, /* pc_relative */
525 0, /* bitpos */
526 complain_overflow_dont, /* complain_on_overflow */
527 bfd_elf_generic_reloc, /* special_function */
528 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
529 FALSE, /* partial_inplace */
530 0xffff, /* src_mask */
531 0xffff, /* dst_mask */
532 FALSE), /* pcrel_offset */
533
534 /* MOVZ: ((S+A) >> 16) & 0xffff */
535 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
536 16, /* rightshift */
537 2, /* size (0 = byte, 1 = short, 2 = long) */
538 16, /* bitsize */
539 FALSE, /* pc_relative */
540 0, /* bitpos */
541 complain_overflow_unsigned, /* complain_on_overflow */
542 bfd_elf_generic_reloc, /* special_function */
543 "R_AARCH64_MOVW_UABS_G1", /* name */
544 FALSE, /* partial_inplace */
545 0xffff, /* src_mask */
546 0xffff, /* dst_mask */
547 FALSE), /* pcrel_offset */
548
549 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
550 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
551 16, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 16, /* bitsize */
554 FALSE, /* pc_relative */
555 0, /* bitpos */
556 complain_overflow_dont, /* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
559 FALSE, /* partial_inplace */
560 0xffff, /* src_mask */
561 0xffff, /* dst_mask */
562 FALSE), /* pcrel_offset */
563
564 /* MOVZ: ((S+A) >> 32) & 0xffff */
565 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
566 32, /* rightshift */
567 2, /* size (0 = byte, 1 = short, 2 = long) */
568 16, /* bitsize */
569 FALSE, /* pc_relative */
570 0, /* bitpos */
571 complain_overflow_unsigned, /* complain_on_overflow */
572 bfd_elf_generic_reloc, /* special_function */
573 "R_AARCH64_MOVW_UABS_G2", /* name */
574 FALSE, /* partial_inplace */
575 0xffff, /* src_mask */
576 0xffff, /* dst_mask */
577 FALSE), /* pcrel_offset */
578
579 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
580 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
581 32, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 16, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont, /* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
589 FALSE, /* partial_inplace */
590 0xffff, /* src_mask */
591 0xffff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 /* MOVZ: ((S+A) >> 48) & 0xffff */
595 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
596 48, /* rightshift */
597 2, /* size (0 = byte, 1 = short, 2 = long) */
598 16, /* bitsize */
599 FALSE, /* pc_relative */
600 0, /* bitpos */
601 complain_overflow_unsigned, /* complain_on_overflow */
602 bfd_elf_generic_reloc, /* special_function */
603 "R_AARCH64_MOVW_UABS_G3", /* name */
604 FALSE, /* partial_inplace */
605 0xffff, /* src_mask */
606 0xffff, /* dst_mask */
607 FALSE), /* pcrel_offset */
608
609 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
610 signed data or abs address inline. Will change instruction
611 to MOVN or MOVZ depending on sign of calculated value. */
612
613 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
614 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
615 0, /* rightshift */
616 2, /* size (0 = byte, 1 = short, 2 = long) */
617 16, /* bitsize */
618 FALSE, /* pc_relative */
619 0, /* bitpos */
620 complain_overflow_signed, /* complain_on_overflow */
621 bfd_elf_generic_reloc, /* special_function */
622 "R_AARCH64_MOVW_SABS_G0", /* name */
623 FALSE, /* partial_inplace */
624 0xffff, /* src_mask */
625 0xffff, /* dst_mask */
626 FALSE), /* pcrel_offset */
627
628 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
629 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
630 16, /* rightshift */
631 2, /* size (0 = byte, 1 = short, 2 = long) */
632 16, /* bitsize */
633 FALSE, /* pc_relative */
634 0, /* bitpos */
635 complain_overflow_signed, /* complain_on_overflow */
636 bfd_elf_generic_reloc, /* special_function */
637 "R_AARCH64_MOVW_SABS_G1", /* name */
638 FALSE, /* partial_inplace */
639 0xffff, /* src_mask */
640 0xffff, /* dst_mask */
641 FALSE), /* pcrel_offset */
642
643 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
644 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
645 32, /* rightshift */
646 2, /* size (0 = byte, 1 = short, 2 = long) */
647 16, /* bitsize */
648 FALSE, /* pc_relative */
649 0, /* bitpos */
650 complain_overflow_signed, /* complain_on_overflow */
651 bfd_elf_generic_reloc, /* special_function */
652 "R_AARCH64_MOVW_SABS_G2", /* name */
653 FALSE, /* partial_inplace */
654 0xffff, /* src_mask */
655 0xffff, /* dst_mask */
656 FALSE), /* pcrel_offset */
657
658/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
659 addresses: PG(x) is (x & ~0xfff). */
660
661 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
662 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
663 2, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 19, /* bitsize */
666 TRUE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed, /* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_AARCH64_LD_PREL_LO19", /* name */
671 FALSE, /* partial_inplace */
672 0x7ffff, /* src_mask */
673 0x7ffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 /* ADR: (S+A-P) & 0x1fffff */
677 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
678 0, /* rightshift */
679 2, /* size (0 = byte, 1 = short, 2 = long) */
680 21, /* bitsize */
681 TRUE, /* pc_relative */
682 0, /* bitpos */
683 complain_overflow_signed, /* complain_on_overflow */
684 bfd_elf_generic_reloc, /* special_function */
685 "R_AARCH64_ADR_PREL_LO21", /* name */
686 FALSE, /* partial_inplace */
687 0x1fffff, /* src_mask */
688 0x1fffff, /* dst_mask */
689 TRUE), /* pcrel_offset */
690
691 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
692 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
693 12, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 21, /* bitsize */
696 TRUE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_signed, /* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
701 FALSE, /* partial_inplace */
702 0x1fffff, /* src_mask */
703 0x1fffff, /* dst_mask */
704 TRUE), /* pcrel_offset */
705
706 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
707 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
708 12, /* rightshift */
709 2, /* size (0 = byte, 1 = short, 2 = long) */
710 21, /* bitsize */
711 TRUE, /* pc_relative */
712 0, /* bitpos */
713 complain_overflow_dont, /* complain_on_overflow */
714 bfd_elf_generic_reloc, /* special_function */
715 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
716 FALSE, /* partial_inplace */
717 0x1fffff, /* src_mask */
718 0x1fffff, /* dst_mask */
719 TRUE), /* pcrel_offset */
720
721 /* ADD: (S+A) & 0xfff [no overflow check] */
722 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 12, /* bitsize */
726 FALSE, /* pc_relative */
727 10, /* bitpos */
728 complain_overflow_dont, /* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x3ffc00, /* src_mask */
733 0x3ffc00, /* dst_mask */
734 FALSE), /* pcrel_offset */
735
736 /* LD/ST8: (S+A) & 0xfff */
737 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 12, /* bitsize */
741 FALSE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_dont, /* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
746 FALSE, /* partial_inplace */
747 0xfff, /* src_mask */
748 0xfff, /* dst_mask */
749 FALSE), /* pcrel_offset */
750
751 /* Relocations for control-flow instructions. */
752
753 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
754 HOWTO (R_AARCH64_TSTBR14, /* type */
755 2, /* rightshift */
756 2, /* size (0 = byte, 1 = short, 2 = long) */
757 14, /* bitsize */
758 TRUE, /* pc_relative */
759 0, /* bitpos */
760 complain_overflow_signed, /* complain_on_overflow */
761 bfd_elf_generic_reloc, /* special_function */
762 "R_AARCH64_TSTBR14", /* name */
763 FALSE, /* partial_inplace */
764 0x3fff, /* src_mask */
765 0x3fff, /* dst_mask */
766 TRUE), /* pcrel_offset */
767
768 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
769 HOWTO (R_AARCH64_CONDBR19, /* type */
770 2, /* rightshift */
771 2, /* size (0 = byte, 1 = short, 2 = long) */
772 19, /* bitsize */
773 TRUE, /* pc_relative */
774 0, /* bitpos */
775 complain_overflow_signed, /* complain_on_overflow */
776 bfd_elf_generic_reloc, /* special_function */
777 "R_AARCH64_CONDBR19", /* name */
778 FALSE, /* partial_inplace */
779 0x7ffff, /* src_mask */
780 0x7ffff, /* dst_mask */
781 TRUE), /* pcrel_offset */
782
783 EMPTY_HOWTO (281),
784
785 /* B: ((S+A-P) >> 2) & 0x3ffffff */
786 HOWTO (R_AARCH64_JUMP26, /* type */
787 2, /* rightshift */
788 2, /* size (0 = byte, 1 = short, 2 = long) */
789 26, /* bitsize */
790 TRUE, /* pc_relative */
791 0, /* bitpos */
792 complain_overflow_signed, /* complain_on_overflow */
793 bfd_elf_generic_reloc, /* special_function */
794 "R_AARCH64_JUMP26", /* name */
795 FALSE, /* partial_inplace */
796 0x3ffffff, /* src_mask */
797 0x3ffffff, /* dst_mask */
798 TRUE), /* pcrel_offset */
799
800 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
801 HOWTO (R_AARCH64_CALL26, /* type */
802 2, /* rightshift */
803 2, /* size (0 = byte, 1 = short, 2 = long) */
804 26, /* bitsize */
805 TRUE, /* pc_relative */
806 0, /* bitpos */
807 complain_overflow_signed, /* complain_on_overflow */
808 bfd_elf_generic_reloc, /* special_function */
809 "R_AARCH64_CALL26", /* name */
810 FALSE, /* partial_inplace */
811 0x3ffffff, /* src_mask */
812 0x3ffffff, /* dst_mask */
813 TRUE), /* pcrel_offset */
814
815 /* LD/ST16: (S+A) & 0xffe */
816 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
817 1, /* rightshift */
818 2, /* size (0 = byte, 1 = short, 2 = long) */
819 12, /* bitsize */
820 FALSE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_dont, /* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
825 FALSE, /* partial_inplace */
826 0xffe, /* src_mask */
827 0xffe, /* dst_mask */
828 FALSE), /* pcrel_offset */
829
830 /* LD/ST32: (S+A) & 0xffc */
831 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
832 2, /* rightshift */
833 2, /* size (0 = byte, 1 = short, 2 = long) */
834 12, /* bitsize */
835 FALSE, /* pc_relative */
836 0, /* bitpos */
837 complain_overflow_dont, /* complain_on_overflow */
838 bfd_elf_generic_reloc, /* special_function */
839 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
840 FALSE, /* partial_inplace */
841 0xffc, /* src_mask */
842 0xffc, /* dst_mask */
843 FALSE), /* pcrel_offset */
844
845 /* LD/ST64: (S+A) & 0xff8 */
846 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
847 3, /* rightshift */
848 2, /* size (0 = byte, 1 = short, 2 = long) */
849 12, /* bitsize */
850 FALSE, /* pc_relative */
851 0, /* bitpos */
852 complain_overflow_dont, /* complain_on_overflow */
853 bfd_elf_generic_reloc, /* special_function */
854 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
855 FALSE, /* partial_inplace */
856 0xff8, /* src_mask */
857 0xff8, /* dst_mask */
858 FALSE), /* pcrel_offset */
859
860 EMPTY_HOWTO (287),
861 EMPTY_HOWTO (288),
862 EMPTY_HOWTO (289),
863 EMPTY_HOWTO (290),
864 EMPTY_HOWTO (291),
865 EMPTY_HOWTO (292),
866 EMPTY_HOWTO (293),
867 EMPTY_HOWTO (294),
868 EMPTY_HOWTO (295),
869 EMPTY_HOWTO (296),
870 EMPTY_HOWTO (297),
871 EMPTY_HOWTO (298),
872
873 /* LD/ST128: (S+A) & 0xff0 */
874 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
875 4, /* rightshift */
876 2, /* size (0 = byte, 1 = short, 2 = long) */
877 12, /* bitsize */
878 FALSE, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
883 FALSE, /* partial_inplace */
884 0xff0, /* src_mask */
885 0xff0, /* dst_mask */
886 FALSE), /* pcrel_offset */
887
888 EMPTY_HOWTO (300),
889 EMPTY_HOWTO (301),
890 EMPTY_HOWTO (302),
891 EMPTY_HOWTO (303),
892 EMPTY_HOWTO (304),
893 EMPTY_HOWTO (305),
894 EMPTY_HOWTO (306),
895 EMPTY_HOWTO (307),
896 EMPTY_HOWTO (308),
897
898 /* Set a load-literal immediate field to bits
899 0x1FFFFC of G(S)-P */
900 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
901 2, /* rightshift */
902 2, /* size (0 = byte,1 = short,2 = long) */
903 19, /* bitsize */
904 TRUE, /* pc_relative */
905 0, /* bitpos */
906 complain_overflow_signed, /* complain_on_overflow */
907 bfd_elf_generic_reloc, /* special_function */
908 "R_AARCH64_GOT_LD_PREL19", /* name */
909 FALSE, /* partial_inplace */
910 0xffffe0, /* src_mask */
911 0xffffe0, /* dst_mask */
912 TRUE), /* pcrel_offset */
913
914 EMPTY_HOWTO (310),
915
916 /* Get to the page for the GOT entry for the symbol
917 (G(S) - P) using an ADRP instruction. */
918 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
919 12, /* rightshift */
920 2, /* size (0 = byte, 1 = short, 2 = long) */
921 21, /* bitsize */
922 TRUE, /* pc_relative */
923 0, /* bitpos */
924 complain_overflow_dont, /* complain_on_overflow */
925 bfd_elf_generic_reloc, /* special_function */
926 "R_AARCH64_ADR_GOT_PAGE", /* name */
927 FALSE, /* partial_inplace */
928 0x1fffff, /* src_mask */
929 0x1fffff, /* dst_mask */
930 TRUE), /* pcrel_offset */
931
932 /* LD64: GOT offset G(S) & 0xff8 */
933 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
934 3, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 12, /* bitsize */
937 FALSE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont, /* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
942 FALSE, /* partial_inplace */
943 0xff8, /* src_mask */
944 0xff8, /* dst_mask */
945 FALSE) /* pcrel_offset */
946};
947
948static reloc_howto_type elf64_aarch64_tls_howto_table[] =
949{
950 EMPTY_HOWTO (512),
951
952 /* Get to the page for the GOT entry for the symbol
953 (G(S) - P) using an ADRP instruction. */
954 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
955 12, /* rightshift */
956 2, /* size (0 = byte, 1 = short, 2 = long) */
957 21, /* bitsize */
958 TRUE, /* pc_relative */
959 0, /* bitpos */
960 complain_overflow_dont, /* complain_on_overflow */
961 bfd_elf_generic_reloc, /* special_function */
962 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
963 FALSE, /* partial_inplace */
964 0x1fffff, /* src_mask */
965 0x1fffff, /* dst_mask */
966 TRUE), /* pcrel_offset */
967
968 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
969 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
970 0, /* rightshift */
971 2, /* size (0 = byte, 1 = short, 2 = long) */
972 12, /* bitsize */
973 FALSE, /* pc_relative */
974 0, /* bitpos */
975 complain_overflow_dont, /* complain_on_overflow */
976 bfd_elf_generic_reloc, /* special_function */
977 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
978 FALSE, /* partial_inplace */
979 0xfff, /* src_mask */
980 0xfff, /* dst_mask */
981 FALSE), /* pcrel_offset */
982
983 EMPTY_HOWTO (515),
984 EMPTY_HOWTO (516),
985 EMPTY_HOWTO (517),
986 EMPTY_HOWTO (518),
987 EMPTY_HOWTO (519),
988 EMPTY_HOWTO (520),
989 EMPTY_HOWTO (521),
990 EMPTY_HOWTO (522),
991 EMPTY_HOWTO (523),
992 EMPTY_HOWTO (524),
993 EMPTY_HOWTO (525),
994 EMPTY_HOWTO (526),
995 EMPTY_HOWTO (527),
996 EMPTY_HOWTO (528),
997 EMPTY_HOWTO (529),
998 EMPTY_HOWTO (530),
999 EMPTY_HOWTO (531),
1000 EMPTY_HOWTO (532),
1001 EMPTY_HOWTO (533),
1002 EMPTY_HOWTO (534),
1003 EMPTY_HOWTO (535),
1004 EMPTY_HOWTO (536),
1005 EMPTY_HOWTO (537),
1006 EMPTY_HOWTO (538),
1007
1008 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1037 12, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 21, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1045 FALSE, /* partial_inplace */
1046 0x1fffff, /* src_mask */
1047 0x1fffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1051 3, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1059 FALSE, /* partial_inplace */
1060 0xff8, /* src_mask */
1061 0xff8, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
 1065 2, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 21, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1073 FALSE, /* partial_inplace */
1074 0x1ffffc, /* src_mask */
1075 0x1ffffc, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
 1079 32, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffff, /* src_mask */
1089 0xffff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
 1093 16, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 12, /* bitsize */
1096 FALSE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1105
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
 1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 12, /* bitsize */
1110 FALSE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1119
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 12, /* bitsize */
1124 FALSE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1133
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 12, /* bitsize */
1138 FALSE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1147
1148 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
 1149 12, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1157 FALSE, /* partial_inplace */
1158 0xfff, /* src_mask */
1159 0xfff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 12, /* bitsize */
1166 FALSE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1175
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189};
1190
1191static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1192{
 1193 HOWTO (R_AARCH64_TLSDESC_LD_PREL19, /* type */
 1194 2, /* rightshift */
1195 2, /* size (0 = byte, 1 = short, 2 = long) */
1196 21, /* bitsize */
1197 TRUE, /* pc_relative */
1198 0, /* bitpos */
1199 complain_overflow_dont, /* complain_on_overflow */
1200 bfd_elf_generic_reloc, /* special_function */
 1201 "R_AARCH64_TLSDESC_LD_PREL19", /* name */
1202 FALSE, /* partial_inplace */
1203 0x1ffffc, /* src_mask */
1204 0x1ffffc, /* dst_mask */
1205 TRUE), /* pcrel_offset */
1206
1207 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1208 0, /* rightshift */
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1210 21, /* bitsize */
1211 TRUE, /* pc_relative */
1212 0, /* bitpos */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1fffff, /* src_mask */
1218 0x1fffff, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1220
1221 /* Get to the page for the GOT entry for the symbol
1222 (G(S) - P) using an ADRP instruction. */
 1223 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE21, /* type */
1224 12, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 21, /* bitsize */
1227 TRUE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
 1231 "R_AARCH64_TLSDESC_ADR_PAGE21", /* name */
1232 FALSE, /* partial_inplace */
1233 0x1fffff, /* src_mask */
1234 0x1fffff, /* dst_mask */
1235 TRUE), /* pcrel_offset */
1236
1237 /* LD64: GOT offset G(S) & 0xfff. */
1238 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1239 3, /* rightshift */
1240 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 12, /* bitsize */
1242 FALSE, /* pc_relative */
1243 0, /* bitpos */
1244 complain_overflow_dont, /* complain_on_overflow */
1245 bfd_elf_generic_reloc, /* special_function */
1246 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1247 FALSE, /* partial_inplace */
1248 0xfff, /* src_mask */
1249 0xfff, /* dst_mask */
1250 FALSE), /* pcrel_offset */
1251
1252 /* ADD: GOT offset G(S) & 0xfff. */
1253 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1254 0, /* rightshift */
1255 2, /* size (0 = byte, 1 = short, 2 = long) */
1256 12, /* bitsize */
1257 FALSE, /* pc_relative */
1258 0, /* bitpos */
1259 complain_overflow_dont, /* complain_on_overflow */
1260 bfd_elf_generic_reloc, /* special_function */
1261 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1262 FALSE, /* partial_inplace */
1263 0xfff, /* src_mask */
1264 0xfff, /* dst_mask */
1265 FALSE), /* pcrel_offset */
1266
1267 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
 1268 16, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1276 FALSE, /* partial_inplace */
1277 0xffff, /* src_mask */
1278 0xffff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1282 0, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 12, /* bitsize */
1285 FALSE, /* pc_relative */
1286 0, /* bitpos */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1296 0, /* rightshift */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1298 12, /* bitsize */
1299 FALSE, /* pc_relative */
1300 0, /* bitpos */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_LDR", /* name */
1304 FALSE, /* partial_inplace */
1305 0x0, /* src_mask */
1306 0x0, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1308
1309 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1310 0, /* rightshift */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1312 12, /* bitsize */
1313 FALSE, /* pc_relative */
1314 0, /* bitpos */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_ADD", /* name */
1318 FALSE, /* partial_inplace */
1319 0x0, /* src_mask */
1320 0x0, /* dst_mask */
1321 FALSE), /* pcrel_offset */
1322
1323 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1324 0, /* rightshift */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1326 12, /* bitsize */
1327 FALSE, /* pc_relative */
1328 0, /* bitpos */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_CALL", /* name */
1332 FALSE, /* partial_inplace */
1333 0x0, /* src_mask */
1334 0x0, /* dst_mask */
1335 FALSE), /* pcrel_offset */
1336};
1337
1338static reloc_howto_type *
1339elf64_aarch64_howto_from_type (unsigned int r_type)
1340{
1341 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1342 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1343
1344 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1345 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1346
1347 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1348 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1349
1350 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1351 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1352
1353 switch (r_type)
1354 {
1355 case R_AARCH64_NONE:
1356 return &elf64_aarch64_howto_none;
1357
1358 }
1359 bfd_set_error (bfd_error_bad_value);
1360 return NULL;
1361}
1362
1363static void
1364elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1365 Elf_Internal_Rela *elf_reloc)
1366{
1367 unsigned int r_type;
1368
1369 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1370 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1371}
1372
1373struct elf64_aarch64_reloc_map
1374{
1375 bfd_reloc_code_real_type bfd_reloc_val;
1376 unsigned int elf_reloc_val;
1377};
1378
1379/* All entries in this list must also be present in
1380 elf64_aarch64_howto_table. */
1381static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1382{
1383 {BFD_RELOC_NONE, R_AARCH64_NONE},
1384
1385 /* Basic data relocations. */
1386 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1387 {BFD_RELOC_64, R_AARCH64_ABS64},
1388 {BFD_RELOC_32, R_AARCH64_ABS32},
1389 {BFD_RELOC_16, R_AARCH64_ABS16},
1390 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1391 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1392 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1393
1394 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1395 value inline. */
1396 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1397 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1398 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1399
1400 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1401 signed value inline. */
1402 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1403 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1404 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1405
1406 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1407 unsigned value inline. */
1408 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1409 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1410 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1411 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1412
1413 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1414 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1415 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1416 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1417 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1418 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1419 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1420 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1421 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1422 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1423 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1424
1425 /* Relocations for control-flow instructions. */
1426 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1427 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1428 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1429 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1430
1431 /* Relocations for PIC. */
 1432 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
1433 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1434 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1435
1436 /* Relocations for TLS. */
1437 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1438 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1439 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1440 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1441 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1442 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1443 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1444 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1445 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1446 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1447 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1448 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1449 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1450 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1451 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1452 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1453 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1454 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1455 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1456 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1457 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1458 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1459 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
 1460 {BFD_RELOC_AARCH64_TLSDESC_LD_PREL19, R_AARCH64_TLSDESC_LD_PREL19},
 1461 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
 1462 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21, R_AARCH64_TLSDESC_ADR_PAGE21},
1463 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1464 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1465 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1466 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1467 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1468 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1469 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1470 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1471 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1472 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1473 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1474};
1475
1476static reloc_howto_type *
1477elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1478 bfd_reloc_code_real_type code)
1479{
1480 unsigned int i;
1481
1482 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1483 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1484 return elf64_aarch64_howto_from_type
1485 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1486
1487 bfd_set_error (bfd_error_bad_value);
1488 return NULL;
1489}
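/* Illustrative note (assumption: this routine is wired up as the
   target's reloc_type_lookup hook elsewhere in this file): a generic
   call such as

       howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_AARCH64_CALL26);

   resolves through the map above to the R_AARCH64_CALL26 howto.  */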
1490
1491static reloc_howto_type *
1492elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1493 const char *r_name)
1494{
1495 unsigned int i;
1496
1497 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1498 if (elf64_aarch64_howto_table[i].name != NULL
1499 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1500 return &elf64_aarch64_howto_table[i];
1501
1502 return NULL;
1503}
1504
1505/* Support for core dump NOTE sections. */
1506
1507static bfd_boolean
1508elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1509{
1510 int offset;
1511 size_t size;
1512
1513 switch (note->descsz)
1514 {
1515 default:
1516 return FALSE;
1517
1518 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1519 /* pr_cursig */
 1520 elf_tdata (abfd)->core->signal
1521 = bfd_get_16 (abfd, note->descdata + 12);
1522
1523 /* pr_pid */
 1524 elf_tdata (abfd)->core->lwpid
1525 = bfd_get_32 (abfd, note->descdata + 32);
1526
1527 /* pr_reg */
1528 offset = 112;
 1529 size = 272;
1530
1531 break;
1532 }
1533
1534 /* Make a ".reg/999" section. */
1535 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1536 size, note->descpos + offset);
1537}
1538
1539#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1540#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1541#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1542#define TARGET_BIG_NAME "elf64-bigaarch64"
1543
1544#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1545
1546typedef unsigned long int insn32;
1547
1548/* The linker script knows the section names for placement.
1549 The entry_names are used to do simple name mangling on the stubs.
1550 Given a function name, and its type, the stub can be found. The
 1551 name can be changed. The only requirement is that the %s be present. */
1552#define STUB_ENTRY_NAME "__%s_veneer"
1553
1554/* The name of the dynamic interpreter. This is put in the .interp
1555 section. */
1556#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1557
1558#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1559 (((1 << 25) - 1) << 2)
1560#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1561 (-((1 << 25) << 2))
1562
1563#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1564#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1565
1566static int
1567aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1568{
1569 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1570 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1571}
1572
1573static int
1574aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1575{
1576 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1577 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1578 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1579}
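/* Illustrative only -- not part of the original sources.  The two
   checks above bound the reach of the code they guard: a direct B/BL
   has a 26-bit signed word offset (roughly +/-128MB), while an ADRP
   has a 21-bit signed page immediate (roughly +/-4GB, page aligned).
   A hypothetical helper showing how a stub decision could be phrased: */

static int ATTRIBUTE_UNUSED
aarch64_branch_needs_stub_p (bfd_vma value, bfd_vma place)
{
  /* Sketch: a stub is needed when the destination is outside direct
     branch range of the call site.  */
  return !aarch64_valid_branch_p (value, place);
}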
1580
1581static const uint32_t aarch64_adrp_branch_stub [] =
1582{
1583 0x90000010, /* adrp ip0, X */
1584 /* R_AARCH64_ADR_HI21_PCREL(X) */
1585 0x91000210, /* add ip0, ip0, :lo12:X */
1586 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1587 0xd61f0200, /* br ip0 */
1588};
1589
1590static const uint32_t aarch64_long_branch_stub[] =
1591{
1592 0x58000090, /* ldr ip0, 1f */
1593 0x10000011, /* adr ip1, #0 */
1594 0x8b110210, /* add ip0, ip0, ip1 */
1595 0xd61f0200, /* br ip0 */
1596 0x00000000, /* 1: .xword
1597 R_AARCH64_PREL64(X) + 12
1598 */
1599 0x00000000,
1600};
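/* Sketch of the intent behind the two templates (assumptions, not part
   of the original comments): ip0/ip1 are the AArch64 intra-procedure
   call registers x16/x17.  The adrp stub can be used when the
   destination is within ADRP range of the stub (see
   aarch64_valid_for_adrp_p above); otherwise the long-branch stub loads
   a 64-bit offset from its literal and adds the address obtained with
   the adr instruction to reach the destination.  */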
1601
1602/* Section name for stubs is the associated section name plus this
1603 string. */
1604#define STUB_SUFFIX ".stub"
1605
1606enum elf64_aarch64_stub_type
1607{
1608 aarch64_stub_none,
1609 aarch64_stub_adrp_branch,
1610 aarch64_stub_long_branch,
1611};
1612
1613struct elf64_aarch64_stub_hash_entry
1614{
1615 /* Base hash table entry structure. */
1616 struct bfd_hash_entry root;
1617
1618 /* The stub section. */
1619 asection *stub_sec;
1620
1621 /* Offset within stub_sec of the beginning of this stub. */
1622 bfd_vma stub_offset;
1623
1624 /* Given the symbol's value and its section we can determine its final
1625 value when building the stubs (so the stub knows where to jump). */
1626 bfd_vma target_value;
1627 asection *target_section;
1628
1629 enum elf64_aarch64_stub_type stub_type;
1630
1631 /* The symbol table entry, if any, that this was derived from. */
1632 struct elf64_aarch64_link_hash_entry *h;
1633
1634 /* Destination symbol type */
1635 unsigned char st_type;
1636
1637 /* Where this stub is being called from, or, in the case of combined
1638 stub sections, the first input section in the group. */
1639 asection *id_sec;
1640
1641 /* The name for the local symbol at the start of this stub. The
1642 stub name in the hash table has to be unique; this does not, so
1643 it can be friendlier. */
1644 char *output_name;
1645};
1646
1647/* Used to build a map of a section. This is required for mixed-endian
1648 code/data. */
1649
1650typedef struct elf64_elf_section_map
1651{
1652 bfd_vma vma;
1653 char type;
1654}
1655elf64_aarch64_section_map;
1656
1657
1658typedef struct _aarch64_elf_section_data
1659{
1660 struct bfd_elf_section_data elf;
1661 unsigned int mapcount;
1662 unsigned int mapsize;
1663 elf64_aarch64_section_map *map;
1664}
1665_aarch64_elf_section_data;
1666
1667#define elf64_aarch64_section_data(sec) \
1668 ((_aarch64_elf_section_data *) elf_section_data (sec))
1669
1670/* The size of the thread control block. */
1671#define TCB_SIZE 16
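/* For reference, assuming the usual AArch64 "variant I" TLS layout:
   the thread pointer addresses a 16-byte thread control block with the
   static TLS blocks placed immediately after it, so thread-pointer
   relative (TPREL) offsets account for these 16 bytes.  */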
1672
1673struct elf_aarch64_local_symbol
1674{
1675 unsigned int got_type;
1676 bfd_signed_vma got_refcount;
1677 bfd_vma got_offset;
1678
1679 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1680 offset is from the end of the jump table and reserved entries
1681 within the PLTGOT.
1682
 1683     The magic value (bfd_vma) -1 indicates that an offset has not been
1684 allocated. */
1685 bfd_vma tlsdesc_got_jump_table_offset;
1686};
1687
1688struct elf_aarch64_obj_tdata
1689{
1690 struct elf_obj_tdata root;
1691
1692 /* local symbol descriptors */
1693 struct elf_aarch64_local_symbol *locals;
1694
1695 /* Zero to warn when linking objects with incompatible enum sizes. */
1696 int no_enum_size_warning;
1697
1698 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1699 int no_wchar_size_warning;
1700};
1701
1702#define elf_aarch64_tdata(bfd) \
1703 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1704
1705#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1706
1707#define is_aarch64_elf(bfd) \
1708 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1709 && elf_tdata (bfd) != NULL \
1710 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1711
1712static bfd_boolean
1713elf64_aarch64_mkobject (bfd *abfd)
1714{
1715 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1716 AARCH64_ELF_DATA);
1717}
1718
1719#define elf64_aarch64_hash_entry(ent) \
1720 ((struct elf64_aarch64_link_hash_entry *)(ent))
1721
1722#define GOT_UNKNOWN 0
1723#define GOT_NORMAL 1
1724#define GOT_TLS_GD 2
1725#define GOT_TLS_IE 4
1726#define GOT_TLSDESC_GD 8
1727
1728#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1729
1730/* AArch64 ELF linker hash entry. */
1731struct elf64_aarch64_link_hash_entry
1732{
1733 struct elf_link_hash_entry root;
1734
1735 /* Track dynamic relocs copied for this symbol. */
1736 struct elf_dyn_relocs *dyn_relocs;
1737
1738 /* Since PLT entries have variable size, we need to record the
1739 index into .got.plt instead of recomputing it from the PLT
1740 offset. */
1741 bfd_signed_vma plt_got_offset;
1742
1743 /* Bit mask representing the type of GOT entry(s) if any required by
1744 this symbol. */
1745 unsigned int got_type;
1746
1747 /* A pointer to the most recently used stub hash entry against this
1748 symbol. */
1749 struct elf64_aarch64_stub_hash_entry *stub_cache;
1750
1751 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1752 is from the end of the jump table and reserved entries within the PLTGOT.
1753
1754 The magic value (bfd_vma) -1 indicates that an offset has not
 1755     been allocated.  */
1756 bfd_vma tlsdesc_got_jump_table_offset;
1757};
1758
1759static unsigned int
1760elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1761 bfd *abfd,
1762 unsigned long r_symndx)
1763{
1764 if (h)
1765 return elf64_aarch64_hash_entry (h)->got_type;
1766
1767 if (! elf64_aarch64_locals (abfd))
1768 return GOT_UNKNOWN;
1769
1770 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1771}
1772
1773/* Get the AArch64 elf linker hash table from a link_info structure. */
1774#define elf64_aarch64_hash_table(info) \
1775 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1776
1777#define aarch64_stub_hash_lookup(table, string, create, copy) \
1778 ((struct elf64_aarch64_stub_hash_entry *) \
1779 bfd_hash_lookup ((table), (string), (create), (copy)))
1780
1781/* AArch64 ELF linker hash table. */
1782struct elf64_aarch64_link_hash_table
1783{
1784 /* The main hash table. */
1785 struct elf_link_hash_table root;
1786
1787 /* Nonzero to force PIC branch veneers. */
1788 int pic_veneer;
1789
1790 /* The number of bytes in the initial entry in the PLT. */
1791 bfd_size_type plt_header_size;
1792
 1793   /* The number of bytes in the subsequent PLT entries.  */
1794 bfd_size_type plt_entry_size;
1795
1796 /* Short-cuts to get to dynamic linker sections. */
1797 asection *sdynbss;
1798 asection *srelbss;
1799
1800 /* Small local sym cache. */
1801 struct sym_cache sym_cache;
1802
1803 /* For convenience in allocate_dynrelocs. */
1804 bfd *obfd;
1805
1806 /* The amount of space used by the reserved portion of the sgotplt
1807 section, plus whatever space is used by the jump slots. */
1808 bfd_vma sgotplt_jump_table_size;
1809
1810 /* The stub hash table. */
1811 struct bfd_hash_table stub_hash_table;
1812
1813 /* Linker stub bfd. */
1814 bfd *stub_bfd;
1815
1816 /* Linker call-backs. */
1817 asection *(*add_stub_section) (const char *, asection *);
1818 void (*layout_sections_again) (void);
1819
1820 /* Array to keep track of which stub sections have been created, and
1821 information on stub grouping. */
1822 struct map_stub
1823 {
1824 /* This is the section to which stubs in the group will be
1825 attached. */
1826 asection *link_sec;
1827 /* The stub section. */
1828 asection *stub_sec;
1829 } *stub_group;
1830
1831 /* Assorted information used by elf64_aarch64_size_stubs. */
1832 unsigned int bfd_count;
1833 int top_index;
1834 asection **input_list;
1835
1836 /* The offset into splt of the PLT entry for the TLS descriptor
1837 resolver. Special values are 0, if not necessary (or not found
1838 to be necessary yet), and -1 if needed but not determined
1839 yet. */
1840 bfd_vma tlsdesc_plt;
1841
1842 /* The GOT offset for the lazy trampoline. Communicated to the
1843 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1844 indicates an offset is not allocated. */
1845 bfd_vma dt_tlsdesc_got;
1846};
1847
1848
 1849/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
 1850   maximum range expressible by an unsigned number with the indicated
 1851   number of BITS, otherwise bfd_reloc_ok.  */
1852
1853static bfd_reloc_status_type
1854aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1855{
1856 bfd_vma lim;
1857 if (bits >= sizeof (bfd_vma) * 8)
1858 return bfd_reloc_ok;
1859 lim = (bfd_vma) 1 << bits;
1860 if (value >= lim)
1861 return bfd_reloc_overflow;
1862 return bfd_reloc_ok;
1863}
1864
1865
 1866/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
 1867   maximum range expressible by a signed number with the indicated
 1868   number of BITS, otherwise bfd_reloc_ok.  */
1869
1870static bfd_reloc_status_type
1871aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1872{
1873 bfd_signed_vma svalue = (bfd_signed_vma) value;
1874 bfd_signed_vma lim;
1875
1876 if (bits >= sizeof (bfd_vma) * 8)
1877 return bfd_reloc_ok;
1878 lim = (bfd_signed_vma) 1 << (bits - 1);
1879 if (svalue < -lim || svalue >= lim)
1880 return bfd_reloc_overflow;
1881 return bfd_reloc_ok;
1882}
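/* Worked example for the two checks above: with BITS == 21 (the
   ADR/ADRP immediate width), aarch64_signed_overflow accepts values
   in [-0x100000, 0xfffff] and aarch64_unsigned_overflow accepts
   [0, 0x1fffff]; anything outside yields bfd_reloc_overflow.  */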
1883
1884/* Create an entry in an AArch64 ELF linker hash table. */
1885
1886static struct bfd_hash_entry *
1887elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1888 struct bfd_hash_table *table,
1889 const char *string)
1890{
1891 struct elf64_aarch64_link_hash_entry *ret =
1892 (struct elf64_aarch64_link_hash_entry *) entry;
1893
1894 /* Allocate the structure if it has not already been allocated by a
1895 subclass. */
1896 if (ret == NULL)
1897 ret = bfd_hash_allocate (table,
1898 sizeof (struct elf64_aarch64_link_hash_entry));
1899 if (ret == NULL)
1900 return (struct bfd_hash_entry *) ret;
1901
1902 /* Call the allocation method of the superclass. */
1903 ret = ((struct elf64_aarch64_link_hash_entry *)
1904 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1905 table, string));
1906 if (ret != NULL)
1907 {
1908 ret->dyn_relocs = NULL;
1909 ret->got_type = GOT_UNKNOWN;
1910 ret->plt_got_offset = (bfd_vma) - 1;
1911 ret->stub_cache = NULL;
1912 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1913 }
1914
1915 return (struct bfd_hash_entry *) ret;
1916}
1917
1918/* Initialize an entry in the stub hash table. */
1919
1920static struct bfd_hash_entry *
1921stub_hash_newfunc (struct bfd_hash_entry *entry,
1922 struct bfd_hash_table *table, const char *string)
1923{
1924 /* Allocate the structure if it has not already been allocated by a
1925 subclass. */
1926 if (entry == NULL)
1927 {
1928 entry = bfd_hash_allocate (table,
1929 sizeof (struct
1930 elf64_aarch64_stub_hash_entry));
1931 if (entry == NULL)
1932 return entry;
1933 }
1934
1935 /* Call the allocation method of the superclass. */
1936 entry = bfd_hash_newfunc (entry, table, string);
1937 if (entry != NULL)
1938 {
1939 struct elf64_aarch64_stub_hash_entry *eh;
1940
1941 /* Initialize the local fields. */
1942 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1943 eh->stub_sec = NULL;
1944 eh->stub_offset = 0;
1945 eh->target_value = 0;
1946 eh->target_section = NULL;
1947 eh->stub_type = aarch64_stub_none;
1948 eh->h = NULL;
1949 eh->id_sec = NULL;
1950 }
1951
1952 return entry;
1953}
1954
1955
1956/* Copy the extra info we tack onto an elf_link_hash_entry. */
1957
1958static void
1959elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
1960 struct elf_link_hash_entry *dir,
1961 struct elf_link_hash_entry *ind)
1962{
1963 struct elf64_aarch64_link_hash_entry *edir, *eind;
1964
1965 edir = (struct elf64_aarch64_link_hash_entry *) dir;
1966 eind = (struct elf64_aarch64_link_hash_entry *) ind;
1967
1968 if (eind->dyn_relocs != NULL)
1969 {
1970 if (edir->dyn_relocs != NULL)
1971 {
1972 struct elf_dyn_relocs **pp;
1973 struct elf_dyn_relocs *p;
1974
1975 /* Add reloc counts against the indirect sym to the direct sym
1976 list. Merge any entries against the same section. */
1977 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
1978 {
1979 struct elf_dyn_relocs *q;
1980
1981 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1982 if (q->sec == p->sec)
1983 {
1984 q->pc_count += p->pc_count;
1985 q->count += p->count;
1986 *pp = p->next;
1987 break;
1988 }
1989 if (q == NULL)
1990 pp = &p->next;
1991 }
1992 *pp = edir->dyn_relocs;
1993 }
1994
1995 edir->dyn_relocs = eind->dyn_relocs;
1996 eind->dyn_relocs = NULL;
1997 }
1998
1999 if (ind->root.type == bfd_link_hash_indirect)
2000 {
2001 /* Copy over PLT info. */
2002 if (dir->got.refcount <= 0)
2003 {
2004 edir->got_type = eind->got_type;
2005 eind->got_type = GOT_UNKNOWN;
2006 }
2007 }
2008
2009 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2010}
2011
2012/* Create an AArch64 elf linker hash table. */
2013
2014static struct bfd_link_hash_table *
2015elf64_aarch64_link_hash_table_create (bfd *abfd)
2016{
2017 struct elf64_aarch64_link_hash_table *ret;
2018 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2019
7bf52ea2 2020 ret = bfd_zmalloc (amt);
2021 if (ret == NULL)
2022 return NULL;
2023
2024 if (!_bfd_elf_link_hash_table_init
2025 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2026 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2027 {
2028 free (ret);
2029 return NULL;
2030 }
2031
2032 ret->plt_header_size = PLT_ENTRY_SIZE;
2033 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
a06ea964 2034 ret->obfd = abfd;
2035 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2036
2037 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2038 sizeof (struct elf64_aarch64_stub_hash_entry)))
2039 {
2040 free (ret);
2041 return NULL;
2042 }
2043
2044 return &ret->root.root;
2045}
2046
2047/* Free the derived linker hash table. */
2048
2049static void
2050elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2051{
2052 struct elf64_aarch64_link_hash_table *ret
2053 = (struct elf64_aarch64_link_hash_table *) hash;
2054
2055 bfd_hash_table_free (&ret->stub_hash_table);
9f7c3e5e 2056 _bfd_elf_link_hash_table_free (hash);
2057}
2058
2059static bfd_vma
2060aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2061 bfd_vma addend, bfd_boolean weak_undef_p)
2062{
2063 switch (r_type)
2064 {
2065 case R_AARCH64_TLSDESC_CALL:
2066 case R_AARCH64_NONE:
2067 case R_AARCH64_NULL:
2068 break;
2069
2070 case R_AARCH64_ADR_PREL_LO21:
2071 case R_AARCH64_CONDBR19:
2072 case R_AARCH64_LD_PREL_LO19:
2073 case R_AARCH64_PREL16:
2074 case R_AARCH64_PREL32:
2075 case R_AARCH64_PREL64:
2076 case R_AARCH64_TSTBR14:
2077 if (weak_undef_p)
2078 value = place;
2079 value = value + addend - place;
2080 break;
2081
2082 case R_AARCH64_CALL26:
2083 case R_AARCH64_JUMP26:
2084 value = value + addend - place;
2085 break;
2086
2087 case R_AARCH64_ABS16:
2088 case R_AARCH64_ABS32:
2089 case R_AARCH64_MOVW_SABS_G0:
2090 case R_AARCH64_MOVW_SABS_G1:
2091 case R_AARCH64_MOVW_SABS_G2:
2092 case R_AARCH64_MOVW_UABS_G0:
2093 case R_AARCH64_MOVW_UABS_G0_NC:
2094 case R_AARCH64_MOVW_UABS_G1:
2095 case R_AARCH64_MOVW_UABS_G1_NC:
2096 case R_AARCH64_MOVW_UABS_G2:
2097 case R_AARCH64_MOVW_UABS_G2_NC:
2098 case R_AARCH64_MOVW_UABS_G3:
2099 value = value + addend;
2100 break;
2101
2102 case R_AARCH64_ADR_PREL_PG_HI21:
2103 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2104 if (weak_undef_p)
2105 value = PG (place);
2106 value = PG (value + addend) - PG (place);
2107 break;
2108
2109 case R_AARCH64_GOT_LD_PREL19:
2110 value = value + addend - place;
2111 break;
2112
a06ea964 2113 case R_AARCH64_ADR_GOT_PAGE:
418009c2 2114 case R_AARCH64_TLSDESC_ADR_PAGE21:
2115 case R_AARCH64_TLSGD_ADR_PAGE21:
2116 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2117 value = PG (value + addend) - PG (place);
2118 break;
2119
2120 case R_AARCH64_ADD_ABS_LO12_NC:
2121 case R_AARCH64_LD64_GOT_LO12_NC:
2122 case R_AARCH64_LDST8_ABS_LO12_NC:
2123 case R_AARCH64_LDST16_ABS_LO12_NC:
2124 case R_AARCH64_LDST32_ABS_LO12_NC:
2125 case R_AARCH64_LDST64_ABS_LO12_NC:
2126 case R_AARCH64_LDST128_ABS_LO12_NC:
2127 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2128 case R_AARCH64_TLSDESC_ADD:
2129 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2130 case R_AARCH64_TLSDESC_LDR:
2131 case R_AARCH64_TLSGD_ADD_LO12_NC:
2132 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2133 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2134 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2135 value = PG_OFFSET (value + addend);
2136 break;
2137
2138 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2139 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2140 value = (value + addend) & (bfd_vma) 0xffff0000;
2141 break;
2142 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2143 value = (value + addend) & (bfd_vma) 0xfff000;
2144 break;
2145
2146 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2147 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2148 value = (value + addend) & (bfd_vma) 0xffff;
2149 break;
2150
2151 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2152 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2153 value -= place & ~(bfd_vma) 0xffffffff;
2154 break;
2155 }
2156 return value;
2157}
2158
2159static bfd_boolean
2160aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2161 bfd_vma offset, bfd_vma value)
2162{
2163 reloc_howto_type *howto;
2164 bfd_vma place;
2165
2166 howto = elf64_aarch64_howto_from_type (r_type);
2167 place = (input_section->output_section->vma + input_section->output_offset
2168 + offset);
2169 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2170 return bfd_elf_aarch64_put_addend (input_bfd,
2171 input_section->contents + offset,
2172 howto, value);
2173}
2174
2175static enum elf64_aarch64_stub_type
2176aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2177{
2178 if (aarch64_valid_for_adrp_p (value, place))
2179 return aarch64_stub_adrp_branch;
2180 return aarch64_stub_long_branch;
2181}
2182
2183/* Determine the type of stub needed, if any, for a call. */
2184
2185static enum elf64_aarch64_stub_type
2186aarch64_type_of_stub (struct bfd_link_info *info,
2187 asection *input_sec,
2188 const Elf_Internal_Rela *rel,
2189 unsigned char st_type,
2190 struct elf64_aarch64_link_hash_entry *hash,
2191 bfd_vma destination)
2192{
2193 bfd_vma location;
2194 bfd_signed_vma branch_offset;
2195 unsigned int r_type;
2196 struct elf64_aarch64_link_hash_table *globals;
2197 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2198 bfd_boolean via_plt_p;
2199
2200 if (st_type != STT_FUNC)
2201 return stub_type;
2202
2203 globals = elf64_aarch64_hash_table (info);
2204 via_plt_p = (globals->root.splt != NULL && hash != NULL
2205 && hash->root.plt.offset != (bfd_vma) - 1);
2206
2207 if (via_plt_p)
2208 return stub_type;
2209
2210 /* Determine where the call point is. */
2211 location = (input_sec->output_offset
2212 + input_sec->output_section->vma + rel->r_offset);
2213
2214 branch_offset = (bfd_signed_vma) (destination - location);
2215
2216 r_type = ELF64_R_TYPE (rel->r_info);
2217
2218 /* We don't want to redirect any old unconditional jump in this way,
2219 only one which is being used for a sibcall, where it is
2220 acceptable for the IP0 and IP1 registers to be clobbered. */
2221 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2222 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2223 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2224 {
2225 stub_type = aarch64_stub_long_branch;
2226 }
2227
2228 return stub_type;
2229}
2230
2231/* Build a name for an entry in the stub hash table. */
2232
2233static char *
2234elf64_aarch64_stub_name (const asection *input_section,
2235 const asection *sym_sec,
2236 const struct elf64_aarch64_link_hash_entry *hash,
2237 const Elf_Internal_Rela *rel)
2238{
2239 char *stub_name;
2240 bfd_size_type len;
2241
2242 if (hash)
2243 {
2244 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2245 stub_name = bfd_malloc (len);
2246 if (stub_name != NULL)
2247 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2248 (unsigned int) input_section->id,
2249 hash->root.root.root.string,
2250 rel->r_addend);
2251 }
2252 else
2253 {
2254 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2255 stub_name = bfd_malloc (len);
2256 if (stub_name != NULL)
2257 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2258 (unsigned int) input_section->id,
2259 (unsigned int) sym_sec->id,
2260 (unsigned int) ELF64_R_SYM (rel->r_info),
2261 rel->r_addend);
2262 }
2263
2264 return stub_name;
2265}
2266
2267/* Look up an entry in the stub hash. Stub entries are cached because
2268 creating the stub name takes a bit of time. */
2269
2270static struct elf64_aarch64_stub_hash_entry *
2271elf64_aarch64_get_stub_entry (const asection *input_section,
2272 const asection *sym_sec,
2273 struct elf_link_hash_entry *hash,
2274 const Elf_Internal_Rela *rel,
2275 struct elf64_aarch64_link_hash_table *htab)
2276{
2277 struct elf64_aarch64_stub_hash_entry *stub_entry;
2278 struct elf64_aarch64_link_hash_entry *h =
2279 (struct elf64_aarch64_link_hash_entry *) hash;
2280 const asection *id_sec;
2281
2282 if ((input_section->flags & SEC_CODE) == 0)
2283 return NULL;
2284
2285 /* If this input section is part of a group of sections sharing one
2286 stub section, then use the id of the first section in the group.
2287 Stub names need to include a section id, as there may well be
 2288     more than one stub used to reach, say, printf, and we need to
2289 distinguish between them. */
2290 id_sec = htab->stub_group[input_section->id].link_sec;
2291
2292 if (h != NULL && h->stub_cache != NULL
2293 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2294 {
2295 stub_entry = h->stub_cache;
2296 }
2297 else
2298 {
2299 char *stub_name;
2300
2301 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2302 if (stub_name == NULL)
2303 return NULL;
2304
2305 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2306 stub_name, FALSE, FALSE);
2307 if (h != NULL)
2308 h->stub_cache = stub_entry;
2309
2310 free (stub_name);
2311 }
2312
2313 return stub_entry;
2314}
2315
2316/* Add a new stub entry to the stub hash. Not all fields of the new
2317 stub entry are initialised. */
2318
2319static struct elf64_aarch64_stub_hash_entry *
2320elf64_aarch64_add_stub (const char *stub_name,
2321 asection *section,
2322 struct elf64_aarch64_link_hash_table *htab)
2323{
2324 asection *link_sec;
2325 asection *stub_sec;
2326 struct elf64_aarch64_stub_hash_entry *stub_entry;
2327
2328 link_sec = htab->stub_group[section->id].link_sec;
2329 stub_sec = htab->stub_group[section->id].stub_sec;
2330 if (stub_sec == NULL)
2331 {
2332 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2333 if (stub_sec == NULL)
2334 {
2335 size_t namelen;
2336 bfd_size_type len;
2337 char *s_name;
2338
2339 namelen = strlen (link_sec->name);
2340 len = namelen + sizeof (STUB_SUFFIX);
2341 s_name = bfd_alloc (htab->stub_bfd, len);
2342 if (s_name == NULL)
2343 return NULL;
2344
2345 memcpy (s_name, link_sec->name, namelen);
2346 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2347 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2348 if (stub_sec == NULL)
2349 return NULL;
2350 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2351 }
2352 htab->stub_group[section->id].stub_sec = stub_sec;
2353 }
2354
2355 /* Enter this entry into the linker stub hash table. */
2356 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2357 TRUE, FALSE);
2358 if (stub_entry == NULL)
2359 {
2360 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2361 section->owner, stub_name);
2362 return NULL;
2363 }
2364
2365 stub_entry->stub_sec = stub_sec;
2366 stub_entry->stub_offset = 0;
2367 stub_entry->id_sec = link_sec;
2368
2369 return stub_entry;
2370}
2371
2372static bfd_boolean
2373aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2374 void *in_arg ATTRIBUTE_UNUSED)
2375{
2376 struct elf64_aarch64_stub_hash_entry *stub_entry;
2377 asection *stub_sec;
2378 bfd *stub_bfd;
2379 bfd_byte *loc;
2380 bfd_vma sym_value;
2381 unsigned int template_size;
2382 const uint32_t *template;
2383 unsigned int i;
2384
2385 /* Massage our args to the form they really have. */
2386 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2387
2388 stub_sec = stub_entry->stub_sec;
2389
2390 /* Make a note of the offset within the stubs for this entry. */
2391 stub_entry->stub_offset = stub_sec->size;
2392 loc = stub_sec->contents + stub_entry->stub_offset;
2393
2394 stub_bfd = stub_sec->owner;
2395
2396 /* This is the address of the stub destination. */
2397 sym_value = (stub_entry->target_value
2398 + stub_entry->target_section->output_offset
2399 + stub_entry->target_section->output_section->vma);
2400
2401 if (stub_entry->stub_type == aarch64_stub_long_branch)
2402 {
2403 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2404 + stub_sec->output_offset);
2405
2406 /* See if we can relax the stub. */
2407 if (aarch64_valid_for_adrp_p (sym_value, place))
2408 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2409 }
2410
2411 switch (stub_entry->stub_type)
2412 {
2413 case aarch64_stub_adrp_branch:
2414 template = aarch64_adrp_branch_stub;
2415 template_size = sizeof (aarch64_adrp_branch_stub);
2416 break;
2417 case aarch64_stub_long_branch:
2418 template = aarch64_long_branch_stub;
2419 template_size = sizeof (aarch64_long_branch_stub);
2420 break;
2421 default:
2422 BFD_FAIL ();
2423 return FALSE;
2424 }
2425
2426 for (i = 0; i < (template_size / sizeof template[0]); i++)
2427 {
2428 bfd_putl32 (template[i], loc);
2429 loc += 4;
2430 }
2431
2432 template_size = (template_size + 7) & ~7;
2433 stub_sec->size += template_size;
2434
2435 switch (stub_entry->stub_type)
2436 {
2437 case aarch64_stub_adrp_branch:
2438 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2439 stub_entry->stub_offset, sym_value))
2440 /* The stub would not have been relaxed if the offset was out
2441 of range. */
2442 BFD_FAIL ();
2443
2444 _bfd_final_link_relocate
2445 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2446 stub_bfd,
2447 stub_sec,
2448 stub_sec->contents,
2449 stub_entry->stub_offset + 4,
2450 sym_value,
2451 0);
2452 break;
2453
2454 case aarch64_stub_long_branch:
2455 /* We want the value relative to the address 12 bytes back from the
2456 value itself. */
2457 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2458 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2459 stub_sec->contents,
2460 stub_entry->stub_offset + 16,
2461 sym_value + 12, 0);
2462 break;
2463 default:
2464 break;
2465 }
2466
2467 return TRUE;
2468}
2469
2470/* As above, but don't actually build the stub. Just bump offset so
2471 we know stub section sizes. */
2472
2473static bfd_boolean
2474aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2475 void *in_arg ATTRIBUTE_UNUSED)
2476{
2477 struct elf64_aarch64_stub_hash_entry *stub_entry;
2478 int size;
2479
2480 /* Massage our args to the form they really have. */
2481 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2482
2483 switch (stub_entry->stub_type)
2484 {
2485 case aarch64_stub_adrp_branch:
2486 size = sizeof (aarch64_adrp_branch_stub);
2487 break;
2488 case aarch64_stub_long_branch:
2489 size = sizeof (aarch64_long_branch_stub);
2490 break;
2491 default:
2492 BFD_FAIL ();
2493 return FALSE;
2494 break;
2495 }
2496
2497 size = (size + 7) & ~7;
2498 stub_entry->stub_sec->size += size;
2499 return TRUE;
2500}
2501
2502/* External entry points for sizing and building linker stubs. */
2503
2504/* Set up various things so that we can make a list of input sections
2505 for each output section included in the link. Returns -1 on error,
2506 0 when no stubs will be needed, and 1 on success. */
2507
2508int
2509elf64_aarch64_setup_section_lists (bfd *output_bfd,
2510 struct bfd_link_info *info)
2511{
2512 bfd *input_bfd;
2513 unsigned int bfd_count;
2514 int top_id, top_index;
2515 asection *section;
2516 asection **input_list, **list;
2517 bfd_size_type amt;
2518 struct elf64_aarch64_link_hash_table *htab =
2519 elf64_aarch64_hash_table (info);
2520
2521 if (!is_elf_hash_table (htab))
2522 return 0;
2523
2524 /* Count the number of input BFDs and find the top input section id. */
2525 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2526 input_bfd != NULL; input_bfd = input_bfd->link_next)
2527 {
2528 bfd_count += 1;
2529 for (section = input_bfd->sections;
2530 section != NULL; section = section->next)
2531 {
2532 if (top_id < section->id)
2533 top_id = section->id;
2534 }
2535 }
2536 htab->bfd_count = bfd_count;
2537
2538 amt = sizeof (struct map_stub) * (top_id + 1);
2539 htab->stub_group = bfd_zmalloc (amt);
2540 if (htab->stub_group == NULL)
2541 return -1;
2542
2543 /* We can't use output_bfd->section_count here to find the top output
2544 section index as some sections may have been removed, and
2545 _bfd_strip_section_from_output doesn't renumber the indices. */
2546 for (section = output_bfd->sections, top_index = 0;
2547 section != NULL; section = section->next)
2548 {
2549 if (top_index < section->index)
2550 top_index = section->index;
2551 }
2552
2553 htab->top_index = top_index;
2554 amt = sizeof (asection *) * (top_index + 1);
2555 input_list = bfd_malloc (amt);
2556 htab->input_list = input_list;
2557 if (input_list == NULL)
2558 return -1;
2559
2560 /* For sections we aren't interested in, mark their entries with a
2561 value we can check later. */
2562 list = input_list + top_index;
2563 do
2564 *list = bfd_abs_section_ptr;
2565 while (list-- != input_list);
2566
2567 for (section = output_bfd->sections;
2568 section != NULL; section = section->next)
2569 {
2570 if ((section->flags & SEC_CODE) != 0)
2571 input_list[section->index] = NULL;
2572 }
2573
2574 return 1;
2575}
2576
2577/* Used by elf64_aarch64_next_input_section and group_sections. */
2578#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2579
2580/* The linker repeatedly calls this function for each input section,
2581 in the order that input sections are linked into output sections.
2582 Build lists of input sections to determine groupings between which
2583 we may insert linker stubs. */
2584
2585void
2586elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2587{
2588 struct elf64_aarch64_link_hash_table *htab =
2589 elf64_aarch64_hash_table (info);
2590
2591 if (isec->output_section->index <= htab->top_index)
2592 {
2593 asection **list = htab->input_list + isec->output_section->index;
2594
2595 if (*list != bfd_abs_section_ptr)
2596 {
2597 /* Steal the link_sec pointer for our list. */
2598 /* This happens to make the list in reverse order,
2599 which is what we want. */
2600 PREV_SEC (isec) = *list;
2601 *list = isec;
2602 }
2603 }
2604}
2605
2606/* See whether we can group stub sections together. Grouping stub
2607 sections may result in fewer stubs. More importantly, we need to
2608 put all .init* and .fini* stubs at the beginning of the .init or
2609 .fini output sections respectively, because glibc splits the
2610 _init and _fini functions into multiple parts. Putting a stub in
2611 the middle of a function is not a good idea. */
2612
2613static void
2614group_sections (struct elf64_aarch64_link_hash_table *htab,
2615 bfd_size_type stub_group_size,
2616 bfd_boolean stubs_always_before_branch)
2617{
2618 asection **list = htab->input_list + htab->top_index;
2619
2620 do
2621 {
2622 asection *tail = *list;
2623
2624 if (tail == bfd_abs_section_ptr)
2625 continue;
2626
2627 while (tail != NULL)
2628 {
2629 asection *curr;
2630 asection *prev;
2631 bfd_size_type total;
2632
2633 curr = tail;
2634 total = tail->size;
2635 while ((prev = PREV_SEC (curr)) != NULL
2636 && ((total += curr->output_offset - prev->output_offset)
2637 < stub_group_size))
2638 curr = prev;
2639
2640 /* OK, the size from the start of CURR to the end is less
2641 than stub_group_size and thus can be handled by one stub
2642 section. (Or the tail section is itself larger than
2643 stub_group_size, in which case we may be toast.)
2644 We should really be keeping track of the total size of
2645 stubs added here, as stubs contribute to the final output
2646 section size. */
2647 do
2648 {
2649 prev = PREV_SEC (tail);
2650 /* Set up this stub group. */
2651 htab->stub_group[tail->id].link_sec = curr;
2652 }
2653 while (tail != curr && (tail = prev) != NULL);
2654
2655 /* But wait, there's more! Input sections up to stub_group_size
2656 bytes before the stub section can be handled by it too. */
2657 if (!stubs_always_before_branch)
2658 {
2659 total = 0;
2660 while (prev != NULL
2661 && ((total += tail->output_offset - prev->output_offset)
2662 < stub_group_size))
2663 {
2664 tail = prev;
2665 prev = PREV_SEC (tail);
2666 htab->stub_group[tail->id].link_sec = curr;
2667 }
2668 }
2669 tail = prev;
2670 }
2671 }
2672 while (list-- != htab->input_list);
2673
2674 free (htab->input_list);
2675}
2676
2677#undef PREV_SEC
2678
2679/* Determine and set the size of the stub section for a final link.
2680
2681 The basic idea here is to examine all the relocations looking for
2682 PC-relative calls to a target that is unreachable with a "bl"
2683 instruction. */
2684
2685bfd_boolean
2686elf64_aarch64_size_stubs (bfd *output_bfd,
2687 bfd *stub_bfd,
2688 struct bfd_link_info *info,
2689 bfd_signed_vma group_size,
2690 asection * (*add_stub_section) (const char *,
2691 asection *),
2692 void (*layout_sections_again) (void))
2693{
2694 bfd_size_type stub_group_size;
2695 bfd_boolean stubs_always_before_branch;
2696 bfd_boolean stub_changed = 0;
2697 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2698
2699 /* Propagate mach to stub bfd, because it may not have been
2700 finalized when we created stub_bfd. */
2701 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2702 bfd_get_mach (output_bfd));
2703
2704 /* Stash our params away. */
2705 htab->stub_bfd = stub_bfd;
2706 htab->add_stub_section = add_stub_section;
2707 htab->layout_sections_again = layout_sections_again;
2708 stubs_always_before_branch = group_size < 0;
2709 if (group_size < 0)
2710 stub_group_size = -group_size;
2711 else
2712 stub_group_size = group_size;
2713
2714 if (stub_group_size == 1)
2715 {
2716 /* Default values. */
b9eead84 2717 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
2718 stub_group_size = 127 * 1024 * 1024;
2719 }
2720
2721 group_sections (htab, stub_group_size, stubs_always_before_branch);
2722
2723 while (1)
2724 {
2725 bfd *input_bfd;
2726 unsigned int bfd_indx;
2727 asection *stub_sec;
2728
2729 for (input_bfd = info->input_bfds, bfd_indx = 0;
2730 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2731 {
2732 Elf_Internal_Shdr *symtab_hdr;
2733 asection *section;
2734 Elf_Internal_Sym *local_syms = NULL;
2735
2736 /* We'll need the symbol table in a second. */
2737 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2738 if (symtab_hdr->sh_info == 0)
2739 continue;
2740
2741 /* Walk over each section attached to the input bfd. */
2742 for (section = input_bfd->sections;
2743 section != NULL; section = section->next)
2744 {
2745 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2746
2747 /* If there aren't any relocs, then there's nothing more
2748 to do. */
2749 if ((section->flags & SEC_RELOC) == 0
2750 || section->reloc_count == 0
2751 || (section->flags & SEC_CODE) == 0)
2752 continue;
2753
2754 /* If this section is a link-once section that will be
2755 discarded, then don't create any stubs. */
2756 if (section->output_section == NULL
2757 || section->output_section->owner != output_bfd)
2758 continue;
2759
2760 /* Get the relocs. */
2761 internal_relocs
2762 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2763 NULL, info->keep_memory);
2764 if (internal_relocs == NULL)
2765 goto error_ret_free_local;
2766
2767 /* Now examine each relocation. */
2768 irela = internal_relocs;
2769 irelaend = irela + section->reloc_count;
2770 for (; irela < irelaend; irela++)
2771 {
2772 unsigned int r_type, r_indx;
2773 enum elf64_aarch64_stub_type stub_type;
2774 struct elf64_aarch64_stub_hash_entry *stub_entry;
2775 asection *sym_sec;
2776 bfd_vma sym_value;
2777 bfd_vma destination;
2778 struct elf64_aarch64_link_hash_entry *hash;
2779 const char *sym_name;
2780 char *stub_name;
2781 const asection *id_sec;
2782 unsigned char st_type;
2783 bfd_size_type len;
2784
2785 r_type = ELF64_R_TYPE (irela->r_info);
2786 r_indx = ELF64_R_SYM (irela->r_info);
2787
2788 if (r_type >= (unsigned int) R_AARCH64_end)
2789 {
2790 bfd_set_error (bfd_error_bad_value);
2791 error_ret_free_internal:
2792 if (elf_section_data (section)->relocs == NULL)
2793 free (internal_relocs);
2794 goto error_ret_free_local;
2795 }
2796
2797 /* Only look for stubs on unconditional branch and
2798 branch and link instructions. */
2799 if (r_type != (unsigned int) R_AARCH64_CALL26
2800 && r_type != (unsigned int) R_AARCH64_JUMP26)
2801 continue;
2802
2803 /* Now determine the call target, its name, value,
2804 section. */
2805 sym_sec = NULL;
2806 sym_value = 0;
2807 destination = 0;
2808 hash = NULL;
2809 sym_name = NULL;
2810 if (r_indx < symtab_hdr->sh_info)
2811 {
2812 /* It's a local symbol. */
2813 Elf_Internal_Sym *sym;
2814 Elf_Internal_Shdr *hdr;
2815
2816 if (local_syms == NULL)
2817 {
2818 local_syms
2819 = (Elf_Internal_Sym *) symtab_hdr->contents;
2820 if (local_syms == NULL)
2821 local_syms
2822 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2823 symtab_hdr->sh_info, 0,
2824 NULL, NULL, NULL);
2825 if (local_syms == NULL)
2826 goto error_ret_free_internal;
2827 }
2828
2829 sym = local_syms + r_indx;
2830 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2831 sym_sec = hdr->bfd_section;
2832 if (!sym_sec)
2833 /* This is an undefined symbol. It can never
2834 be resolved. */
2835 continue;
2836
2837 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2838 sym_value = sym->st_value;
2839 destination = (sym_value + irela->r_addend
2840 + sym_sec->output_offset
2841 + sym_sec->output_section->vma);
2842 st_type = ELF_ST_TYPE (sym->st_info);
2843 sym_name
2844 = bfd_elf_string_from_elf_section (input_bfd,
2845 symtab_hdr->sh_link,
2846 sym->st_name);
2847 }
2848 else
2849 {
2850 int e_indx;
2851
2852 e_indx = r_indx - symtab_hdr->sh_info;
2853 hash = ((struct elf64_aarch64_link_hash_entry *)
2854 elf_sym_hashes (input_bfd)[e_indx]);
2855
2856 while (hash->root.root.type == bfd_link_hash_indirect
2857 || hash->root.root.type == bfd_link_hash_warning)
2858 hash = ((struct elf64_aarch64_link_hash_entry *)
2859 hash->root.root.u.i.link);
2860
2861 if (hash->root.root.type == bfd_link_hash_defined
2862 || hash->root.root.type == bfd_link_hash_defweak)
2863 {
2864 struct elf64_aarch64_link_hash_table *globals =
2865 elf64_aarch64_hash_table (info);
2866 sym_sec = hash->root.root.u.def.section;
2867 sym_value = hash->root.root.u.def.value;
2868 /* For a destination in a shared library,
2869 use the PLT stub as target address to
2870 decide whether a branch stub is
2871 needed. */
2872 if (globals->root.splt != NULL && hash != NULL
2873 && hash->root.plt.offset != (bfd_vma) - 1)
2874 {
2875 sym_sec = globals->root.splt;
2876 sym_value = hash->root.plt.offset;
2877 if (sym_sec->output_section != NULL)
2878 destination = (sym_value
2879 + sym_sec->output_offset
2880 +
2881 sym_sec->output_section->vma);
2882 }
2883 else if (sym_sec->output_section != NULL)
2884 destination = (sym_value + irela->r_addend
2885 + sym_sec->output_offset
2886 + sym_sec->output_section->vma);
2887 }
2888 else if (hash->root.root.type == bfd_link_hash_undefined
2889 || (hash->root.root.type
2890 == bfd_link_hash_undefweak))
2891 {
2892 /* For a shared library, use the PLT stub as
2893 target address to decide whether a long
2894 branch stub is needed.
2895 For absolute code, they cannot be handled. */
2896 struct elf64_aarch64_link_hash_table *globals =
2897 elf64_aarch64_hash_table (info);
2898
2899 if (globals->root.splt != NULL && hash != NULL
2900 && hash->root.plt.offset != (bfd_vma) - 1)
2901 {
2902 sym_sec = globals->root.splt;
2903 sym_value = hash->root.plt.offset;
2904 if (sym_sec->output_section != NULL)
2905 destination = (sym_value
2906 + sym_sec->output_offset
2907 +
2908 sym_sec->output_section->vma);
2909 }
2910 else
2911 continue;
2912 }
2913 else
2914 {
2915 bfd_set_error (bfd_error_bad_value);
2916 goto error_ret_free_internal;
2917 }
2918 st_type = ELF_ST_TYPE (hash->root.type);
2919 sym_name = hash->root.root.root.string;
2920 }
2921
2922 /* Determine what (if any) linker stub is needed. */
2923 stub_type = aarch64_type_of_stub
2924 (info, section, irela, st_type, hash, destination);
2925 if (stub_type == aarch64_stub_none)
2926 continue;
2927
2928 /* Support for grouping stub sections. */
2929 id_sec = htab->stub_group[section->id].link_sec;
2930
2931 /* Get the name of this stub. */
2932 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
2933 irela);
2934 if (!stub_name)
2935 goto error_ret_free_internal;
2936
2937 stub_entry =
2938 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2939 stub_name, FALSE, FALSE);
2940 if (stub_entry != NULL)
2941 {
2942 /* The proper stub has already been created. */
2943 free (stub_name);
2944 continue;
2945 }
2946
2947 stub_entry = elf64_aarch64_add_stub (stub_name, section,
2948 htab);
2949 if (stub_entry == NULL)
2950 {
2951 free (stub_name);
2952 goto error_ret_free_internal;
2953 }
2954
2955 stub_entry->target_value = sym_value;
2956 stub_entry->target_section = sym_sec;
2957 stub_entry->stub_type = stub_type;
2958 stub_entry->h = hash;
2959 stub_entry->st_type = st_type;
2960
2961 if (sym_name == NULL)
2962 sym_name = "unnamed";
2963 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
2964 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
2965 if (stub_entry->output_name == NULL)
2966 {
2967 free (stub_name);
2968 goto error_ret_free_internal;
2969 }
2970
2971 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
2972 sym_name);
2973
2974 stub_changed = TRUE;
2975 }
2976
2977 /* We're done with the internal relocs, free them. */
2978 if (elf_section_data (section)->relocs == NULL)
2979 free (internal_relocs);
2980 }
2981 }
2982
2983 if (!stub_changed)
2984 break;
2985
2986 /* OK, we've added some stubs. Find out the new size of the
2987 stub sections. */
2988 for (stub_sec = htab->stub_bfd->sections;
2989 stub_sec != NULL; stub_sec = stub_sec->next)
2990 stub_sec->size = 0;
2991
2992 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
2993
2994 /* Ask the linker to do its stuff. */
2995 (*htab->layout_sections_again) ();
2996 stub_changed = FALSE;
2997 }
2998
2999 return TRUE;
3000
3001error_ret_free_local:
3002 return FALSE;
3003}
3004
3005/* Build all the stubs associated with the current output file. The
3006 stubs are kept in a hash table attached to the main linker hash
3007 table. We also set up the .plt entries for statically linked PIC
3008 functions here. This function is called via aarch64_elf_finish in the
3009 linker. */
3010
3011bfd_boolean
3012elf64_aarch64_build_stubs (struct bfd_link_info *info)
3013{
3014 asection *stub_sec;
3015 struct bfd_hash_table *table;
3016 struct elf64_aarch64_link_hash_table *htab;
3017
3018 htab = elf64_aarch64_hash_table (info);
3019
3020 for (stub_sec = htab->stub_bfd->sections;
3021 stub_sec != NULL; stub_sec = stub_sec->next)
3022 {
3023 bfd_size_type size;
3024
3025 /* Ignore non-stub sections. */
3026 if (!strstr (stub_sec->name, STUB_SUFFIX))
3027 continue;
3028
3029 /* Allocate memory to hold the linker stubs. */
3030 size = stub_sec->size;
3031 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3032 if (stub_sec->contents == NULL && size != 0)
3033 return FALSE;
3034 stub_sec->size = 0;
3035 }
3036
3037 /* Build the stubs as directed by the stub hash table. */
3038 table = &htab->stub_hash_table;
3039 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3040
3041 return TRUE;
3042}
3043
3044
3045/* Add an entry to the code/data map for section SEC. */
3046
3047static void
3048elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3049{
3050 struct _aarch64_elf_section_data *sec_data =
3051 elf64_aarch64_section_data (sec);
3052 unsigned int newidx;
3053
3054 if (sec_data->map == NULL)
3055 {
3056 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3057 sec_data->mapcount = 0;
3058 sec_data->mapsize = 1;
3059 }
3060
3061 newidx = sec_data->mapcount++;
3062
3063 if (sec_data->mapcount > sec_data->mapsize)
3064 {
3065 sec_data->mapsize *= 2;
3066 sec_data->map = bfd_realloc_or_free
3067 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3068 }
3069
3070 if (sec_data->map)
3071 {
3072 sec_data->map[newidx].vma = vma;
3073 sec_data->map[newidx].type = type;
3074 }
3075}
3076
3077
3078/* Initialise maps of insn/data for input BFDs. */
3079void
3080bfd_elf64_aarch64_init_maps (bfd *abfd)
3081{
3082 Elf_Internal_Sym *isymbuf;
3083 Elf_Internal_Shdr *hdr;
3084 unsigned int i, localsyms;
3085
3086 /* Make sure that we are dealing with an AArch64 elf binary. */
3087 if (!is_aarch64_elf (abfd))
3088 return;
3089
3090 if ((abfd->flags & DYNAMIC) != 0)
3091 return;
3092
3093 hdr = &elf_symtab_hdr (abfd);
3094 localsyms = hdr->sh_info;
3095
3096 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3097 should contain the number of local symbols, which should come before any
3098 global symbols. Mapping symbols are always local. */
3099 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3100
3101 /* No internal symbols read? Skip this BFD. */
3102 if (isymbuf == NULL)
3103 return;
3104
3105 for (i = 0; i < localsyms; i++)
3106 {
3107 Elf_Internal_Sym *isym = &isymbuf[i];
3108 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3109 const char *name;
3110
3111 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3112 {
3113 name = bfd_elf_string_from_elf_section (abfd,
3114 hdr->sh_link,
3115 isym->st_name);
3116
3117 if (bfd_is_aarch64_special_symbol_name
3118 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3119 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3120 }
3121 }
3122}
3123
3124/* Set option values needed during linking. */
3125void
3126bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3127 struct bfd_link_info *link_info,
3128 int no_enum_warn,
3129 int no_wchar_warn, int pic_veneer)
3130{
3131 struct elf64_aarch64_link_hash_table *globals;
3132
3133 globals = elf64_aarch64_hash_table (link_info);
3134 globals->pic_veneer = pic_veneer;
3135
3136 BFD_ASSERT (is_aarch64_elf (output_bfd));
3137 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3138 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3139}
3140
3141#define MASK(n) ((1u << (n)) - 1)
3142
3143/* Decode the 26-bit offset of unconditional branch. */
3144static inline uint32_t
3145decode_branch_ofs_26 (uint32_t insn)
3146{
3147 return insn & MASK (26);
3148}
3149
3150/* Decode the 19-bit offset of conditional branch and compare & branch. */
3151static inline uint32_t
3152decode_cond_branch_ofs_19 (uint32_t insn)
3153{
3154 return (insn >> 5) & MASK (19);
3155}
3156
3157/* Decode the 19-bit offset of load literal. */
3158static inline uint32_t
3159decode_ld_lit_ofs_19 (uint32_t insn)
3160{
3161 return (insn >> 5) & MASK (19);
3162}
3163
3164/* Decode the 14-bit offset of test & branch. */
3165static inline uint32_t
3166decode_tst_branch_ofs_14 (uint32_t insn)
3167{
3168 return (insn >> 5) & MASK (14);
3169}
3170
3171/* Decode the 16-bit imm of move wide. */
3172static inline uint32_t
3173decode_movw_imm (uint32_t insn)
3174{
3175 return (insn >> 5) & MASK (16);
3176}
3177
3178/* Decode the 21-bit imm of adr. */
3179static inline uint32_t
3180decode_adr_imm (uint32_t insn)
3181{
3182 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3183}
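/* For reference: the ADR/ADRP immediate is split in the instruction
   word, with immlo in bits [30:29] and immhi in bits [23:5]; the
   decoder above and reencode_adr_imm below reassemble and split it as
   immhi:immlo.  E.g. an immediate of 0b101 is stored as immlo = 0b01
   and immhi = 0b1.  */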
3184
3185/* Decode the 12-bit imm of add immediate. */
3186static inline uint32_t
3187decode_add_imm (uint32_t insn)
3188{
3189 return (insn >> 10) & MASK (12);
3190}
3191
3192
3193/* Encode the 26-bit offset of unconditional branch. */
3194static inline uint32_t
3195reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3196{
3197 return (insn & ~MASK (26)) | (ofs & MASK (26));
3198}
3199
3200/* Encode the 19-bit offset of conditional branch and compare & branch. */
3201static inline uint32_t
3202reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3203{
3204 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3205}
3206
3207/* Decode the 19-bit offset of load literal. */
3208static inline uint32_t
3209reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3210{
3211 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3212}
3213
3214/* Encode the 14-bit offset of test & branch. */
3215static inline uint32_t
3216reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3217{
3218 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3219}
3220
3221/* Reencode the imm field of move wide. */
3222static inline uint32_t
3223reencode_movw_imm (uint32_t insn, uint32_t imm)
3224{
3225 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3226}
3227
3228/* Reencode the imm field of adr. */
3229static inline uint32_t
3230reencode_adr_imm (uint32_t insn, uint32_t imm)
3231{
3232 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3233 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3234}
3235
3236/* Reencode the imm field of ld/st pos immediate. */
3237static inline uint32_t
3238reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3239{
3240 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3241}
3242
3243/* Reencode the imm field of add immediate. */
3244static inline uint32_t
3245reencode_add_imm (uint32_t insn, uint32_t imm)
3246{
3247 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3248}
3249
3250/* Reencode mov[zn] to movz. */
3251static inline uint32_t
3252reencode_movzn_to_movz (uint32_t opcode)
3253{
3254 return opcode | (1 << 30);
3255}
3256
3257/* Reencode mov[zn] to movn. */
3258static inline uint32_t
3259reencode_movzn_to_movn (uint32_t opcode)
3260{
3261 return opcode & ~(1 << 30);
3262}
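/* For reference: MOVZ and MOVN differ only in their opc field, and
   toggling bit 30 of the instruction word switches between them.  The
   two helpers above let bfd_elf_aarch64_put_addend emit MOVZ for a
   non-negative group value and MOVN (with the value inverted) for a
   negative one, as required by the MOVW_SABS and TLSLE_MOVW_TPREL
   group relocations handled below.  */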
3263
3264/* Insert the addend/value into the instruction or data object being
3265 relocated. */
3266static bfd_reloc_status_type
3267bfd_elf_aarch64_put_addend (bfd *abfd,
3268 bfd_byte *address,
3269 reloc_howto_type *howto, bfd_signed_vma addend)
3270{
3271 bfd_reloc_status_type status = bfd_reloc_ok;
3272 bfd_signed_vma old_addend = addend;
3273 bfd_vma contents;
3274 int size;
3275
3276 size = bfd_get_reloc_size (howto);
3277 switch (size)
3278 {
3279 case 2:
3280 contents = bfd_get_16 (abfd, address);
3281 break;
3282 case 4:
3283 if (howto->src_mask != 0xffffffff)
3284 /* Must be 32-bit instruction, always little-endian. */
3285 contents = bfd_getl32 (address);
3286 else
3287 /* Must be 32-bit data (endianness dependent). */
3288 contents = bfd_get_32 (abfd, address);
3289 break;
3290 case 8:
3291 contents = bfd_get_64 (abfd, address);
3292 break;
3293 default:
3294 abort ();
3295 }
3296
3297 switch (howto->complain_on_overflow)
3298 {
3299 case complain_overflow_dont:
3300 break;
3301 case complain_overflow_signed:
3302 status = aarch64_signed_overflow (addend,
3303 howto->bitsize + howto->rightshift);
3304 break;
3305 case complain_overflow_unsigned:
3306 status = aarch64_unsigned_overflow (addend,
3307 howto->bitsize + howto->rightshift);
3308 break;
3309 case complain_overflow_bitfield:
3310 default:
3311 abort ();
3312 }
3313
3314 addend >>= howto->rightshift;
3315
3316 switch (howto->type)
3317 {
3318 case R_AARCH64_JUMP26:
3319 case R_AARCH64_CALL26:
3320 contents = reencode_branch_ofs_26 (contents, addend);
3321 break;
3322
3323 case R_AARCH64_CONDBR19:
3324 contents = reencode_cond_branch_ofs_19 (contents, addend);
3325 break;
3326
3327 case R_AARCH64_TSTBR14:
3328 contents = reencode_tst_branch_ofs_14 (contents, addend);
3329 break;
3330
3331 case R_AARCH64_LD_PREL_LO19:
f41aef5f 3332 case R_AARCH64_GOT_LD_PREL19:
3333 if (old_addend & ((1 << howto->rightshift) - 1))
3334 return bfd_reloc_overflow;
3335 contents = reencode_ld_lit_ofs_19 (contents, addend);
3336 break;
3337
3338 case R_AARCH64_TLSDESC_CALL:
3339 break;
3340
3341 case R_AARCH64_TLSGD_ADR_PAGE21:
3342 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
418009c2 3343 case R_AARCH64_TLSDESC_ADR_PAGE21:
3344 case R_AARCH64_ADR_GOT_PAGE:
3345 case R_AARCH64_ADR_PREL_LO21:
3346 case R_AARCH64_ADR_PREL_PG_HI21:
3347 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3348 contents = reencode_adr_imm (contents, addend);
3349 break;
3350
3351 case R_AARCH64_TLSGD_ADD_LO12_NC:
3352 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3353 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3354 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3355 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3356 case R_AARCH64_ADD_ABS_LO12_NC:
3357 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3358 12 bits of the page offset following
3359 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3360 (pc-relative) page base. */
3361 contents = reencode_add_imm (contents, addend);
3362 break;
3363
3364 case R_AARCH64_LDST8_ABS_LO12_NC:
3365 case R_AARCH64_LDST16_ABS_LO12_NC:
3366 case R_AARCH64_LDST32_ABS_LO12_NC:
3367 case R_AARCH64_LDST64_ABS_LO12_NC:
3368 case R_AARCH64_LDST128_ABS_LO12_NC:
3369 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3370 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3371 case R_AARCH64_LD64_GOT_LO12_NC:
3372 if (old_addend & ((1 << howto->rightshift) - 1))
3373 return bfd_reloc_overflow;
3374 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3375 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3376 which computes the (pc-relative) page base. */
3377 contents = reencode_ldst_pos_imm (contents, addend);
3378 break;
3379
3380 /* Group relocations to create high bits of a 16, 32, 48 or 64
3381 bit signed data or abs address inline. Will change
3382 instruction to MOVN or MOVZ depending on sign of calculated
3383 value. */
3384
3385 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3386 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3387 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3388 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3389 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3390 case R_AARCH64_MOVW_SABS_G0:
3391 case R_AARCH64_MOVW_SABS_G1:
3392 case R_AARCH64_MOVW_SABS_G2:
3393 /* NOTE: We can only come here with movz or movn. */
3394 if (addend < 0)
3395 {
3396 /* Force use of MOVN. */
3397 addend = ~addend;
3398 contents = reencode_movzn_to_movn (contents);
3399 }
3400 else
3401 {
3402 /* Force use of MOVZ. */
3403 contents = reencode_movzn_to_movz (contents);
3404 }
3405 /* fall through */
3406
3407 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3408 data or abs address inline. */
3409
3410 case R_AARCH64_MOVW_UABS_G0:
3411 case R_AARCH64_MOVW_UABS_G0_NC:
3412 case R_AARCH64_MOVW_UABS_G1:
3413 case R_AARCH64_MOVW_UABS_G1_NC:
3414 case R_AARCH64_MOVW_UABS_G2:
3415 case R_AARCH64_MOVW_UABS_G2_NC:
3416 case R_AARCH64_MOVW_UABS_G3:
3417 contents = reencode_movw_imm (contents, addend);
3418 break;
3419
3420 default:
3421 /* Repack simple data */
3422 if (howto->dst_mask & (howto->dst_mask + 1))
3423 return bfd_reloc_notsupported;
3424
3425 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3426 break;
3427 }
3428
3429 switch (size)
3430 {
3431 case 2:
3432 bfd_put_16 (abfd, contents, address);
3433 break;
3434 case 4:
3435 if (howto->dst_mask != 0xffffffff)
3436 /* must be 32-bit instruction, always little-endian */
3437 bfd_putl32 (contents, address);
3438 else
3439 /* must be 32-bit data (endianness dependent) */
3440 bfd_put_32 (abfd, contents, address);
3441 break;
3442 case 8:
3443 bfd_put_64 (abfd, contents, address);
3444 break;
3445 default:
3446 abort ();
3447 }
3448
3449 return status;
3450}
3451
3452static bfd_vma
3453aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3454 struct elf64_aarch64_link_hash_table
3455 *globals, struct bfd_link_info *info,
3456 bfd_vma value, bfd *output_bfd,
3457 bfd_boolean *unresolved_reloc_p)
3458{
3459 bfd_vma off = (bfd_vma) - 1;
3460 asection *basegot = globals->root.sgot;
3461 bfd_boolean dyn = globals->root.dynamic_sections_created;
3462
3463 if (h != NULL)
3464 {
3465 off = h->got.offset;
3466 BFD_ASSERT (off != (bfd_vma) - 1);
3467 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3468 || (info->shared
3469 && SYMBOL_REFERENCES_LOCAL (info, h))
3470 || (ELF_ST_VISIBILITY (h->other)
3471 && h->root.type == bfd_link_hash_undefweak))
3472 {
3473 /* This is actually a static link, or it is a -Bsymbolic link
3474 and the symbol is defined locally. We must initialize this
3475 entry in the global offset table. Since the offset must
3476 always be a multiple of 8, we use the least significant bit
3477 to record whether we have initialized it already.
3478 When doing a dynamic link, we create a .rel(a).got relocation
3479 entry to initialize the value. This is done in the
3480 finish_dynamic_symbol routine. */
3481 if ((off & 1) != 0)
3482 off &= ~1;
3483 else
3484 {
3485 bfd_put_64 (output_bfd, value, basegot->contents + off);
3486 h->got.offset |= 1;
3487 }
3488 }
3489 else
3490 *unresolved_reloc_p = FALSE;
3491
3492 off = off + basegot->output_section->vma + basegot->output_offset;
3493 }
3494
3495 return off;
3496}
3497
3498/* Change R_TYPE to a more efficient access model where possible,
3499 return the new reloc type. */
3500
3501static unsigned int
3502aarch64_tls_transition_without_check (unsigned int r_type,
3503 struct elf_link_hash_entry *h)
3504{
3505 bfd_boolean is_local = h == NULL;
3506 switch (r_type)
3507 {
3508 case R_AARCH64_TLSGD_ADR_PAGE21:
418009c2 3509 case R_AARCH64_TLSDESC_ADR_PAGE21:
3510 return is_local
3511 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3512
3513 case R_AARCH64_TLSGD_ADD_LO12_NC:
3514 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3515 return is_local
3516 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3517 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3518
3519 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3520 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3521
3522 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3523 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3524
3525 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3526 case R_AARCH64_TLSDESC_CALL:
3527 /* Instructions with these relocations will become NOPs. */
3528 return R_AARCH64_NONE;
3529 }
3530
3531 return r_type;
3532}
3533
3534static unsigned int
3535aarch64_reloc_got_type (unsigned int r_type)
3536{
3537 switch (r_type)
3538 {
3539 case R_AARCH64_LD64_GOT_LO12_NC:
3540 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3541 case R_AARCH64_GOT_LD_PREL19:
3542 return GOT_NORMAL;
3543
3544 case R_AARCH64_TLSGD_ADR_PAGE21:
3545 case R_AARCH64_TLSGD_ADD_LO12_NC:
3546 return GOT_TLS_GD;
3547
3548 case R_AARCH64_TLSDESC_ADD_LO12_NC:
418009c2 3549 case R_AARCH64_TLSDESC_ADR_PAGE21:
3550 case R_AARCH64_TLSDESC_CALL:
3551 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3552 return GOT_TLSDESC_GD;
3553
3554 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3555 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3556 return GOT_TLS_IE;
3557
3558 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3559 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3560 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3561 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3562 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3563 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3564 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3565 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3566 return GOT_UNKNOWN;
3567 }
3568 return GOT_UNKNOWN;
3569}
3570
3571static bfd_boolean
3572aarch64_can_relax_tls (bfd *input_bfd,
3573 struct bfd_link_info *info,
3574 unsigned int r_type,
3575 struct elf_link_hash_entry *h,
3576 unsigned long r_symndx)
3577{
3578 unsigned int symbol_got_type;
3579 unsigned int reloc_got_type;
3580
3581 if (! IS_AARCH64_TLS_RELOC (r_type))
3582 return FALSE;
3583
3584 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3585 reloc_got_type = aarch64_reloc_got_type (r_type);
3586
3587 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3588 return TRUE;
3589
3590 if (info->shared)
3591 return FALSE;
3592
3593 if (h && h->root.type == bfd_link_hash_undefweak)
3594 return FALSE;
3595
3596 return TRUE;
3597}
3598
3599static unsigned int
3600aarch64_tls_transition (bfd *input_bfd,
3601 struct bfd_link_info *info,
3602 unsigned int r_type,
3603 struct elf_link_hash_entry *h,
3604 unsigned long r_symndx)
3605{
3606 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3607 return r_type;
3608
3609 return aarch64_tls_transition_without_check (r_type, h);
3610}
3611
3612/* Return the base VMA address which should be subtracted from real addresses
3613 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3614
3615static bfd_vma
3616dtpoff_base (struct bfd_link_info *info)
3617{
3618 /* If tls_sec is NULL, we should have signalled an error already. */
3619 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3620 return elf_hash_table (info)->tls_sec->vma;
3621}
3622
3623
3624/* Return the base VMA address which should be subtracted from real addresses
3625 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3626
3627static bfd_vma
3628tpoff_base (struct bfd_link_info *info)
3629{
3630 struct elf_link_hash_table *htab = elf_hash_table (info);
3631
3632 /* If tls_sec is NULL, we should have signalled an error already. */
3633 if (htab->tls_sec == NULL)
3634 return 0;
3635
3636 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3637 htab->tls_sec->alignment_power);
3638 return htab->tls_sec->vma - base;
3639}
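
/* Worked example (illustrative, not from the original source): with
   TCB_SIZE == 16 and a TLS output section aligned to 8 bytes,
   align_power () leaves the base at 16, so tpoff_base () returns
   tls_sec->vma - 16.  A thread-local variable placed 4 bytes into the
   TLS section therefore resolves to a TPREL offset of 16 + 4 == 20
   from the thread pointer.  */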
3640
3641static bfd_vma *
3642symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3643 unsigned long r_symndx)
3644{
 3645  /* Return a pointer to the GOT offset slot for the symbol given
 3646     by h, or for the local symbol given by r_symndx.  */
3647 if (h != NULL)
3648 return &h->got.offset;
3649 else
3650 {
3651 /* local symbol */
3652 struct elf_aarch64_local_symbol *l;
3653
3654 l = elf64_aarch64_locals (input_bfd);
3655 return &l[r_symndx].got_offset;
3656 }
3657}
3658
3659static void
3660symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3661 unsigned long r_symndx)
3662{
3663 bfd_vma *p;
3664 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3665 *p |= 1;
3666}
3667
3668static int
3669symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3670 unsigned long r_symndx)
3671{
3672 bfd_vma value;
3673 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3674 return value & 1;
3675}
3676
3677static bfd_vma
3678symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3679 unsigned long r_symndx)
3680{
3681 bfd_vma value;
3682 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3683 value &= ~1;
3684 return value;
3685}
3686
3687static bfd_vma *
3688symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3689 unsigned long r_symndx)
3690{
 3691  /* Return a pointer to the TLSDESC GOT jump table offset slot for
 3692     the symbol given by h, or for the local symbol given by r_symndx.  */
3693 if (h != NULL)
3694 {
3695 struct elf64_aarch64_link_hash_entry *eh;
3696 eh = (struct elf64_aarch64_link_hash_entry *) h;
3697 return &eh->tlsdesc_got_jump_table_offset;
3698 }
3699 else
3700 {
3701 /* local symbol */
3702 struct elf_aarch64_local_symbol *l;
3703
3704 l = elf64_aarch64_locals (input_bfd);
3705 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3706 }
3707}
3708
3709static void
3710symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3711 unsigned long r_symndx)
3712{
3713 bfd_vma *p;
3714 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3715 *p |= 1;
3716}
3717
3718static int
3719symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3720 struct elf_link_hash_entry *h,
3721 unsigned long r_symndx)
3722{
3723 bfd_vma value;
3724 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3725 return value & 1;
3726}
3727
3728static bfd_vma
3729symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3730 unsigned long r_symndx)
3731{
3732 bfd_vma value;
3733 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3734 value &= ~1;
3735 return value;
3736}
3737
3738/* Perform a relocation as part of a final link. */
3739static bfd_reloc_status_type
3740elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3741 bfd *input_bfd,
3742 bfd *output_bfd,
3743 asection *input_section,
3744 bfd_byte *contents,
3745 Elf_Internal_Rela *rel,
3746 bfd_vma value,
3747 struct bfd_link_info *info,
3748 asection *sym_sec,
3749 struct elf_link_hash_entry *h,
3750 bfd_boolean *unresolved_reloc_p,
3751 bfd_boolean save_addend,
8847944f 3752 bfd_vma *saved_addend)
3753{
3754 unsigned int r_type = howto->type;
3755 unsigned long r_symndx;
3756 bfd_byte *hit_data = contents + rel->r_offset;
3757 bfd_vma place;
3758 bfd_signed_vma signed_addend;
3759 struct elf64_aarch64_link_hash_table *globals;
3760 bfd_boolean weak_undef_p;
3761
3762 globals = elf64_aarch64_hash_table (info);
3763
3764 BFD_ASSERT (is_aarch64_elf (input_bfd));
3765
3766 r_symndx = ELF64_R_SYM (rel->r_info);
3767
3768 /* It is possible to have linker relaxations on some TLS access
3769 models. Update our information here. */
3770 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3771
3772 if (r_type != howto->type)
3773 howto = elf64_aarch64_howto_from_type (r_type);
3774
3775 place = input_section->output_section->vma
3776 + input_section->output_offset + rel->r_offset;
3777
3778 /* Get addend, accumulating the addend for consecutive relocs
3779 which refer to the same offset. */
3780 signed_addend = saved_addend ? *saved_addend : 0;
3781 signed_addend += rel->r_addend;
3782
3783 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3784 : bfd_is_und_section (sym_sec));
3785 switch (r_type)
3786 {
3787 case R_AARCH64_NONE:
3788 case R_AARCH64_NULL:
3789 case R_AARCH64_TLSDESC_CALL:
3790 *unresolved_reloc_p = FALSE;
3791 return bfd_reloc_ok;
3792
3793 case R_AARCH64_ABS64:
3794
3795 /* When generating a shared object or relocatable executable, these
3796 relocations are copied into the output file to be resolved at
3797 run time. */
3798 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3799 && (input_section->flags & SEC_ALLOC)
3800 && (h == NULL
3801 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3802 || h->root.type != bfd_link_hash_undefweak))
3803 {
3804 Elf_Internal_Rela outrel;
3805 bfd_byte *loc;
3806 bfd_boolean skip, relocate;
3807 asection *sreloc;
3808
3809 *unresolved_reloc_p = FALSE;
3810
3811 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3812 input_section, 1);
3813 if (sreloc == NULL)
3814 return bfd_reloc_notsupported;
3815
3816 skip = FALSE;
3817 relocate = FALSE;
3818
3819 outrel.r_addend = signed_addend;
3820 outrel.r_offset =
3821 _bfd_elf_section_offset (output_bfd, info, input_section,
3822 rel->r_offset);
3823 if (outrel.r_offset == (bfd_vma) - 1)
3824 skip = TRUE;
3825 else if (outrel.r_offset == (bfd_vma) - 2)
3826 {
3827 skip = TRUE;
3828 relocate = TRUE;
3829 }
3830
3831 outrel.r_offset += (input_section->output_section->vma
3832 + input_section->output_offset);
3833
3834 if (skip)
3835 memset (&outrel, 0, sizeof outrel);
3836 else if (h != NULL
3837 && h->dynindx != -1
3838 && (!info->shared || !info->symbolic || !h->def_regular))
3839 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3840 else
3841 {
3842 int symbol;
3843
3844 /* On SVR4-ish systems, the dynamic loader cannot
3845 relocate the text and data segments independently,
3846 so the symbol does not matter. */
3847 symbol = 0;
3848 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3849 outrel.r_addend += value;
3850 }
3851
8847944f 3852 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3853 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3854
8847944f 3855 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3856 {
 3857	      /* Sanity check that we have previously allocated
3858 sufficient space in the relocation section for the
3859 number of relocations we actually want to emit. */
3860 abort ();
3861 }
3862
3863 /* If this reloc is against an external symbol, we do not want to
3864 fiddle with the addend. Otherwise, we need to include the symbol
3865 value so that it becomes an addend for the dynamic reloc. */
3866 if (!relocate)
3867 return bfd_reloc_ok;
3868
3869 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3870 contents, rel->r_offset, value,
3871 signed_addend);
3872 }
3873 else
3874 value += signed_addend;
3875 break;
3876
3877 case R_AARCH64_JUMP26:
3878 case R_AARCH64_CALL26:
3879 {
3880 asection *splt = globals->root.splt;
3881 bfd_boolean via_plt_p =
3882 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3883
3884 /* A call to an undefined weak symbol is converted to a jump to
3885 the next instruction unless a PLT entry will be created.
3886 The jump to the next instruction is optimized as a NOP.
3887 Do the same for local undefined symbols. */
3888 if (weak_undef_p && ! via_plt_p)
3889 {
3890 bfd_putl32 (INSN_NOP, hit_data);
3891 return bfd_reloc_ok;
3892 }
3893
3894 /* If the call goes through a PLT entry, make sure to
3895 check distance to the right destination address. */
3896 if (via_plt_p)
3897 {
3898 value = (splt->output_section->vma
3899 + splt->output_offset + h->plt.offset);
3900 *unresolved_reloc_p = FALSE;
3901 }
3902
 3903	/* If the target symbol is global and marked as a function, the
 3904	   relocation applies to a function call or a tail call.  In this
 3905	   situation we can veneer out-of-range branches.  The veneers
 3906	   use IP0 and IP1, hence they cannot be used for arbitrary
 3907	   out-of-range branches that occur within the body of a function.  */
3908 if (h && h->type == STT_FUNC)
3909 {
3910 /* Check if a stub has to be inserted because the destination
3911 is too far away. */
3912 if (! aarch64_valid_branch_p (value, place))
3913 {
3914 /* The target is out of reach, so redirect the branch to
3915 the local stub for this function. */
3916 struct elf64_aarch64_stub_hash_entry *stub_entry;
3917 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3918 sym_sec, h,
3919 rel, globals);
3920 if (stub_entry != NULL)
3921 value = (stub_entry->stub_offset
3922 + stub_entry->stub_sec->output_offset
3923 + stub_entry->stub_sec->output_section->vma);
3924 }
3925 }
3926 }
3927 value = aarch64_resolve_relocation (r_type, place, value,
3928 signed_addend, weak_undef_p);
3929 break;
3930
3931 case R_AARCH64_ABS16:
3932 case R_AARCH64_ABS32:
3933 case R_AARCH64_ADD_ABS_LO12_NC:
3934 case R_AARCH64_ADR_PREL_LO21:
3935 case R_AARCH64_ADR_PREL_PG_HI21:
3936 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3937 case R_AARCH64_CONDBR19:
3938 case R_AARCH64_LD_PREL_LO19:
3939 case R_AARCH64_LDST8_ABS_LO12_NC:
3940 case R_AARCH64_LDST16_ABS_LO12_NC:
3941 case R_AARCH64_LDST32_ABS_LO12_NC:
3942 case R_AARCH64_LDST64_ABS_LO12_NC:
3943 case R_AARCH64_LDST128_ABS_LO12_NC:
3944 case R_AARCH64_MOVW_SABS_G0:
3945 case R_AARCH64_MOVW_SABS_G1:
3946 case R_AARCH64_MOVW_SABS_G2:
3947 case R_AARCH64_MOVW_UABS_G0:
3948 case R_AARCH64_MOVW_UABS_G0_NC:
3949 case R_AARCH64_MOVW_UABS_G1:
3950 case R_AARCH64_MOVW_UABS_G1_NC:
3951 case R_AARCH64_MOVW_UABS_G2:
3952 case R_AARCH64_MOVW_UABS_G2_NC:
3953 case R_AARCH64_MOVW_UABS_G3:
3954 case R_AARCH64_PREL16:
3955 case R_AARCH64_PREL32:
3956 case R_AARCH64_PREL64:
3957 case R_AARCH64_TSTBR14:
3958 value = aarch64_resolve_relocation (r_type, place, value,
3959 signed_addend, weak_undef_p);
3960 break;
3961
3962 case R_AARCH64_LD64_GOT_LO12_NC:
3963 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3964 case R_AARCH64_GOT_LD_PREL19:
3965 if (globals->root.sgot == NULL)
3966 BFD_ASSERT (h != NULL);
3967
3968 if (h != NULL)
3969 {
3970 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
3971 output_bfd,
3972 unresolved_reloc_p);
3973 value = aarch64_resolve_relocation (r_type, place, value,
3974 0, weak_undef_p);
3975 }
3976 break;
3977
3978 case R_AARCH64_TLSGD_ADR_PAGE21:
3979 case R_AARCH64_TLSGD_ADD_LO12_NC:
3980 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3981 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3982 if (globals->root.sgot == NULL)
3983 return bfd_reloc_notsupported;
3984
3985 value = (symbol_got_offset (input_bfd, h, r_symndx)
3986 + globals->root.sgot->output_section->vma
3987 + globals->root.sgot->output_section->output_offset);
3988
3989 value = aarch64_resolve_relocation (r_type, place, value,
3990 0, weak_undef_p);
3991 *unresolved_reloc_p = FALSE;
3992 break;
3993
3994 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3995 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3996 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3997 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3998 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3999 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4000 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4001 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4002 value = aarch64_resolve_relocation (r_type, place, value,
bb3f9ed8 4003 signed_addend - tpoff_base (info), weak_undef_p);
4004 *unresolved_reloc_p = FALSE;
4005 break;
4006
418009c2 4007 case R_AARCH64_TLSDESC_ADR_PAGE21:
4008 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4009 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4010 case R_AARCH64_TLSDESC_ADD:
4011 case R_AARCH64_TLSDESC_LDR:
4012 if (globals->root.sgot == NULL)
4013 return bfd_reloc_notsupported;
4014
4015 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4016 + globals->root.sgotplt->output_section->vma
4017 + globals->root.sgotplt->output_section->output_offset
4018 + globals->sgotplt_jump_table_size);
4019
4020 value = aarch64_resolve_relocation (r_type, place, value,
4021 0, weak_undef_p);
4022 *unresolved_reloc_p = FALSE;
4023 break;
4024
4025 default:
4026 return bfd_reloc_notsupported;
4027 }
4028
4029 if (saved_addend)
4030 *saved_addend = value;
4031
4032 /* Only apply the final relocation in a sequence. */
4033 if (save_addend)
4034 return bfd_reloc_continue;
4035
4036 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4037}
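
/* Illustrative note, not part of the original source: R_AARCH64_JUMP26
   and R_AARCH64_CALL26 encode a signed 26-bit word offset, giving B/BL a
   reach of +/-128MiB from the relocation site.  aarch64_valid_branch_p ()
   checks that reach in the handling above, and calls to out-of-range
   STT_FUNC symbols are redirected through a long-branch stub.  */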
4038
4039/* Handle TLS relaxations. Relaxing is possible for symbols that use
 4040   R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4041 link.
4042
4043 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4044 is to then call final_link_relocate. Return other values in the
4045 case of error. */
4046
4047static bfd_reloc_status_type
4048elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4049 bfd *input_bfd, bfd_byte *contents,
4050 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4051{
4052 bfd_boolean is_local = h == NULL;
4053 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4054 unsigned long insn;
4055
4056 BFD_ASSERT (globals && input_bfd && contents && rel);
4057
4058 switch (r_type)
4059 {
4060 case R_AARCH64_TLSGD_ADR_PAGE21:
418009c2 4061 case R_AARCH64_TLSDESC_ADR_PAGE21:
4062 if (is_local)
4063 {
4064 /* GD->LE relaxation:
4065 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4066 or
4067 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4068 */
4069 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4070 return bfd_reloc_continue;
4071 }
4072 else
4073 {
4074 /* GD->IE relaxation:
4075 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4076 or
4077 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4078 */
4079 insn = bfd_getl32 (contents + rel->r_offset);
4080 return bfd_reloc_continue;
4081 }
4082
4083 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4084 if (is_local)
4085 {
4086 /* GD->LE relaxation:
4087 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4088 */
4089 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4090 return bfd_reloc_continue;
4091 }
4092 else
4093 {
4094 /* GD->IE relaxation:
4095 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4096 */
4097 insn = bfd_getl32 (contents + rel->r_offset);
4098 insn &= 0xfffffff0;
4099 bfd_putl32 (insn, contents + rel->r_offset);
4100 return bfd_reloc_continue;
4101 }
4102
4103 case R_AARCH64_TLSGD_ADD_LO12_NC:
4104 if (is_local)
4105 {
4106 /* GD->LE relaxation
4107 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4108 bl __tls_get_addr => mrs x1, tpidr_el0
4109 nop => add x0, x1, x0
4110 */
4111
4112 /* First kill the tls_get_addr reloc on the bl instruction. */
4113 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4114 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4115
4116 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4117 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4118 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4119 return bfd_reloc_continue;
4120 }
4121 else
4122 {
4123 /* GD->IE relaxation
4124 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4125 BL __tls_get_addr => mrs x1, tpidr_el0
4126 R_AARCH64_CALL26
4127 NOP => add x0, x1, x0
4128 */
4129
4130 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4131
4132 /* Remove the relocation on the BL instruction. */
4133 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4134
4135 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4136
 4137	  /* We choose to fix up the BL and NOP instructions using the
4138 offset from the second relocation to allow flexibility in
4139 scheduling instructions between the ADD and BL. */
4140 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4141 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4142 return bfd_reloc_continue;
4143 }
4144
4145 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4146 case R_AARCH64_TLSDESC_CALL:
4147 /* GD->IE/LE relaxation:
4148 add x0, x0, #:tlsdesc_lo12:var => nop
4149 blr xd => nop
4150 */
4151 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4152 return bfd_reloc_ok;
4153
4154 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4155 /* IE->LE relaxation:
4156 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4157 */
4158 if (is_local)
4159 {
4160 insn = bfd_getl32 (contents + rel->r_offset);
4161 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4162 }
4163 return bfd_reloc_continue;
4164
4165 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4166 /* IE->LE relaxation:
4167 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4168 */
4169 if (is_local)
4170 {
4171 insn = bfd_getl32 (contents + rel->r_offset);
4172 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4173 }
4174 return bfd_reloc_continue;
4175
4176 default:
4177 return bfd_reloc_continue;
4178 }
4179
4180 return bfd_reloc_ok;
4181}
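
/* Illustrative decoding of the A64 encodings written by the relaxation
   code above (a sketch, not part of the original source):
     0xd2a00000  movz x<d>, #0x0, lsl #16   (later filled via :tprel_g1:)
     0xf2800000  movk x<d>, #0x0            (later filled via :tprel_g0_nc:)
     0xf9400000  ldr  x0, [x0]              (later filled via :gottprel_lo12:)
     0xd53bd041  mrs  x1, tpidr_el0
     0x8b000020  add  x0, x1, x0
   The relocation left on each rewritten instruction supplies the missing
   immediate when the final value is applied.  */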
4182
4183/* Relocate an AArch64 ELF section. */
4184
4185static bfd_boolean
4186elf64_aarch64_relocate_section (bfd *output_bfd,
4187 struct bfd_link_info *info,
4188 bfd *input_bfd,
4189 asection *input_section,
4190 bfd_byte *contents,
4191 Elf_Internal_Rela *relocs,
4192 Elf_Internal_Sym *local_syms,
4193 asection **local_sections)
4194{
4195 Elf_Internal_Shdr *symtab_hdr;
4196 struct elf_link_hash_entry **sym_hashes;
4197 Elf_Internal_Rela *rel;
4198 Elf_Internal_Rela *relend;
4199 const char *name;
4200 struct elf64_aarch64_link_hash_table *globals;
4201 bfd_boolean save_addend = FALSE;
4202 bfd_vma addend = 0;
4203
4204 globals = elf64_aarch64_hash_table (info);
4205
4206 symtab_hdr = &elf_symtab_hdr (input_bfd);
4207 sym_hashes = elf_sym_hashes (input_bfd);
4208
4209 rel = relocs;
4210 relend = relocs + input_section->reloc_count;
4211 for (; rel < relend; rel++)
4212 {
4213 unsigned int r_type;
4214 unsigned int relaxed_r_type;
4215 reloc_howto_type *howto;
4216 unsigned long r_symndx;
4217 Elf_Internal_Sym *sym;
4218 asection *sec;
4219 struct elf_link_hash_entry *h;
4220 bfd_vma relocation;
4221 bfd_reloc_status_type r;
4222 arelent bfd_reloc;
4223 char sym_type;
4224 bfd_boolean unresolved_reloc = FALSE;
4225 char *error_message = NULL;
4226
4227 r_symndx = ELF64_R_SYM (rel->r_info);
4228 r_type = ELF64_R_TYPE (rel->r_info);
4229
4230 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4231 howto = bfd_reloc.howto;
4232
4233 if (howto == NULL)
4234 {
4235 (*_bfd_error_handler)
4236 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4237 input_bfd, input_section, r_type);
4238 return FALSE;
4239 }
4240
4241 h = NULL;
4242 sym = NULL;
4243 sec = NULL;
4244
4245 if (r_symndx < symtab_hdr->sh_info)
4246 {
4247 sym = local_syms + r_symndx;
4248 sym_type = ELF64_ST_TYPE (sym->st_info);
4249 sec = local_sections[r_symndx];
4250
4251 /* An object file might have a reference to a local
4252 undefined symbol. This is a daft object file, but we
4253 should at least do something about it. */
4254 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4255 && bfd_is_und_section (sec)
4256 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4257 {
4258 if (!info->callbacks->undefined_symbol
4259 (info, bfd_elf_string_from_elf_section
4260 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4261 input_bfd, input_section, rel->r_offset, TRUE))
4262 return FALSE;
4263 }
4264
4265 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4266 }
4267 else
4268 {
4269 bfd_boolean warned;
4270
4271 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4272 r_symndx, symtab_hdr, sym_hashes,
4273 h, sec, relocation,
4274 unresolved_reloc, warned);
4275
4276 sym_type = h->type;
4277 }
4278
4279 if (sec != NULL && discarded_section (sec))
4280 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4281 rel, 1, relend, howto, 0, contents);
4282
4283 if (info->relocatable)
4284 {
4285 /* This is a relocatable link. We don't have to change
4286 anything, unless the reloc is against a section symbol,
4287 in which case we have to adjust according to where the
4288 section symbol winds up in the output section. */
4289 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4290 rel->r_addend += sec->output_offset;
4291 continue;
4292 }
4293
4294 if (h != NULL)
4295 name = h->root.root.string;
4296 else
4297 {
4298 name = (bfd_elf_string_from_elf_section
4299 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4300 if (name == NULL || *name == '\0')
4301 name = bfd_section_name (input_bfd, sec);
4302 }
4303
4304 if (r_symndx != 0
4305 && r_type != R_AARCH64_NONE
4306 && r_type != R_AARCH64_NULL
4307 && (h == NULL
4308 || h->root.type == bfd_link_hash_defined
4309 || h->root.type == bfd_link_hash_defweak)
4310 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4311 {
4312 (*_bfd_error_handler)
4313 ((sym_type == STT_TLS
4314 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4315 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4316 input_bfd,
4317 input_section, (long) rel->r_offset, howto->name, name);
4318 }
4319
4320
 4321      /* We relax only if we can see that there can be a valid transition
 4322         from one reloc type to another.
4323 We call elf64_aarch64_final_link_relocate unless we're completely
4324 done, i.e., the relaxation produced the final output we want. */
4325
4326 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4327 h, r_symndx);
4328 if (relaxed_r_type != r_type)
4329 {
4330 r_type = relaxed_r_type;
4331 howto = elf64_aarch64_howto_from_type (r_type);
4332
4333 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4334 unresolved_reloc = 0;
4335 }
4336 else
4337 r = bfd_reloc_continue;
4338
4339 /* There may be multiple consecutive relocations for the
4340 same offset. In that case we are supposed to treat the
4341 output of each relocation as the addend for the next. */
4342 if (rel + 1 < relend
4343 && rel->r_offset == rel[1].r_offset
4344 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4345 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4346 save_addend = TRUE;
4347 else
4348 save_addend = FALSE;
4349
4350 if (r == bfd_reloc_continue)
4351 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4352 input_section, contents, rel,
4353 relocation, info, sec,
4354 h, &unresolved_reloc,
8847944f 4355 save_addend, &addend);
4356
4357 switch (r_type)
4358 {
4359 case R_AARCH64_TLSGD_ADR_PAGE21:
4360 case R_AARCH64_TLSGD_ADD_LO12_NC:
4361 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4362 {
4363 bfd_boolean need_relocs = FALSE;
4364 bfd_byte *loc;
4365 int indx;
4366 bfd_vma off;
4367
4368 off = symbol_got_offset (input_bfd, h, r_symndx);
4369 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4370
4371 need_relocs =
4372 (info->shared || indx != 0) &&
4373 (h == NULL
4374 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4375 || h->root.type != bfd_link_hash_undefweak);
4376
4377 BFD_ASSERT (globals->root.srelgot != NULL);
4378
4379 if (need_relocs)
4380 {
4381 Elf_Internal_Rela rela;
4382 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4383 rela.r_addend = 0;
4384 rela.r_offset = globals->root.sgot->output_section->vma +
4385 globals->root.sgot->output_offset + off;
4386
4387
4388 loc = globals->root.srelgot->contents;
4389 loc += globals->root.srelgot->reloc_count++
4390 * RELOC_SIZE (htab);
4391 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4392
4393 if (indx == 0)
4394 {
4395 bfd_put_64 (output_bfd,
4396 relocation - dtpoff_base (info),
4397 globals->root.sgot->contents + off
4398 + GOT_ENTRY_SIZE);
4399 }
4400 else
4401 {
4402 /* This TLS symbol is global. We emit a
 4403		     relocation to fix up the TLS offset at load
4404 time. */
4405 rela.r_info =
4406 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4407 rela.r_addend = 0;
4408 rela.r_offset =
4409 (globals->root.sgot->output_section->vma
4410 + globals->root.sgot->output_offset + off
4411 + GOT_ENTRY_SIZE);
4412
4413 loc = globals->root.srelgot->contents;
4414 loc += globals->root.srelgot->reloc_count++
4415 * RELOC_SIZE (globals);
4416 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4417 bfd_put_64 (output_bfd, (bfd_vma) 0,
4418 globals->root.sgot->contents + off
4419 + GOT_ENTRY_SIZE);
4420 }
4421 }
4422 else
4423 {
4424 bfd_put_64 (output_bfd, (bfd_vma) 1,
4425 globals->root.sgot->contents + off);
4426 bfd_put_64 (output_bfd,
4427 relocation - dtpoff_base (info),
4428 globals->root.sgot->contents + off
4429 + GOT_ENTRY_SIZE);
4430 }
4431
4432 symbol_got_offset_mark (input_bfd, h, r_symndx);
4433 }
4434 break;
4435
4436 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4437 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4438 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4439 {
4440 bfd_boolean need_relocs = FALSE;
4441 bfd_byte *loc;
4442 int indx;
4443 bfd_vma off;
4444
4445 off = symbol_got_offset (input_bfd, h, r_symndx);
4446
4447 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4448
4449 need_relocs =
4450 (info->shared || indx != 0) &&
4451 (h == NULL
4452 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4453 || h->root.type != bfd_link_hash_undefweak);
4454
4455 BFD_ASSERT (globals->root.srelgot != NULL);
4456
4457 if (need_relocs)
4458 {
4459 Elf_Internal_Rela rela;
4460
4461 if (indx == 0)
4462 rela.r_addend = relocation - dtpoff_base (info);
4463 else
4464 rela.r_addend = 0;
4465
4466 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4467 rela.r_offset = globals->root.sgot->output_section->vma +
4468 globals->root.sgot->output_offset + off;
4469
4470 loc = globals->root.srelgot->contents;
4471 loc += globals->root.srelgot->reloc_count++
4472 * RELOC_SIZE (htab);
4473
4474 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4475
4476 bfd_put_64 (output_bfd, rela.r_addend,
4477 globals->root.sgot->contents + off);
4478 }
4479 else
4480 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4481 globals->root.sgot->contents + off);
4482
4483 symbol_got_offset_mark (input_bfd, h, r_symndx);
4484 }
4485 break;
4486
4487 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4488 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4489 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4490 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4491 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4492 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4493 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4494 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4495 break;
4496
418009c2 4497 case R_AARCH64_TLSDESC_ADR_PAGE21:
4498 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4499 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4500 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4501 {
4502 bfd_boolean need_relocs = FALSE;
4503 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4504 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4505
4506 need_relocs = (h == NULL
4507 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4508 || h->root.type != bfd_link_hash_undefweak);
4509
4510 BFD_ASSERT (globals->root.srelgot != NULL);
4511 BFD_ASSERT (globals->root.sgot != NULL);
4512
4513 if (need_relocs)
4514 {
4515 bfd_byte *loc;
4516 Elf_Internal_Rela rela;
4517 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4518 rela.r_addend = 0;
4519 rela.r_offset = (globals->root.sgotplt->output_section->vma
4520 + globals->root.sgotplt->output_offset
4521 + off + globals->sgotplt_jump_table_size);
4522
4523 if (indx == 0)
4524 rela.r_addend = relocation - dtpoff_base (info);
4525
4526 /* Allocate the next available slot in the PLT reloc
4527 section to hold our R_AARCH64_TLSDESC, the next
4528 available slot is determined from reloc_count,
4529 which we step. But note, reloc_count was
 4530		     artificially moved down while allocating slots for
4531 real PLT relocs such that all of the PLT relocs
4532 will fit above the initial reloc_count and the
4533 extra stuff will fit below. */
4534 loc = globals->root.srelplt->contents;
4535 loc += globals->root.srelplt->reloc_count++
4536 * RELOC_SIZE (globals);
4537
4538 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4539
4540 bfd_put_64 (output_bfd, (bfd_vma) 0,
4541 globals->root.sgotplt->contents + off +
4542 globals->sgotplt_jump_table_size);
4543 bfd_put_64 (output_bfd, (bfd_vma) 0,
4544 globals->root.sgotplt->contents + off +
4545 globals->sgotplt_jump_table_size +
4546 GOT_ENTRY_SIZE);
4547 }
4548
4549 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4550 }
4551 break;
4552 }
4553
4554 if (!save_addend)
4555 addend = 0;
4556
4557
4558 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4559 because such sections are not SEC_ALLOC and thus ld.so will
4560 not process them. */
4561 if (unresolved_reloc
4562 && !((input_section->flags & SEC_DEBUGGING) != 0
4563 && h->def_dynamic)
4564 && _bfd_elf_section_offset (output_bfd, info, input_section,
 4565				      rel->r_offset) != (bfd_vma) - 1)
4566 {
4567 (*_bfd_error_handler)
4568 (_
4569 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4570 input_bfd, input_section, (long) rel->r_offset, howto->name,
4571 h->root.root.string);
4572 return FALSE;
4573 }
4574
4575 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4576 {
4577 switch (r)
4578 {
4579 case bfd_reloc_overflow:
4580 /* If the overflowing reloc was to an undefined symbol,
4581 we have already printed one error message and there
4582 is no point complaining again. */
4583 if ((!h ||
4584 h->root.type != bfd_link_hash_undefined)
4585 && (!((*info->callbacks->reloc_overflow)
4586 (info, (h ? &h->root : NULL), name, howto->name,
4587 (bfd_vma) 0, input_bfd, input_section,
4588 rel->r_offset))))
4589 return FALSE;
4590 break;
4591
4592 case bfd_reloc_undefined:
4593 if (!((*info->callbacks->undefined_symbol)
4594 (info, name, input_bfd, input_section,
4595 rel->r_offset, TRUE)))
4596 return FALSE;
4597 break;
4598
4599 case bfd_reloc_outofrange:
4600 error_message = _("out of range");
4601 goto common_error;
4602
4603 case bfd_reloc_notsupported:
4604 error_message = _("unsupported relocation");
4605 goto common_error;
4606
4607 case bfd_reloc_dangerous:
4608 /* error_message should already be set. */
4609 goto common_error;
4610
4611 default:
4612 error_message = _("unknown error");
4613 /* Fall through. */
4614
4615 common_error:
4616 BFD_ASSERT (error_message != NULL);
4617 if (!((*info->callbacks->reloc_dangerous)
4618 (info, error_message, input_bfd, input_section,
4619 rel->r_offset)))
4620 return FALSE;
4621 break;
4622 }
4623 }
4624 }
4625
4626 return TRUE;
4627}
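
/* Illustrative note, not part of the original source: when several
   relocations in the loop above share one r_offset, save_addend is set
   for all but the last of them.  final_link_relocate then stores the
   computed value through *saved_addend and returns bfd_reloc_continue,
   so only the final relocation in the sequence actually patches the
   section contents via bfd_elf_aarch64_put_addend ().  */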
4628
4629/* Set the right machine number. */
4630
4631static bfd_boolean
4632elf64_aarch64_object_p (bfd *abfd)
4633{
4634 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4635 return TRUE;
4636}
4637
4638/* Function to keep AArch64 specific flags in the ELF header. */
4639
4640static bfd_boolean
4641elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4642{
4643 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4644 {
4645 }
4646 else
4647 {
4648 elf_elfheader (abfd)->e_flags = flags;
4649 elf_flags_init (abfd) = TRUE;
4650 }
4651
4652 return TRUE;
4653}
4654
4655/* Copy backend specific data from one object module to another. */
4656
4657static bfd_boolean
4658elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4659{
4660 flagword in_flags;
4661
4662 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4663 return TRUE;
4664
4665 in_flags = elf_elfheader (ibfd)->e_flags;
4666
4667 elf_elfheader (obfd)->e_flags = in_flags;
4668 elf_flags_init (obfd) = TRUE;
4669
4670 /* Also copy the EI_OSABI field. */
4671 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4672 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4673
4674 /* Copy object attributes. */
4675 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4676
4677 return TRUE;
4678}
4679
4680/* Merge backend specific data from an object file to the output
4681 object file when linking. */
4682
4683static bfd_boolean
4684elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4685{
4686 flagword out_flags;
4687 flagword in_flags;
4688 bfd_boolean flags_compatible = TRUE;
4689 asection *sec;
4690
 4691  /* Check if we have the same endianness.  */
4692 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4693 return FALSE;
4694
4695 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4696 return TRUE;
4697
4698 /* The input BFD must have had its flags initialised. */
4699 /* The following seems bogus to me -- The flags are initialized in
4700 the assembler but I don't think an elf_flags_init field is
4701 written into the object. */
4702 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4703
4704 in_flags = elf_elfheader (ibfd)->e_flags;
4705 out_flags = elf_elfheader (obfd)->e_flags;
4706
4707 if (!elf_flags_init (obfd))
4708 {
4709 /* If the input is the default architecture and had the default
4710 flags then do not bother setting the flags for the output
4711 architecture, instead allow future merges to do this. If no
4712 future merges ever set these flags then they will retain their
 4713	 uninitialised values, which, surprise surprise, correspond
4714 to the default values. */
4715 if (bfd_get_arch_info (ibfd)->the_default
4716 && elf_elfheader (ibfd)->e_flags == 0)
4717 return TRUE;
4718
4719 elf_flags_init (obfd) = TRUE;
4720 elf_elfheader (obfd)->e_flags = in_flags;
4721
4722 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4723 && bfd_get_arch_info (obfd)->the_default)
4724 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4725 bfd_get_mach (ibfd));
4726
4727 return TRUE;
4728 }
4729
4730 /* Identical flags must be compatible. */
4731 if (in_flags == out_flags)
4732 return TRUE;
4733
4734 /* Check to see if the input BFD actually contains any sections. If
4735 not, its flags may not have been initialised either, but it
 4736     cannot actually cause any incompatibility.  Do not short-circuit
4737 dynamic objects; their section list may be emptied by
4738 elf_link_add_object_symbols.
4739
4740 Also check to see if there are no code sections in the input.
4741 In this case there is no need to check for code specific flags.
 4742     XXX - do we need to worry about floating-point format compatibility
 4743     in data sections?  */
4744 if (!(ibfd->flags & DYNAMIC))
4745 {
4746 bfd_boolean null_input_bfd = TRUE;
4747 bfd_boolean only_data_sections = TRUE;
4748
4749 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4750 {
4751 if ((bfd_get_section_flags (ibfd, sec)
4752 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4753 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4754 only_data_sections = FALSE;
4755
4756 null_input_bfd = FALSE;
4757 break;
4758 }
4759
4760 if (null_input_bfd || only_data_sections)
4761 return TRUE;
4762 }
4763
4764 return flags_compatible;
4765}
4766
4767/* Display the flags field. */
4768
4769static bfd_boolean
4770elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4771{
4772 FILE *file = (FILE *) ptr;
4773 unsigned long flags;
4774
4775 BFD_ASSERT (abfd != NULL && ptr != NULL);
4776
4777 /* Print normal ELF private data. */
4778 _bfd_elf_print_private_bfd_data (abfd, ptr);
4779
4780 flags = elf_elfheader (abfd)->e_flags;
4781 /* Ignore init flag - it may not be set, despite the flags field
4782 containing valid data. */
4783
4784 /* xgettext:c-format */
4785 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4786
4787 if (flags)
4788 fprintf (file, _("<Unrecognised flag bits set>"));
4789
4790 fputc ('\n', file);
4791
4792 return TRUE;
4793}
4794
4795/* Update the got entry reference counts for the section being removed. */
4796
4797static bfd_boolean
4798elf64_aarch64_gc_sweep_hook (bfd *abfd,
4799 struct bfd_link_info *info,
4800 asection *sec,
4801 const Elf_Internal_Rela * relocs)
a06ea964 4802{
4803 struct elf64_aarch64_link_hash_table *htab;
4804 Elf_Internal_Shdr *symtab_hdr;
4805 struct elf_link_hash_entry **sym_hashes;
cb8af559 4806 struct elf_aarch64_local_symbol *locals;
4807 const Elf_Internal_Rela *rel, *relend;
4808
4809 if (info->relocatable)
4810 return TRUE;
4811
4812 htab = elf64_aarch64_hash_table (info);
4813
4814 if (htab == NULL)
4815 return FALSE;
4816
4817 elf_section_data (sec)->local_dynrel = NULL;
4818
4819 symtab_hdr = &elf_symtab_hdr (abfd);
4820 sym_hashes = elf_sym_hashes (abfd);
4821
cb8af559 4822 locals = elf64_aarch64_locals (abfd);
4823
4824 relend = relocs + sec->reloc_count;
4825 for (rel = relocs; rel < relend; rel++)
4826 {
4827 unsigned long r_symndx;
4828 unsigned int r_type;
4829 struct elf_link_hash_entry *h = NULL;
4830
4831 r_symndx = ELF64_R_SYM (rel->r_info);
8847944f 4832
4833 if (r_symndx >= symtab_hdr->sh_info)
4834 {
4835 struct elf64_aarch64_link_hash_entry *eh;
4836 struct elf_dyn_relocs **pp;
4837 struct elf_dyn_relocs *p;
4838
4839 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4840 while (h->root.type == bfd_link_hash_indirect
4841 || h->root.type == bfd_link_hash_warning)
4842 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4843 eh = (struct elf64_aarch64_link_hash_entry *) h;
4844
4845 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
4846 {
4847 if (p->sec == sec)
4848 {
4849 /* Everything must go for SEC. */
4850 *pp = p->next;
4851 break;
4852 }
4853 }
4854 }
4855 else
4856 {
4857 Elf_Internal_Sym *isym;
4858
8847944f 4859 /* A local symbol. */
4860 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
4861 abfd, r_symndx);
4862 if (isym == NULL)
4863 return FALSE;
4864 }
4865
4866 r_type = ELF64_R_TYPE (rel->r_info);
 4867      r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
4868 switch (r_type)
4869 {
4870 case R_AARCH64_LD64_GOT_LO12_NC:
4871 case R_AARCH64_GOT_LD_PREL19:
4872 case R_AARCH64_ADR_GOT_PAGE:
4873 case R_AARCH64_TLSGD_ADR_PAGE21:
4874 case R_AARCH64_TLSGD_ADD_LO12_NC:
4875 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4876 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4877 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4878 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4879 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4880 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4881 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4882 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4883 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4884 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
418009c2 4885 case R_AARCH64_TLSDESC_ADR_PAGE21:
4886 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4887 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4888 if (h != NULL)
4889 {
4890 if (h->got.refcount > 0)
4891 h->got.refcount -= 1;
4892 }
cb8af559 4893 else if (locals != NULL)
59c108f7 4894 {
4895 if (locals[r_symndx].got_refcount > 0)
4896 locals[r_symndx].got_refcount -= 1;
4897 }
4898 break;
4899
4900 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4901 case R_AARCH64_ADR_PREL_PG_HI21:
4902 case R_AARCH64_ADR_PREL_LO21:
4903 if (h != NULL && info->executable)
4904 {
4905 if (h->plt.refcount > 0)
4906 h->plt.refcount -= 1;
4907 }
4908 break;
4909
4910 case R_AARCH64_CALL26:
4911 case R_AARCH64_JUMP26:
4912 /* If this is a local symbol then we resolve it
4913 directly without creating a PLT entry. */
4914 if (h == NULL)
4915 continue;
4916
4917 if (h->plt.refcount > 0)
4918 h->plt.refcount -= 1;
4919 break;
4920
4921 case R_AARCH64_ABS64:
8847944f 4922 if (h != NULL && info->executable)
4923 {
4924 if (h->plt.refcount > 0)
4925 h->plt.refcount -= 1;
4926 }
4927 break;
4928
4929 default:
4930 break;
4931 }
4932 }
4933
4934 return TRUE;
4935}
4936
4937/* Adjust a symbol defined by a dynamic object and referenced by a
4938 regular object. The current definition is in some section of the
4939 dynamic object, but we're not including those sections. We have to
4940 change the definition to something the rest of the link can
4941 understand. */
4942
4943static bfd_boolean
4944elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
4945 struct elf_link_hash_entry *h)
4946{
4947 struct elf64_aarch64_link_hash_table *htab;
4948 asection *s;
4949
4950 /* If this is a function, put it in the procedure linkage table. We
4951 will fill in the contents of the procedure linkage table later,
4952 when we know the address of the .got section. */
8847944f 4953 if (h->type == STT_FUNC || h->needs_plt)
4954 {
4955 if (h->plt.refcount <= 0
4956 || SYMBOL_CALLS_LOCAL (info, h)
4957 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
4958 && h->root.type == bfd_link_hash_undefweak))
4959 {
4960 /* This case can occur if we saw a CALL26 reloc in
4961 an input file, but the symbol wasn't referred to
4962 by a dynamic object or all references were
 4963	     garbage collected.  In which case we can end up
 4964	     resolving the calls directly, with no PLT entry.  */
4965 h->plt.offset = (bfd_vma) - 1;
4966 h->needs_plt = 0;
4967 }
4968
4969 return TRUE;
4970 }
4971 else
4972 /* It's possible that we incorrectly decided a .plt reloc was
 4973       needed for a PC-relative reloc to a non-function sym in
 4974       check_relocs.  We can't decide accurately between function and
 4975       non-function syms in check_relocs; objects loaded later in
 4976       the link may change h->type.  So fix it now.  */
4977 h->plt.offset = (bfd_vma) - 1;
4978
4979
4980 /* If this is a weak symbol, and there is a real definition, the
4981 processor independent code will have arranged for us to see the
4982 real definition first, and we can just use the same value. */
4983 if (h->u.weakdef != NULL)
4984 {
4985 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
4986 || h->u.weakdef->root.type == bfd_link_hash_defweak);
4987 h->root.u.def.section = h->u.weakdef->root.u.def.section;
4988 h->root.u.def.value = h->u.weakdef->root.u.def.value;
4989 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
4990 h->non_got_ref = h->u.weakdef->non_got_ref;
4991 return TRUE;
4992 }
4993
4994 /* If we are creating a shared library, we must presume that the
4995 only references to the symbol are via the global offset table.
4996 For such cases we need not do anything here; the relocations will
4997 be handled correctly by relocate_section. */
4998 if (info->shared)
4999 return TRUE;
5000
5001 /* If there are no references to this symbol that do not use the
5002 GOT, we don't need to generate a copy reloc. */
5003 if (!h->non_got_ref)
5004 return TRUE;
5005
5006 /* If -z nocopyreloc was given, we won't generate them either. */
5007 if (info->nocopyreloc)
5008 {
5009 h->non_got_ref = 0;
5010 return TRUE;
5011 }
5012
5013 /* We must allocate the symbol in our .dynbss section, which will
5014 become part of the .bss section of the executable. There will be
5015 an entry for this symbol in the .dynsym section. The dynamic
5016 object will contain position independent code, so all references
5017 from the dynamic object to this symbol will go through the global
5018 offset table. The dynamic linker will use the .dynsym entry to
5019 determine the address it must put in the global offset table, so
5020 both the dynamic object and the regular object will refer to the
5021 same memory location for the variable. */
5022
5023 htab = elf64_aarch64_hash_table (info);
5024
5025 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5026 to copy the initial value out of the dynamic object and into the
5027 runtime process image. */
5028 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5029 {
5030 htab->srelbss->size += RELOC_SIZE (htab);
5031 h->needs_copy = 1;
5032 }
5033
5034 s = htab->sdynbss;
5035
5036 return _bfd_elf_adjust_dynamic_copy (h, s);
5037
5038}
5039
5040static bfd_boolean
5041elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5042{
5043 struct elf_aarch64_local_symbol *locals;
5044 locals = elf64_aarch64_locals (abfd);
5045 if (locals == NULL)
5046 {
5047 locals = (struct elf_aarch64_local_symbol *)
5048 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5049 if (locals == NULL)
5050 return FALSE;
5051 elf64_aarch64_locals (abfd) = locals;
5052 }
5053 return TRUE;
5054}
5055
5056/* Look through the relocs for a section during the first phase. */
5057
5058static bfd_boolean
5059elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5060 asection *sec, const Elf_Internal_Rela *relocs)
5061{
5062 Elf_Internal_Shdr *symtab_hdr;
5063 struct elf_link_hash_entry **sym_hashes;
5064 const Elf_Internal_Rela *rel;
5065 const Elf_Internal_Rela *rel_end;
5066 asection *sreloc;
5067
5068 struct elf64_aarch64_link_hash_table *htab;
5069
5070 if (info->relocatable)
5071 return TRUE;
5072
5073 BFD_ASSERT (is_aarch64_elf (abfd));
5074
5075 htab = elf64_aarch64_hash_table (info);
5076 sreloc = NULL;
5077
5078 symtab_hdr = &elf_symtab_hdr (abfd);
5079 sym_hashes = elf_sym_hashes (abfd);
5080
5081 rel_end = relocs + sec->reloc_count;
5082 for (rel = relocs; rel < rel_end; rel++)
5083 {
5084 struct elf_link_hash_entry *h;
5085 unsigned long r_symndx;
5086 unsigned int r_type;
5087
5088 r_symndx = ELF64_R_SYM (rel->r_info);
5089 r_type = ELF64_R_TYPE (rel->r_info);
5090
5091 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5092 {
5093 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5094 r_symndx);
5095 return FALSE;
5096 }
5097
ed5acf27 5098 if (r_symndx < symtab_hdr->sh_info)
8847944f 5099 h = NULL;
5100 else
5101 {
5102 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5103 while (h->root.type == bfd_link_hash_indirect
5104 || h->root.type == bfd_link_hash_warning)
5105 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5106
5107 /* PR15323, ref flags aren't set for references in the same
5108 object. */
5109 h->root.non_ir_ref = 1;
5110 }
5111
5112 /* Could be done earlier, if h were already available. */
5113 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5114
5115 switch (r_type)
5116 {
5117 case R_AARCH64_ABS64:
5118
5119 /* We don't need to handle relocs into sections not going into
5120 the "real" output. */
5121 if ((sec->flags & SEC_ALLOC) == 0)
5122 break;
5123
5124 if (h != NULL)
5125 {
5126 if (!info->shared)
5127 h->non_got_ref = 1;
5128
5129 h->plt.refcount += 1;
5130 h->pointer_equality_needed = 1;
5131 }
5132
5133 /* No need to do anything if we're not creating a shared
5134 object. */
5135 if (! info->shared)
5136 break;
5137
5138 {
5139 struct elf_dyn_relocs *p;
5140 struct elf_dyn_relocs **head;
5141
5142 /* We must copy these reloc types into the output file.
5143 Create a reloc section in dynobj and make room for
5144 this reloc. */
5145 if (sreloc == NULL)
5146 {
5147 if (htab->root.dynobj == NULL)
5148 htab->root.dynobj = abfd;
5149
5150 sreloc = _bfd_elf_make_dynamic_reloc_section
5151 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5152
5153 if (sreloc == NULL)
5154 return FALSE;
5155 }
5156
5157 /* If this is a global symbol, we count the number of
5158 relocations we need for this symbol. */
5159 if (h != NULL)
5160 {
5161 struct elf64_aarch64_link_hash_entry *eh;
5162 eh = (struct elf64_aarch64_link_hash_entry *) h;
5163 head = &eh->dyn_relocs;
5164 }
5165 else
5166 {
5167 /* Track dynamic relocs needed for local syms too.
5168 We really need local syms available to do this
5169 easily. Oh well. */
5170
5171 asection *s;
5172 void **vpp;
8847944f 5173 Elf_Internal_Sym *isym;
5174
5175 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5176 abfd, r_symndx);
5177 if (isym == NULL)
5178 return FALSE;
5179
5180 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5181 if (s == NULL)
5182 s = sec;
5183
5184 /* Beware of type punned pointers vs strict aliasing
5185 rules. */
5186 vpp = &(elf_section_data (s)->local_dynrel);
5187 head = (struct elf_dyn_relocs **) vpp;
5188 }
5189
5190 p = *head;
5191 if (p == NULL || p->sec != sec)
5192 {
5193 bfd_size_type amt = sizeof *p;
5194 p = ((struct elf_dyn_relocs *)
5195 bfd_zalloc (htab->root.dynobj, amt));
5196 if (p == NULL)
5197 return FALSE;
5198 p->next = *head;
5199 *head = p;
5200 p->sec = sec;
5201 }
5202
5203 p->count += 1;
5204
5205 }
5206 break;
5207
5208 /* RR: We probably want to keep a consistency check that
5209 there are no dangling GOT_PAGE relocs. */
5210 case R_AARCH64_LD64_GOT_LO12_NC:
f41aef5f 5211 case R_AARCH64_GOT_LD_PREL19:
5212 case R_AARCH64_ADR_GOT_PAGE:
5213 case R_AARCH64_TLSGD_ADR_PAGE21:
5214 case R_AARCH64_TLSGD_ADD_LO12_NC:
5215 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5216 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5217 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5218 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5219 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5220 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5221 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5222 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5223 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5224 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
418009c2 5225 case R_AARCH64_TLSDESC_ADR_PAGE21:
5226 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5227 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5228 {
5229 unsigned got_type;
5230 unsigned old_got_type;
5231
5232 got_type = aarch64_reloc_got_type (r_type);
5233
5234 if (h)
5235 {
5236 h->got.refcount += 1;
5237 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5238 }
5239 else
5240 {
5241 struct elf_aarch64_local_symbol *locals;
5242
5243 if (!elf64_aarch64_allocate_local_symbols
5244 (abfd, symtab_hdr->sh_info))
5245 return FALSE;
5246
5247 locals = elf64_aarch64_locals (abfd);
5248 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5249 locals[r_symndx].got_refcount += 1;
5250 old_got_type = locals[r_symndx].got_type;
5251 }
5252
5253 /* If a variable is accessed with both general dynamic TLS
5254 methods, two slots may be created. */
5255 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5256 got_type |= old_got_type;
5257
5258 /* We will already have issued an error message if there
5259 is a TLS/non-TLS mismatch, based on the symbol type.
5260 So just combine any TLS types needed. */
5261 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5262 && got_type != GOT_NORMAL)
5263 got_type |= old_got_type;
5264
5265 /* If the symbol is accessed by both IE and GD methods, we
5266 are able to relax. Turn off the GD flag, without
5267 messing up with any other kind of TLS types that may be
5268 involved. */
5269 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5270 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5271
5272 if (old_got_type != got_type)
5273 {
5274 if (h != NULL)
5275 elf64_aarch64_hash_entry (h)->got_type = got_type;
5276 else
5277 {
5278 struct elf_aarch64_local_symbol *locals;
5279 locals = elf64_aarch64_locals (abfd);
5280 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5281 locals[r_symndx].got_type = got_type;
5282 }
5283 }
5284
5285 if (htab->root.sgot == NULL)
5286 {
5287 if (htab->root.dynobj == NULL)
5288 htab->root.dynobj = abfd;
5289 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5290 return FALSE;
5291 }
5292 break;
5293 }
5294
5295 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5296 case R_AARCH64_ADR_PREL_PG_HI21:
f41aef5f 5297 case R_AARCH64_ADR_PREL_LO21:
5298 if (h != NULL && info->executable)
5299 {
5300 /* If this reloc is in a read-only section, we might
5301 need a copy reloc. We can't check reliably at this
5302 stage whether the section is read-only, as input
5303 sections have not yet been mapped to output sections.
5304 Tentatively set the flag for now, and correct in
5305 adjust_dynamic_symbol. */
5306 h->non_got_ref = 1;
5307 h->plt.refcount += 1;
5308 h->pointer_equality_needed = 1;
5309 }
 5310	  /* FIXME: RR need to handle these in shared libraries
 5311	     and essentially bomb out, as these are non-PIC
 5312	     relocations in shared libraries.  */
5313 break;
5314
5315 case R_AARCH64_CALL26:
5316 case R_AARCH64_JUMP26:
5317 /* If this is a local symbol then we resolve it
5318 directly without creating a PLT entry. */
5319 if (h == NULL)
5320 continue;
5321
5322 h->needs_plt = 1;
8847944f 5323 h->plt.refcount += 1;
5324 break;
5325 }
5326 }
5327 return TRUE;
5328}
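
/* Illustrative example, not part of the original source, of the got_type
   merging performed above: a symbol first seen through a TLS descriptor
   sequence records GOT_TLSDESC_GD; if a later reloc accesses the same
   symbol with an initial-exec sequence, GOT_TLS_IE is merged in and the
   GD bits are dropped, so only an IE-style GOT slot is allocated and the
   GD code can be relaxed to IE.  */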
5329
5330/* Treat mapping symbols as special target symbols. */
5331
5332static bfd_boolean
5333elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5334 asymbol *sym)
5335{
5336 return bfd_is_aarch64_special_symbol_name (sym->name,
5337 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5338}
5339
5340/* This is a copy of elf_find_function () from elf.c except that
5341 AArch64 mapping symbols are ignored when looking for function names. */
5342
5343static bfd_boolean
5344aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5345 asection *section,
5346 asymbol **symbols,
5347 bfd_vma offset,
5348 const char **filename_ptr,
5349 const char **functionname_ptr)
5350{
5351 const char *filename = NULL;
5352 asymbol *func = NULL;
5353 bfd_vma low_func = 0;
5354 asymbol **p;
5355
5356 for (p = symbols; *p != NULL; p++)
5357 {
5358 elf_symbol_type *q;
5359
5360 q = (elf_symbol_type *) * p;
5361
5362 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5363 {
5364 default:
5365 break;
5366 case STT_FILE:
5367 filename = bfd_asymbol_name (&q->symbol);
5368 break;
5369 case STT_FUNC:
5370 case STT_NOTYPE:
5371 /* Skip mapping symbols. */
5372 if ((q->symbol.flags & BSF_LOCAL)
5373 && (bfd_is_aarch64_special_symbol_name
5374 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5375 continue;
5376 /* Fall through. */
5377 if (bfd_get_section (&q->symbol) == section
5378 && q->symbol.value >= low_func && q->symbol.value <= offset)
5379 {
5380 func = (asymbol *) q;
5381 low_func = q->symbol.value;
5382 }
5383 break;
5384 }
5385 }
5386
5387 if (func == NULL)
5388 return FALSE;
5389
5390 if (filename_ptr)
5391 *filename_ptr = filename;
5392 if (functionname_ptr)
5393 *functionname_ptr = bfd_asymbol_name (func);
5394
5395 return TRUE;
5396}
5397
5398
5399/* Find the nearest line to a particular section and offset, for error
5400 reporting. This code is a duplicate of the code in elf.c, except
5401 that it uses aarch64_elf_find_function. */
5402
5403static bfd_boolean
5404elf64_aarch64_find_nearest_line (bfd *abfd,
5405 asection *section,
5406 asymbol **symbols,
5407 bfd_vma offset,
5408 const char **filename_ptr,
5409 const char **functionname_ptr,
5410 unsigned int *line_ptr)
5411{
5412 bfd_boolean found = FALSE;
5413
5414 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5415 toolchain uses it. */
5416
5417 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5418 section, symbols, offset,
5419 filename_ptr, functionname_ptr,
5420 line_ptr, NULL, 0,
5421 &elf_tdata (abfd)->dwarf2_find_line_info))
5422 {
5423 if (!*functionname_ptr)
5424 aarch64_elf_find_function (abfd, section, symbols, offset,
5425 *filename_ptr ? NULL : filename_ptr,
5426 functionname_ptr);
5427
5428 return TRUE;
5429 }
5430
5431 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5432 &found, filename_ptr,
5433 functionname_ptr, line_ptr,
5434 &elf_tdata (abfd)->line_info))
5435 return FALSE;
5436
5437 if (found && (*functionname_ptr || *line_ptr))
5438 return TRUE;
5439
5440 if (symbols == NULL)
5441 return FALSE;
5442
5443 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5444 filename_ptr, functionname_ptr))
5445 return FALSE;
5446
5447 *line_ptr = 0;
5448 return TRUE;
5449}
5450
5451static bfd_boolean
5452elf64_aarch64_find_inliner_info (bfd *abfd,
5453 const char **filename_ptr,
5454 const char **functionname_ptr,
5455 unsigned int *line_ptr)
5456{
5457 bfd_boolean found;
5458 found = _bfd_dwarf2_find_inliner_info
5459 (abfd, filename_ptr,
5460 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5461 return found;
5462}
5463
5464
5465static void
5466elf64_aarch64_post_process_headers (bfd *abfd,
5467 struct bfd_link_info *link_info
5468 ATTRIBUTE_UNUSED)
5469{
5470 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5471
5472 i_ehdrp = elf_elfheader (abfd);
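  /* 0 is ELFOSABI_NONE, the generic ELF OSABI.  */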
8847944f 5473 i_ehdrp->e_ident[EI_OSABI] = 0;
5474 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5475}
5476
5477static enum elf_reloc_type_class
5478elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5479 const asection *rel_sec ATTRIBUTE_UNUSED,
5480 const Elf_Internal_Rela *rela)
5481{
5482 switch ((int) ELF64_R_TYPE (rela->r_info))
5483 {
5484 case R_AARCH64_RELATIVE:
5485 return reloc_class_relative;
5486 case R_AARCH64_JUMP_SLOT:
5487 return reloc_class_plt;
5488 case R_AARCH64_COPY:
5489 return reloc_class_copy;
5490 default:
5491 return reloc_class_normal;
5492 }
5493}
5494
5495/* Mark SHT_NOTE sections as link-once with identical contents, so that duplicate notes are merged rather than repeated. */
5496
5497static bfd_boolean
5498elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5499{
5500 if (hdr->sh_type == SHT_NOTE)
5501 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5502
5503 return TRUE;
5504}
5505
5506/* Handle an AArch64 specific section when reading an object file. This is
5507 called when bfd_section_from_shdr finds a section with an unknown
5508 type. */
5509
5510static bfd_boolean
5511elf64_aarch64_section_from_shdr (bfd *abfd,
5512 Elf_Internal_Shdr *hdr,
5513 const char *name, int shindex)
5514{
5515 /* There ought to be a place to keep ELF backend specific flags, but
5516 at the moment there isn't one. We just keep track of the
5517 sections by their name, instead. Fortunately, the ABI gives
5518 names for all the AArch64 specific sections, so we will probably get
5519 away with this. */
5520 switch (hdr->sh_type)
5521 {
5522 case SHT_AARCH64_ATTRIBUTES:
5523 break;
5524
5525 default:
5526 return FALSE;
5527 }
5528
5529 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5530 return FALSE;
5531
5532 return TRUE;
5533}
5534
5535/* A structure used to record a list of sections, independently
5536 of the next and prev fields in the asection structure. */
5537typedef struct section_list
5538{
5539 asection *sec;
5540 struct section_list *next;
5541 struct section_list *prev;
5542}
5543section_list;
5544
5545/* Unfortunately we need to keep a list of sections for which
5546 an _aarch64_elf_section_data structure has been allocated. This
5547 is because it is possible for functions like elf64_aarch64_write_section
5548 to be called on a section which has had an elf_data_structure
5549 allocated for it (and so the used_by_bfd field is valid) but
5550 for which the AArch64 extended version of this structure - the
5551 _aarch64_elf_section_data structure - has not been allocated. */
5552static section_list *sections_with_aarch64_elf_section_data = NULL;
5553
5554static void
5555record_section_with_aarch64_elf_section_data (asection *sec)
5556{
5557 struct section_list *entry;
5558
5559 entry = bfd_malloc (sizeof (*entry));
5560 if (entry == NULL)
5561 return;
5562 entry->sec = sec;
5563 entry->next = sections_with_aarch64_elf_section_data;
5564 entry->prev = NULL;
5565 if (entry->next != NULL)
5566 entry->next->prev = entry;
5567 sections_with_aarch64_elf_section_data = entry;
5568}
5569
5570static struct section_list *
5571find_aarch64_elf_section_entry (asection *sec)
5572{
5573 struct section_list *entry;
5574 static struct section_list *last_entry = NULL;
5575
5576 /* This is a short cut for the typical case where the sections are added
5577 to the sections_with_aarch64_elf_section_data list in forward order and
5578 then looked up here in backwards order. This makes a real difference
5579 to the ld-srec/sec64k.exp linker test. */
5580 entry = sections_with_aarch64_elf_section_data;
5581 if (last_entry != NULL)
5582 {
5583 if (last_entry->sec == sec)
5584 entry = last_entry;
5585 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5586 entry = last_entry->next;
5587 }
5588
5589 for (; entry; entry = entry->next)
5590 if (entry->sec == sec)
5591 break;
5592
5593 if (entry)
5594 /* Record the entry prior to this one - it is the entry we are
5595 most likely to want to locate next time. Also this way if we
5596 have been called from
5597 unrecord_section_with_aarch64_elf_section_data () we will not
5598 be caching a pointer that is about to be freed. */
5599 last_entry = entry->prev;
5600
5601 return entry;
5602}
5603
5604static void
5605unrecord_section_with_aarch64_elf_section_data (asection *sec)
5606{
5607 struct section_list *entry;
5608
5609 entry = find_aarch64_elf_section_entry (sec);
5610
5611 if (entry)
5612 {
5613 if (entry->prev != NULL)
5614 entry->prev->next = entry->next;
5615 if (entry->next != NULL)
5616 entry->next->prev = entry->prev;
5617 if (entry == sections_with_aarch64_elf_section_data)
5618 sections_with_aarch64_elf_section_data = entry->next;
5619 free (entry);
5620 }
5621}
5622
5623
5624typedef struct
5625{
5626 void *finfo;
5627 struct bfd_link_info *info;
5628 asection *sec;
5629 int sec_shndx;
5630 int (*func) (void *, const char *, Elf_Internal_Sym *,
5631 asection *, struct elf_link_hash_entry *);
5632} output_arch_syminfo;
5633
5634enum map_symbol_type
5635{
5636 AARCH64_MAP_INSN,
5637 AARCH64_MAP_DATA
5638};
5639
5640
5641/* Output a single mapping symbol. */
5642
5643static bfd_boolean
5644elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5645 enum map_symbol_type type, bfd_vma offset)
5646{
5647 static const char *names[2] = { "$x", "$d" };
5648 Elf_Internal_Sym sym;
5649
5650 sym.st_value = (osi->sec->output_section->vma
5651 + osi->sec->output_offset + offset);
5652 sym.st_size = 0;
5653 sym.st_other = 0;
5654 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5655 sym.st_shndx = osi->sec_shndx;
5656 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5657}
5658
5659
5660
5661/* Output mapping symbols for PLT entries associated with H. */
5662
5663static bfd_boolean
5664elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5665{
5666 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5667 bfd_vma addr;
5668
5669 if (h->root.type == bfd_link_hash_indirect)
5670 return TRUE;
5671
5672 if (h->root.type == bfd_link_hash_warning)
5673 /* When warning symbols are created, they **replace** the "real"
5674 entry in the hash table, thus we never get to see the real
5675 symbol in a hash traversal. So look at it now. */
5676 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5677
5678 if (h->plt.offset == (bfd_vma) - 1)
5679 return TRUE;
5680
5681 addr = h->plt.offset;
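  /* Emit a $x mapping symbol only for the entry at offset 32, i.e. the
     first PLT entry following the PLT header; the PLT entries after it
     are contiguous code covered by the same symbol.  */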
5682 if (addr == 32)
5683 {
5684 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5685 return FALSE;
5686 }
5687 return TRUE;
5688}
5689
5690
5691/* Output a single local symbol for a generated stub. */
5692
5693static bfd_boolean
5694elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5695 bfd_vma offset, bfd_vma size)
5696{
5697 Elf_Internal_Sym sym;
5698
5699 sym.st_value = (osi->sec->output_section->vma
5700 + osi->sec->output_offset + offset);
5701 sym.st_size = size;
5702 sym.st_other = 0;
5703 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5704 sym.st_shndx = osi->sec_shndx;
5705 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5706}
5707
5708static bfd_boolean
5709aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5710{
5711 struct elf64_aarch64_stub_hash_entry *stub_entry;
5712 asection *stub_sec;
5713 bfd_vma addr;
5714 char *stub_name;
5715 output_arch_syminfo *osi;
5716
5717 /* Massage our args to the form they really have. */
5718 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5719 osi = (output_arch_syminfo *) in_arg;
5720
5721 stub_sec = stub_entry->stub_sec;
5722
5723 /* Ensure this stub is attached to the current section being
5724 processed. */
5725 if (stub_sec != osi->sec)
5726 return TRUE;
5727
5728 addr = (bfd_vma) stub_entry->stub_offset;
5729
5730 stub_name = stub_entry->output_name;
5731
5732 switch (stub_entry->stub_type)
5733 {
5734 case aarch64_stub_adrp_branch:
5735 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5736 sizeof (aarch64_adrp_branch_stub)))
5737 return FALSE;
5738 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5739 return FALSE;
5740 break;
5741 case aarch64_stub_long_branch:
5742 if (!elf64_aarch64_output_stub_sym
5743 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5744 return FALSE;
5745 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5746 return FALSE;
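      /* The 16 bytes of code in the long branch stub are followed by an
	 8-byte literal; mark it as data so it is not disassembled as
	 instructions.  */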
5747 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5748 return FALSE;
5749 break;
5750 default:
5751 BFD_FAIL ();
5752 }
5753
5754 return TRUE;
5755}
5756
5757/* Output mapping symbols for linker generated sections. */
5758
5759static bfd_boolean
5760elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5761 struct bfd_link_info *info,
5762 void *finfo,
5763 int (*func) (void *, const char *,
5764 Elf_Internal_Sym *,
5765 asection *,
5766 struct elf_link_hash_entry
5767 *))
5768{
5769 output_arch_syminfo osi;
5770 struct elf64_aarch64_link_hash_table *htab;
5771
5772 htab = elf64_aarch64_hash_table (info);
5773
5774 osi.finfo = finfo;
5775 osi.info = info;
5776 osi.func = func;
5777
 5778 /* Long call stubs. */
5779 if (htab->stub_bfd && htab->stub_bfd->sections)
5780 {
5781 asection *stub_sec;
5782
5783 for (stub_sec = htab->stub_bfd->sections;
5784 stub_sec != NULL; stub_sec = stub_sec->next)
5785 {
5786 /* Ignore non-stub sections. */
5787 if (!strstr (stub_sec->name, STUB_SUFFIX))
5788 continue;
5789
5790 osi.sec = stub_sec;
5791
5792 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5793 (output_bfd, osi.sec->output_section);
5794
5795 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5796 &osi);
5797 }
5798 }
5799
5800 /* Finally, output mapping symbols for the PLT. */
5801 if (!htab->root.splt || htab->root.splt->size == 0)
5802 return TRUE;
5803
 5804 /* For now we live without full mapping symbols for the PLT. */
5805 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5806 (output_bfd, htab->root.splt->output_section);
5807 osi.sec = htab->root.splt;
5808
5809 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5810 (void *) &osi);
5811
5812 return TRUE;
5813
5814}
5815
5816/* Allocate target specific section data. */
5817
5818static bfd_boolean
5819elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5820{
5821 if (!sec->used_by_bfd)
5822 {
5823 _aarch64_elf_section_data *sdata;
5824 bfd_size_type amt = sizeof (*sdata);
5825
5826 sdata = bfd_zalloc (abfd, amt);
5827 if (sdata == NULL)
5828 return FALSE;
5829 sec->used_by_bfd = sdata;
5830 }
5831
5832 record_section_with_aarch64_elf_section_data (sec);
5833
5834 return _bfd_elf_new_section_hook (abfd, sec);
5835}
5836
5837
5838static void
5839unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5840 asection *sec,
5841 void *ignore ATTRIBUTE_UNUSED)
5842{
5843 unrecord_section_with_aarch64_elf_section_data (sec);
5844}
5845
5846static bfd_boolean
5847elf64_aarch64_close_and_cleanup (bfd *abfd)
5848{
5849 if (abfd->sections)
5850 bfd_map_over_sections (abfd,
5851 unrecord_section_via_map_over_sections, NULL);
5852
5853 return _bfd_elf_close_and_cleanup (abfd);
5854}
5855
5856static bfd_boolean
5857elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5858{
5859 if (abfd->sections)
5860 bfd_map_over_sections (abfd,
5861 unrecord_section_via_map_over_sections, NULL);
5862
5863 return _bfd_free_cached_info (abfd);
5864}
5865
5866static bfd_boolean
5867elf64_aarch64_is_function_type (unsigned int type)
5868{
5869 return type == STT_FUNC;
5870}
5871
5872/* Create dynamic sections. This is different from the ARM backend in that
5873 the got, plt, gotplt and their relocation sections are all created in the
5874 standard part of the bfd elf backend. */
5875
5876static bfd_boolean
5877elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5878 struct bfd_link_info *info)
5879{
5880 struct elf64_aarch64_link_hash_table *htab;
5881 struct elf_link_hash_entry *h;
5882
5883 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5884 return FALSE;
5885
5886 htab = elf64_aarch64_hash_table (info);
5887 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5888 if (!info->shared)
5889 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5890
5891 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5892 abort ();
5893
5894 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5895 dynobj's .got section. We don't do this in the linker script
5896 because we don't want to define the symbol if we are not creating
5897 a global offset table. */
5898 h = _bfd_elf_define_linkage_sym (dynobj, info,
5899 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5900 elf_hash_table (info)->hgot = h;
5901 if (h == NULL)
5902 return FALSE;
5903
5904 return TRUE;
5905}
5906
5907
5908/* Allocate space in .plt, .got and associated reloc sections for
5909 dynamic relocs. */
5910
5911static bfd_boolean
5912elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5913{
5914 struct bfd_link_info *info;
5915 struct elf64_aarch64_link_hash_table *htab;
5916 struct elf64_aarch64_link_hash_entry *eh;
5917 struct elf_dyn_relocs *p;
5918
 5919 /* An example of a bfd_link_hash_indirect symbol is a versioned
 5920 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5921 -> __gxx_personality_v0(bfd_link_hash_defined)
5922
5923 There is no need to process bfd_link_hash_indirect symbols here
5924 because we will also be presented with the concrete instance of
5925 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5926 called to copy all relevant data from the generic to the concrete
5927 symbol instance.
5928 */
5929 if (h->root.type == bfd_link_hash_indirect)
5930 return TRUE;
5931
5932 if (h->root.type == bfd_link_hash_warning)
5933 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5934
5935 info = (struct bfd_link_info *) inf;
5936 htab = elf64_aarch64_hash_table (info);
5937
8847944f 5938 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
5939 {
5940 /* Make sure this symbol is output as a dynamic symbol.
5941 Undefined weak syms won't yet be marked as dynamic. */
5942 if (h->dynindx == -1 && !h->forced_local)
5943 {
5944 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5945 return FALSE;
5946 }
5947
5948 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
5949 {
5950 asection *s = htab->root.splt;
5951
5952 /* If this is the first .plt entry, make room for the special
5953 first entry. */
5954 if (s->size == 0)
5955 s->size += htab->plt_header_size;
5956
5957 h->plt.offset = s->size;
5958
5959 /* If this symbol is not defined in a regular file, and we are
5960 not generating a shared library, then set the symbol to this
5961 location in the .plt. This is required to make function
5962 pointers compare as equal between the normal executable and
5963 the shared library. */
5964 if (!info->shared && !h->def_regular)
5965 {
5966 h->root.u.def.section = s;
5967 h->root.u.def.value = h->plt.offset;
5968 }
5969
5970 /* Make room for this entry. For now we only create the
5971 small model PLT entries. We later need to find a way
5972 of relaxing into these from the large model PLT entries. */
5973 s->size += PLT_SMALL_ENTRY_SIZE;
5974
5975 /* We also need to make an entry in the .got.plt section, which
5976 will be placed in the .got section by the linker script. */
5977 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
5978
5979 /* We also need to make an entry in the .rela.plt section. */
5980 htab->root.srelplt->size += RELOC_SIZE (htab);
5981
 5982 /* We need to ensure that all GOT entries that serve the PLT
 5983 are consecutive with the special GOT slots [0] [1] and
 5984 [2]. Any additional relocations, such as
 5985 R_AARCH64_TLSDESC, must be placed after the PLT related
 5986 entries. We abuse reloc_count such that during
 5987 sizing we adjust it to indicate the number of
 5988 PLT related reserved entries. In subsequent phases, when
 5989 filling in the contents of the reloc entries, PLT related
 5990 entries are placed by computing their PLT index (0
 5991 .. reloc_count), while other non-PLT relocs are placed
 5992 at the slot indicated by reloc_count, after which
 5993 reloc_count is updated. */
5994
5995 htab->root.srelplt->reloc_count++;
5996 }
5997 else
5998 {
5999 h->plt.offset = (bfd_vma) - 1;
6000 h->needs_plt = 0;
6001 }
6002 }
6003 else
6004 {
6005 h->plt.offset = (bfd_vma) - 1;
6006 h->needs_plt = 0;
6007 }
6008
8847944f 6009 eh = (struct elf64_aarch64_link_hash_entry *) h;
6010 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6011
6012 if (h->got.refcount > 0)
6013 {
6014 bfd_boolean dyn;
6015 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
6016
6017 h->got.offset = (bfd_vma) - 1;
6018
6019 dyn = htab->root.dynamic_sections_created;
6020
6021 /* Make sure this symbol is output as a dynamic symbol.
6022 Undefined weak syms won't yet be marked as dynamic. */
6023 if (dyn && h->dynindx == -1 && !h->forced_local)
6024 {
6025 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6026 return FALSE;
6027 }
6028
6029 if (got_type == GOT_UNKNOWN)
6030 {
6031 }
6032 else if (got_type == GOT_NORMAL)
6033 {
6034 h->got.offset = htab->root.sgot->size;
6035 htab->root.sgot->size += GOT_ENTRY_SIZE;
6036 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6037 || h->root.type != bfd_link_hash_undefweak)
6038 && (info->shared
6039 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6040 {
6041 htab->root.srelgot->size += RELOC_SIZE (htab);
6042 }
6043 }
6044 else
6045 {
6046 int indx;
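	  /* TLS descriptors occupy a pair of GOT entries placed in
	     .got.plt after the jump table slots, so the slot is tracked
	     via tlsdesc_got_jump_table_offset rather than got.offset.  */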
6047 if (got_type & GOT_TLSDESC_GD)
6048 {
6049 eh->tlsdesc_got_jump_table_offset =
6050 (htab->root.sgotplt->size
6051 - aarch64_compute_jump_table_size (htab));
6052 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6053 h->got.offset = (bfd_vma) - 2;
6054 }
6055
6056 if (got_type & GOT_TLS_GD)
6057 {
6058 h->got.offset = htab->root.sgot->size;
6059 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6060 }
6061
6062 if (got_type & GOT_TLS_IE)
6063 {
6064 h->got.offset = htab->root.sgot->size;
6065 htab->root.sgot->size += GOT_ENTRY_SIZE;
6066 }
6067
6068 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6069 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6070 || h->root.type != bfd_link_hash_undefweak)
6071 && (info->shared
6072 || indx != 0
6073 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6074 {
6075 if (got_type & GOT_TLSDESC_GD)
6076 {
6077 htab->root.srelplt->size += RELOC_SIZE (htab);
6078 /* Note reloc_count not incremented here! We have
6079 already adjusted reloc_count for this relocation
6080 type. */
6081
6082 /* TLSDESC PLT is now needed, but not yet determined. */
6083 htab->tlsdesc_plt = (bfd_vma) - 1;
6084 }
6085
6086 if (got_type & GOT_TLS_GD)
6087 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6088
6089 if (got_type & GOT_TLS_IE)
6090 htab->root.srelgot->size += RELOC_SIZE (htab);
6091 }
6092 }
6093 }
6094 else
6095 {
6096 h->got.offset = (bfd_vma) - 1;
6097 }
6098
6099 if (eh->dyn_relocs == NULL)
6100 return TRUE;
6101
6102 /* In the shared -Bsymbolic case, discard space allocated for
6103 dynamic pc-relative relocs against symbols which turn out to be
6104 defined in regular objects. For the normal shared case, discard
6105 space for pc-relative relocs that have become local due to symbol
6106 visibility changes. */
6107
6108 if (info->shared)
6109 {
6110 /* Relocs that use pc_count are those that appear on a call
 6111 insn, or certain REL relocs that can be generated via assembly.
6112 We want calls to protected symbols to resolve directly to the
6113 function rather than going via the plt. If people want
6114 function pointer comparisons to work as expected then they
6115 should avoid writing weird assembly. */
6116 if (SYMBOL_CALLS_LOCAL (info, h))
6117 {
6118 struct elf_dyn_relocs **pp;
6119
6120 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6121 {
6122 p->count -= p->pc_count;
6123 p->pc_count = 0;
6124 if (p->count == 0)
6125 *pp = p->next;
6126 else
6127 pp = &p->next;
6128 }
6129 }
6130
6131 /* Also discard relocs on undefined weak syms with non-default
6132 visibility. */
6133 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6134 {
6135 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6136 eh->dyn_relocs = NULL;
6137
6138 /* Make sure undefined weak symbols are output as a dynamic
6139 symbol in PIEs. */
6140 else if (h->dynindx == -1
6141 && !h->forced_local
6142 && !bfd_elf_link_record_dynamic_symbol (info, h))
6143 return FALSE;
6144 }
6145
6146 }
6147 else if (ELIMINATE_COPY_RELOCS)
6148 {
6149 /* For the non-shared case, discard space for relocs against
6150 symbols which turn out to need copy relocs or are not
6151 dynamic. */
6152
6153 if (!h->non_got_ref
6154 && ((h->def_dynamic
6155 && !h->def_regular)
6156 || (htab->root.dynamic_sections_created
6157 && (h->root.type == bfd_link_hash_undefweak
6158 || h->root.type == bfd_link_hash_undefined))))
6159 {
6160 /* Make sure this symbol is output as a dynamic symbol.
6161 Undefined weak syms won't yet be marked as dynamic. */
6162 if (h->dynindx == -1
6163 && !h->forced_local
6164 && !bfd_elf_link_record_dynamic_symbol (info, h))
6165 return FALSE;
6166
6167 /* If that succeeded, we know we'll be keeping all the
6168 relocs. */
6169 if (h->dynindx != -1)
6170 goto keep;
6171 }
6172
6173 eh->dyn_relocs = NULL;
6174
6175 keep:;
6176 }
6177
6178 /* Finally, allocate space. */
6179 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6180 {
6181 asection *sreloc;
6182
6183 sreloc = elf_section_data (p->sec)->sreloc;
6184
6185 BFD_ASSERT (sreloc != NULL);
6186
6187 sreloc->size += p->count * RELOC_SIZE (htab);
6188 }
6189
6190 return TRUE;
6191}
6192
6193
6194
6195
6196/* This is the most important function of all.  Innocuously named
 6197 though! */
6198static bfd_boolean
6199elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6200 struct bfd_link_info *info)
6201{
6202 struct elf64_aarch64_link_hash_table *htab;
6203 bfd *dynobj;
6204 asection *s;
6205 bfd_boolean relocs;
6206 bfd *ibfd;
6207
6208 htab = elf64_aarch64_hash_table ((info));
6209 dynobj = htab->root.dynobj;
6210
6211 BFD_ASSERT (dynobj != NULL);
6212
6213 if (htab->root.dynamic_sections_created)
6214 {
6215 if (info->executable)
6216 {
6217 s = bfd_get_linker_section (dynobj, ".interp");
6218 if (s == NULL)
6219 abort ();
6220 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6221 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6222 }
6223 }
6224
6225 /* Set up .got offsets for local syms, and space for local dynamic
6226 relocs. */
6227 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6228 {
6229 struct elf_aarch64_local_symbol *locals = NULL;
6230 Elf_Internal_Shdr *symtab_hdr;
6231 asection *srel;
6232 unsigned int i;
6233
6234 if (!is_aarch64_elf (ibfd))
6235 continue;
6236
6237 for (s = ibfd->sections; s != NULL; s = s->next)
6238 {
6239 struct elf_dyn_relocs *p;
6240
6241 for (p = (struct elf_dyn_relocs *)
6242 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6243 {
6244 if (!bfd_is_abs_section (p->sec)
6245 && bfd_is_abs_section (p->sec->output_section))
6246 {
6247 /* Input section has been discarded, either because
6248 it is a copy of a linkonce section or due to
6249 linker script /DISCARD/, so we'll be discarding
6250 the relocs too. */
6251 }
6252 else if (p->count != 0)
6253 {
6254 srel = elf_section_data (p->sec)->sreloc;
6255 srel->size += p->count * RELOC_SIZE (htab);
6256 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6257 info->flags |= DF_TEXTREL;
6258 }
6259 }
6260 }
6261
6262 locals = elf64_aarch64_locals (ibfd);
6263 if (!locals)
6264 continue;
6265
6266 symtab_hdr = &elf_symtab_hdr (ibfd);
6267 srel = htab->root.srelgot;
6268 for (i = 0; i < symtab_hdr->sh_info; i++)
6269 {
6270 locals[i].got_offset = (bfd_vma) - 1;
6271 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6272 if (locals[i].got_refcount > 0)
6273 {
6274 unsigned got_type = locals[i].got_type;
6275 if (got_type & GOT_TLSDESC_GD)
6276 {
6277 locals[i].tlsdesc_got_jump_table_offset =
6278 (htab->root.sgotplt->size
6279 - aarch64_compute_jump_table_size (htab));
6280 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6281 locals[i].got_offset = (bfd_vma) - 2;
6282 }
6283
6284 if (got_type & GOT_TLS_GD)
6285 {
6286 locals[i].got_offset = htab->root.sgot->size;
6287 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6288 }
6289
6290 if (got_type & GOT_TLS_IE)
6291 {
6292 locals[i].got_offset = htab->root.sgot->size;
6293 htab->root.sgot->size += GOT_ENTRY_SIZE;
6294 }
6295
6296 if (got_type == GOT_UNKNOWN)
6297 {
6298 }
6299
6300 if (got_type == GOT_NORMAL)
6301 {
6302 }
6303
6304 if (info->shared)
6305 {
6306 if (got_type & GOT_TLSDESC_GD)
6307 {
6308 htab->root.srelplt->size += RELOC_SIZE (htab);
6309 /* Note RELOC_COUNT not incremented here! */
6310 htab->tlsdesc_plt = (bfd_vma) - 1;
6311 }
6312
6313 if (got_type & GOT_TLS_GD)
6314 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6315
6316 if (got_type & GOT_TLS_IE)
6317 htab->root.srelgot->size += RELOC_SIZE (htab);
6318 }
6319 }
6320 else
6321 {
6322 locals[i].got_refcount = (bfd_vma) - 1;
6323 }
6324 }
6325 }
6326
6327
6328 /* Allocate global sym .plt and .got entries, and space for global
6329 sym dynamic relocs. */
6330 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6331 info);
6332
6333
6334 /* For every jump slot reserved in the sgotplt, reloc_count is
6335 incremented. However, when we reserve space for TLS descriptors,
6336 it's not incremented, so in order to compute the space reserved
6337 for them, it suffices to multiply the reloc count by the jump
6338 slot size. */
6339
6340 if (htab->root.srelplt)
8847944f 6341 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6342
6343 if (htab->tlsdesc_plt)
6344 {
6345 if (htab->root.splt->size == 0)
6346 htab->root.splt->size += PLT_ENTRY_SIZE;
6347
6348 htab->tlsdesc_plt = htab->root.splt->size;
6349 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6350
6351 /* If we're not using lazy TLS relocations, don't generate the
6352 GOT entry required. */
6353 if (!(info->flags & DF_BIND_NOW))
6354 {
6355 htab->dt_tlsdesc_got = htab->root.sgot->size;
6356 htab->root.sgot->size += GOT_ENTRY_SIZE;
6357 }
6358 }
6359
6360 /* We now have determined the sizes of the various dynamic sections.
6361 Allocate memory for them. */
6362 relocs = FALSE;
6363 for (s = dynobj->sections; s != NULL; s = s->next)
6364 {
6365 if ((s->flags & SEC_LINKER_CREATED) == 0)
6366 continue;
6367
6368 if (s == htab->root.splt
6369 || s == htab->root.sgot
6370 || s == htab->root.sgotplt
6371 || s == htab->root.iplt
6372 || s == htab->root.igotplt || s == htab->sdynbss)
6373 {
6374 /* Strip this section if we don't need it; see the
6375 comment below. */
6376 }
6377 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6378 {
6379 if (s->size != 0 && s != htab->root.srelplt)
6380 relocs = TRUE;
6381
6382 /* We use the reloc_count field as a counter if we need
6383 to copy relocs into the output file. */
6384 if (s != htab->root.srelplt)
6385 s->reloc_count = 0;
6386 }
6387 else
6388 {
6389 /* It's not one of our sections, so don't allocate space. */
6390 continue;
6391 }
6392
6393 if (s->size == 0)
6394 {
6395 /* If we don't need this section, strip it from the
6396 output file. This is mostly to handle .rela.bss and
6397 .rela.plt. We must create both sections in
6398 create_dynamic_sections, because they must be created
6399 before the linker maps input sections to output
6400 sections. The linker does that before
6401 adjust_dynamic_symbol is called, and it is that
6402 function which decides whether anything needs to go
6403 into these sections. */
6404
6405 s->flags |= SEC_EXCLUDE;
6406 continue;
6407 }
6408
6409 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6410 continue;
6411
6412 /* Allocate memory for the section contents. We use bfd_zalloc
6413 here in case unused entries are not reclaimed before the
6414 section's contents are written out. This should not happen,
6415 but this way if it does, we get a R_AARCH64_NONE reloc instead
6416 of garbage. */
6417 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6418 if (s->contents == NULL)
6419 return FALSE;
6420 }
6421
6422 if (htab->root.dynamic_sections_created)
6423 {
6424 /* Add some entries to the .dynamic section. We fill in the
6425 values later, in elf64_aarch64_finish_dynamic_sections, but we
6426 must add the entries now so that we get the correct size for
6427 the .dynamic section. The DT_DEBUG entry is filled in by the
6428 dynamic linker and used by the debugger. */
6429#define add_dynamic_entry(TAG, VAL) \
6430 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6431
6432 if (info->executable)
6433 {
6434 if (!add_dynamic_entry (DT_DEBUG, 0))
6435 return FALSE;
6436 }
6437
6438 if (htab->root.splt->size != 0)
6439 {
6440 if (!add_dynamic_entry (DT_PLTGOT, 0)
6441 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6442 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6443 || !add_dynamic_entry (DT_JMPREL, 0))
6444 return FALSE;
6445
6446 if (htab->tlsdesc_plt
6447 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6448 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6449 return FALSE;
6450 }
6451
6452 if (relocs)
6453 {
6454 if (!add_dynamic_entry (DT_RELA, 0)
6455 || !add_dynamic_entry (DT_RELASZ, 0)
6456 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6457 return FALSE;
6458
6459 /* If any dynamic relocs apply to a read-only section,
6460 then we need a DT_TEXTREL entry. */
6461 if ((info->flags & DF_TEXTREL) != 0)
6462 {
6463 if (!add_dynamic_entry (DT_TEXTREL, 0))
6464 return FALSE;
6465 }
6466 }
6467 }
6468#undef add_dynamic_entry
6469
6470 return TRUE;
6471
6472
6473}
6474
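/* Patch the immediate field of the instruction at PLT_ENTRY, using the
   howto for relocation type R_TYPE and VALUE as the value to encode.  */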
6475static inline void
6476elf64_aarch64_update_plt_entry (bfd *output_bfd,
6477 unsigned int r_type,
6478 bfd_byte *plt_entry, bfd_vma value)
6479{
6480 reloc_howto_type *howto;
6481 howto = elf64_aarch64_howto_from_type (r_type);
6482 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6483}
6484
6485static void
6486elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6487 struct elf64_aarch64_link_hash_table
8847944f 6488 *htab, bfd *output_bfd)
6489{
6490 bfd_byte *plt_entry;
6491 bfd_vma plt_index;
6492 bfd_vma got_offset;
6493 bfd_vma gotplt_entry_address;
6494 bfd_vma plt_entry_address;
6495 Elf_Internal_Rela rela;
6496 bfd_byte *loc;
6497
8847944f 6498 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
692e2b8b 6499
 6500 /* The offset into the GOT is the PLT index plus the three reserved
 6501 GOT header entries, times GOT_ENTRY_SIZE (8 bytes). */
6502 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6503 plt_entry = htab->root.splt->contents + h->plt.offset;
6504 plt_entry_address = htab->root.splt->output_section->vma
6505 + htab->root.splt->output_section->output_offset + h->plt.offset;
6506 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6507 htab->root.sgotplt->output_offset + got_offset;
6508
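  /* Each small model PLTn entry is expected to have the form
	adrp x16, PLT_GOT + n * 8
	ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
	add  x16, x16, #:lo12:PLT_GOT + n * 8
	br   x17
     which is what the three fixups applied below assume.  */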
6509 /* Copy in the boiler-plate for the PLTn entry. */
6510 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6511
6512 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6513 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6514 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6515 plt_entry,
6516 PG (gotplt_entry_address) -
6517 PG (plt_entry_address));
6518
6519 /* Fill in the lo12 bits for the load from the pltgot. */
6520 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6521 plt_entry + 4,
6522 PG_OFFSET (gotplt_entry_address));
6523
 6524 /* Fill in the lo12 bits for the add from the pltgot entry. */
6525 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6526 plt_entry + 8,
6527 PG_OFFSET (gotplt_entry_address));
6528
6529 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6530 bfd_put_64 (output_bfd,
6531 (htab->root.splt->output_section->vma
6532 + htab->root.splt->output_offset),
6533 htab->root.sgotplt->contents + got_offset);
a06ea964 6534
8847944f 6535 /* Fill in the entry in the .rela.plt section. */
a06ea964 6536 rela.r_offset = gotplt_entry_address;
6537 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6538 rela.r_addend = 0;
6539
 6540 /* Compute the relocation entry to use based on the PLT index and do
6541 not adjust reloc_count. The reloc_count has already been adjusted
6542 to account for this entry. */
8847944f 6543 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6544 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6545}
6546
6547/* Size sections even though they're not dynamic. We use it to setup
6548 _TLS_MODULE_BASE_, if needed. */
6549
6550static bfd_boolean
6551elf64_aarch64_always_size_sections (bfd *output_bfd,
6552 struct bfd_link_info *info)
6553{
6554 asection *tls_sec;
6555
6556 if (info->relocatable)
6557 return TRUE;
6558
6559 tls_sec = elf_hash_table (info)->tls_sec;
6560
6561 if (tls_sec)
6562 {
6563 struct elf_link_hash_entry *tlsbase;
6564
6565 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6566 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6567
6568 if (tlsbase)
6569 {
6570 struct bfd_link_hash_entry *h = NULL;
6571 const struct elf_backend_data *bed =
6572 get_elf_backend_data (output_bfd);
6573
6574 if (!(_bfd_generic_link_add_one_symbol
6575 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6576 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6577 return FALSE;
6578
6579 tlsbase->type = STT_TLS;
6580 tlsbase = (struct elf_link_hash_entry *) h;
6581 tlsbase->def_regular = 1;
6582 tlsbase->other = STV_HIDDEN;
6583 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6584 }
6585 }
6586
6587 return TRUE;
6588}
6589
6590/* Finish up dynamic symbol handling. We set the contents of various
6591 dynamic sections here. */
6592static bfd_boolean
6593elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6594 struct bfd_link_info *info,
6595 struct elf_link_hash_entry *h,
6596 Elf_Internal_Sym *sym)
6597{
6598 struct elf64_aarch64_link_hash_table *htab;
6599 htab = elf64_aarch64_hash_table (info);
6600
6601 if (h->plt.offset != (bfd_vma) - 1)
6602 {
6603 /* This symbol has an entry in the procedure linkage table. Set
6604 it up. */
6605
6606 if (h->dynindx == -1
6607 || htab->root.splt == NULL
6608 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6609 abort ();
6610
8847944f 6611 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6612 if (!h->def_regular)
6613 {
6614 /* Mark the symbol as undefined, rather than as defined in
6615 the .plt section. Leave the value alone. This is a clue
6616 for the dynamic linker, to make function pointer
6617 comparisons work between an application and shared
6618 library. */
6619 sym->st_shndx = SHN_UNDEF;
6620 }
6621 }
6622
6623 if (h->got.offset != (bfd_vma) - 1
6624 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6625 {
6626 Elf_Internal_Rela rela;
6627 bfd_byte *loc;
6628
6629 /* This symbol has an entry in the global offset table. Set it
6630 up. */
6631 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6632 abort ();
6633
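      /* Bit 0 of got.offset is used as a flag (set when the entry only
	 needs an R_AARCH64_RELATIVE reloc, as the asserts below check);
	 mask it off to obtain the real GOT offset.  */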
6634 rela.r_offset = (htab->root.sgot->output_section->vma
6635 + htab->root.sgot->output_offset
6636 + (h->got.offset & ~(bfd_vma) 1));
6637
6638 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6639 {
6640 if (!h->def_regular)
6641 return FALSE;
6642
6643 BFD_ASSERT ((h->got.offset & 1) != 0);
6644 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6645 rela.r_addend = (h->root.u.def.value
6646 + h->root.u.def.section->output_section->vma
6647 + h->root.u.def.section->output_offset);
6648 }
6649 else
6650 {
6651 BFD_ASSERT ((h->got.offset & 1) == 0);
6652 bfd_put_64 (output_bfd, (bfd_vma) 0,
6653 htab->root.sgot->contents + h->got.offset);
6654 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6655 rela.r_addend = 0;
6656 }
6657
6658 loc = htab->root.srelgot->contents;
6659 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6660 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6661 }
6662
6663 if (h->needs_copy)
6664 {
6665 Elf_Internal_Rela rela;
6666 bfd_byte *loc;
6667
6668 /* This symbol needs a copy reloc. Set it up. */
6669
6670 if (h->dynindx == -1
6671 || (h->root.type != bfd_link_hash_defined
6672 && h->root.type != bfd_link_hash_defweak)
6673 || htab->srelbss == NULL)
6674 abort ();
6675
6676 rela.r_offset = (h->root.u.def.value
6677 + h->root.u.def.section->output_section->vma
6678 + h->root.u.def.section->output_offset);
6679 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6680 rela.r_addend = 0;
6681 loc = htab->srelbss->contents;
6682 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6683 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6684 }
6685
6686 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6687 be NULL for local symbols. */
6688 if (sym != NULL
9637f6ef 6689 && (h == elf_hash_table (info)->hdynamic
6690 || h == elf_hash_table (info)->hgot))
6691 sym->st_shndx = SHN_ABS;
6692
6693 return TRUE;
6694}
6695
6696static void
6697elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6698 struct elf64_aarch64_link_hash_table
6699 *htab)
6700{
 6701 /* Fill in PLT0. FIXME: RR Note this doesn't distinguish between
 6702 small and large PLTs and at the moment just generates
 6703 the small PLT. */
6704
6705 /* PLT0 of the small PLT looks like this -
6706 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6707 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6708 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6709 // symbol resolver
6710 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6711 // GOTPLT entry for this.
6712 br x17
6713 */
6714 bfd_vma plt_got_base;
6715 bfd_vma plt_base;
6716
6717
6718 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6719 PLT_ENTRY_SIZE);
6720 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6721 PLT_ENTRY_SIZE;
6722
6723 plt_got_base = (htab->root.sgotplt->output_section->vma
6724 + htab->root.sgotplt->output_offset);
6725
6726 plt_base = htab->root.splt->output_section->vma +
6727 htab->root.splt->output_section->output_offset;
6728
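  /* PLT_GOT + 16 is GOT[2] of .got.plt, the slot the dynamic linker
     fills in with the address of the symbol resolver (see the PLT0
     sketch above).  */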
6729 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6730 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6731 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6732 htab->root.splt->contents + 4,
6733 PG (plt_got_base + 16) - PG (plt_base + 4));
6734
6735 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6736 htab->root.splt->contents + 8,
6737 PG_OFFSET (plt_got_base + 16));
6738
6739 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6740 htab->root.splt->contents + 12,
6741 PG_OFFSET (plt_got_base + 16));
6742}
6743
6744static bfd_boolean
6745elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6746 struct bfd_link_info *info)
6747{
6748 struct elf64_aarch64_link_hash_table *htab;
6749 bfd *dynobj;
6750 asection *sdyn;
6751
6752 htab = elf64_aarch64_hash_table (info);
6753 dynobj = htab->root.dynobj;
6754 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6755
6756 if (htab->root.dynamic_sections_created)
6757 {
6758 Elf64_External_Dyn *dyncon, *dynconend;
6759
6760 if (sdyn == NULL || htab->root.sgot == NULL)
6761 abort ();
6762
6763 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6764 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6765 for (; dyncon < dynconend; dyncon++)
6766 {
6767 Elf_Internal_Dyn dyn;
6768 asection *s;
6769
6770 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6771
6772 switch (dyn.d_tag)
6773 {
6774 default:
6775 continue;
6776
6777 case DT_PLTGOT:
6778 s = htab->root.sgotplt;
6779 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6780 break;
6781
6782 case DT_JMPREL:
6783 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6784 break;
6785
6786 case DT_PLTRELSZ:
6787 s = htab->root.srelplt->output_section;
6788 dyn.d_un.d_val = s->size;
6789 break;
6790
6791 case DT_RELASZ:
6792 /* The procedure linkage table relocs (DT_JMPREL) should
6793 not be included in the overall relocs (DT_RELA).
6794 Therefore, we override the DT_RELASZ entry here to
6795 make it not include the JMPREL relocs. Since the
6796 linker script arranges for .rela.plt to follow all
6797 other relocation sections, we don't have to worry
6798 about changing the DT_RELA entry. */
6799 if (htab->root.srelplt != NULL)
6800 {
6801 s = htab->root.srelplt->output_section;
6802 dyn.d_un.d_val -= s->size;
6803 }
6804 break;
6805
6806 case DT_TLSDESC_PLT:
6807 s = htab->root.splt;
6808 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6809 + htab->tlsdesc_plt;
6810 break;
6811
6812 case DT_TLSDESC_GOT:
6813 s = htab->root.sgot;
6814 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6815 + htab->dt_tlsdesc_got;
6816 break;
6817 }
6818
6819 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6820 }
6821
6822 }
6823
6824 /* Fill in the special first entry in the procedure linkage table. */
6825 if (htab->root.splt && htab->root.splt->size > 0)
6826 {
6827 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6828
6829 elf_section_data (htab->root.splt->output_section)->
6830 this_hdr.sh_entsize = htab->plt_entry_size;
6831
6832
6833 if (htab->tlsdesc_plt)
6834 {
6835 bfd_put_64 (output_bfd, (bfd_vma) 0,
6836 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6837
6838 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6839 elf64_aarch64_tlsdesc_small_plt_entry,
6840 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6841
6842 {
6843 bfd_vma adrp1_addr =
6844 htab->root.splt->output_section->vma
6845 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6846
6847 bfd_vma adrp2_addr =
6848 htab->root.splt->output_section->vma
6849 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6850
6851 bfd_vma got_addr =
6852 htab->root.sgot->output_section->vma
6853 + htab->root.sgot->output_offset;
6854
6855 bfd_vma pltgot_addr =
6856 htab->root.sgotplt->output_section->vma
6857 + htab->root.sgotplt->output_offset;
6858
6859 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6860 bfd_vma opcode;
6861
6862 /* adrp x2, DT_TLSDESC_GOT */
6863 opcode = bfd_get_32 (output_bfd,
6864 htab->root.splt->contents
6865 + htab->tlsdesc_plt + 4);
6866 opcode = reencode_adr_imm
6867 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6868 bfd_put_32 (output_bfd, opcode,
6869 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6870
6871 /* adrp x3, 0 */
6872 opcode = bfd_get_32 (output_bfd,
6873 htab->root.splt->contents
6874 + htab->tlsdesc_plt + 8);
6875 opcode = reencode_adr_imm
6876 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6877 bfd_put_32 (output_bfd, opcode,
6878 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6879
6880 /* ldr x2, [x2, #0] */
6881 opcode = bfd_get_32 (output_bfd,
6882 htab->root.splt->contents
6883 + htab->tlsdesc_plt + 12);
6884 opcode = reencode_ldst_pos_imm (opcode,
6885 PG_OFFSET (dt_tlsdesc_got) >> 3);
6886 bfd_put_32 (output_bfd, opcode,
6887 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6888
6889 /* add x3, x3, 0 */
6890 opcode = bfd_get_32 (output_bfd,
6891 htab->root.splt->contents
6892 + htab->tlsdesc_plt + 16);
6893 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6894 bfd_put_32 (output_bfd, opcode,
6895 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6896 }
6897 }
6898 }
6899
6900 if (htab->root.sgotplt)
6901 {
6902 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6903 {
6904 (*_bfd_error_handler)
6905 (_("discarded output section: `%A'"), htab->root.sgotplt);
6906 return FALSE;
6907 }
6908
6909 /* Fill in the first three entries in the global offset table. */
6910 if (htab->root.sgotplt->size > 0)
6911 {
6912 /* Set the first entry in the global offset table to the address of
6913 the dynamic section. */
6914 if (sdyn == NULL)
6915 bfd_put_64 (output_bfd, (bfd_vma) 0,
6916 htab->root.sgotplt->contents);
6917 else
6918 bfd_put_64 (output_bfd,
6919 sdyn->output_section->vma + sdyn->output_offset,
6920 htab->root.sgotplt->contents);
6921 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6922 bfd_put_64 (output_bfd,
6923 (bfd_vma) 0,
6924 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6925 bfd_put_64 (output_bfd,
6926 (bfd_vma) 0,
6927 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6928 }
6929
6930 elf_section_data (htab->root.sgotplt->output_section)->
6931 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6932 }
6933
6934 if (htab->root.sgot && htab->root.sgot->size > 0)
6935 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6936 = GOT_ENTRY_SIZE;
6937
6938 return TRUE;
6939}
6940
6941/* Return address for Ith PLT stub in section PLT, for relocation REL
6942 or (bfd_vma) -1 if it should not be included. */
6943
6944static bfd_vma
6945elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6946 const arelent *rel ATTRIBUTE_UNUSED)
6947{
6948 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6949}
6950
6951
6952/* We use this so we can override certain functions
6953 (though currently we don't). */
6954
6955const struct elf_size_info elf64_aarch64_size_info =
6956{
6957 sizeof (Elf64_External_Ehdr),
6958 sizeof (Elf64_External_Phdr),
6959 sizeof (Elf64_External_Shdr),
6960 sizeof (Elf64_External_Rel),
6961 sizeof (Elf64_External_Rela),
6962 sizeof (Elf64_External_Sym),
6963 sizeof (Elf64_External_Dyn),
6964 sizeof (Elf_External_Note),
6965 4, /* Hash table entry size. */
6966 1, /* Internal relocs per external relocs. */
6967 64, /* Arch size. */
6968 3, /* Log_file_align. */
6969 ELFCLASS64, EV_CURRENT,
6970 bfd_elf64_write_out_phdrs,
6971 bfd_elf64_write_shdrs_and_ehdr,
6972 bfd_elf64_checksum_contents,
6973 bfd_elf64_write_relocs,
6974 bfd_elf64_swap_symbol_in,
6975 bfd_elf64_swap_symbol_out,
6976 bfd_elf64_slurp_reloc_table,
6977 bfd_elf64_slurp_symbol_table,
6978 bfd_elf64_swap_dyn_in,
6979 bfd_elf64_swap_dyn_out,
6980 bfd_elf64_swap_reloc_in,
6981 bfd_elf64_swap_reloc_out,
6982 bfd_elf64_swap_reloca_in,
6983 bfd_elf64_swap_reloca_out
6984};
6985
6986#define ELF_ARCH bfd_arch_aarch64
6987#define ELF_MACHINE_CODE EM_AARCH64
6988#define ELF_MAXPAGESIZE 0x10000
6989#define ELF_MINPAGESIZE 0x1000
6990#define ELF_COMMONPAGESIZE 0x1000
6991
6992#define bfd_elf64_close_and_cleanup \
6993 elf64_aarch64_close_and_cleanup
6994
6995#define bfd_elf64_bfd_copy_private_bfd_data \
6996 elf64_aarch64_copy_private_bfd_data
6997
6998#define bfd_elf64_bfd_free_cached_info \
6999 elf64_aarch64_bfd_free_cached_info
7000
7001#define bfd_elf64_bfd_is_target_special_symbol \
7002 elf64_aarch64_is_target_special_symbol
7003
7004#define bfd_elf64_bfd_link_hash_table_create \
7005 elf64_aarch64_link_hash_table_create
7006
7007#define bfd_elf64_bfd_link_hash_table_free \
7008 elf64_aarch64_hash_table_free
7009
7010#define bfd_elf64_bfd_merge_private_bfd_data \
7011 elf64_aarch64_merge_private_bfd_data
7012
7013#define bfd_elf64_bfd_print_private_bfd_data \
7014 elf64_aarch64_print_private_bfd_data
7015
7016#define bfd_elf64_bfd_reloc_type_lookup \
7017 elf64_aarch64_reloc_type_lookup
7018
7019#define bfd_elf64_bfd_reloc_name_lookup \
7020 elf64_aarch64_reloc_name_lookup
7021
7022#define bfd_elf64_bfd_set_private_flags \
7023 elf64_aarch64_set_private_flags
7024
7025#define bfd_elf64_find_inliner_info \
7026 elf64_aarch64_find_inliner_info
7027
7028#define bfd_elf64_find_nearest_line \
7029 elf64_aarch64_find_nearest_line
7030
7031#define bfd_elf64_mkobject \
7032 elf64_aarch64_mkobject
7033
7034#define bfd_elf64_new_section_hook \
7035 elf64_aarch64_new_section_hook
7036
7037#define elf_backend_adjust_dynamic_symbol \
7038 elf64_aarch64_adjust_dynamic_symbol
7039
7040#define elf_backend_always_size_sections \
7041 elf64_aarch64_always_size_sections
7042
7043#define elf_backend_check_relocs \
7044 elf64_aarch64_check_relocs
7045
7046#define elf_backend_copy_indirect_symbol \
7047 elf64_aarch64_copy_indirect_symbol
7048
7049/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7050 to them in our hash. */
7051#define elf_backend_create_dynamic_sections \
7052 elf64_aarch64_create_dynamic_sections
7053
7054#define elf_backend_init_index_section \
7055 _bfd_elf_init_2_index_sections
7056
7057#define elf_backend_is_function_type \
7058 elf64_aarch64_is_function_type
7059
7060#define elf_backend_finish_dynamic_sections \
7061 elf64_aarch64_finish_dynamic_sections
7062
7063#define elf_backend_finish_dynamic_symbol \
7064 elf64_aarch64_finish_dynamic_symbol
7065
7066#define elf_backend_gc_sweep_hook \
7067 elf64_aarch64_gc_sweep_hook
7068
7069#define elf_backend_object_p \
7070 elf64_aarch64_object_p
7071
7072#define elf_backend_output_arch_local_syms \
7073 elf64_aarch64_output_arch_local_syms
7074
7075#define elf_backend_plt_sym_val \
7076 elf64_aarch64_plt_sym_val
7077
7078#define elf_backend_post_process_headers \
7079 elf64_aarch64_post_process_headers
7080
7081#define elf_backend_relocate_section \
7082 elf64_aarch64_relocate_section
7083
7084#define elf_backend_reloc_type_class \
7085 elf64_aarch64_reloc_type_class
7086
7087#define elf_backend_section_flags \
7088 elf64_aarch64_section_flags
7089
7090#define elf_backend_section_from_shdr \
7091 elf64_aarch64_section_from_shdr
7092
7093#define elf_backend_size_dynamic_sections \
7094 elf64_aarch64_size_dynamic_sections
7095
7096#define elf_backend_size_info \
7097 elf64_aarch64_size_info
7098
7099#define elf_backend_can_refcount 1
59c108f7 7100#define elf_backend_can_gc_sections 1
7101#define elf_backend_plt_readonly 1
7102#define elf_backend_want_got_plt 1
7103#define elf_backend_want_plt_sym 0
7104#define elf_backend_may_use_rel_p 0
7105#define elf_backend_may_use_rela_p 1
7106#define elf_backend_default_use_rela_p 1
7107#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
c495064d 7108#define elf_backend_default_execstack 0
7109
7110#undef elf_backend_obj_attrs_section
7111#define elf_backend_obj_attrs_section ".ARM.attributes"
7112
7113#include "elf64-target.h"