gas/
[deliverable/binutils-gdb.git] / bfd / elf64-aarch64.c
CommitLineData
a06ea964
NC
1/* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanisms.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elf64_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return the relocation section associated with NAME. HTAB is the
189 bfd's elf64_aarch64_link_hash_entry. */
190#define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
192
193/* Return size of a relocation entry. HTAB is the bfd's
194 elf64_aarch64_link_hash_entry. */
195#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
196
197/* Return function to swap relocations in. HTAB is the bfd's
198 elf64_aarch64_link_hash_entry. */
199#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
200
201/* Return function to swap relocations out. HTAB is the bfd's
202 elf64_aarch64_link_hash_entry. */
203#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
204
205/* GOT Entry size - 8 bytes. */
206#define GOT_ENTRY_SIZE (8)
207#define PLT_ENTRY_SIZE (32)
208#define PLT_SMALL_ENTRY_SIZE (16)
209#define PLT_TLSDESC_ENTRY_SIZE (32)
210
211/* Take the PAGE component of an address or offset. */
212#define PG(x) ((x) & ~ 0xfff)
213#define PG_OFFSET(x) ((x) & 0xfff)
214
215/* Encoding of the nop instruction */
216#define INSN_NOP 0xd503201f
217
218#define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
221
222/* The first entry in a procedure linkage table looks like this
223 if the distance between the PLTGOT and the PLT is < 4GB use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
226 [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
238
239/* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
263#define elf_info_to_howto elf64_aarch64_info_to_howto
264#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
265
266#define AARCH64_ELF_ABI_VERSION 0
267#define AARCH64_ELF_OS_ABI_VERSION 0
268
269/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270#define ALL_ONES (~ (bfd_vma) 0)
271
272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pc_reloffset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
403/* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
f41aef5f
RE
911
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
915 2, /* rightshift */
916 2, /* size (0 = byte,1 = short,2 = long) */
917 19, /* bitsize */
918 TRUE, /* pc_relative */
919 0, /* bitpos */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
927
a06ea964
NC
928 EMPTY_HOWTO (310),
929
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
933 12, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
948 3, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
960};
961
962static reloc_howto_type elf64_aarch64_tls_howto_table[] =
963{
964 EMPTY_HOWTO (512),
965
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
969 12, /* rightshift */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
971 21, /* bitsize */
972 TRUE, /* pc_relative */
973 0, /* bitpos */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
981
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
984 0, /* rightshift */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
986 12, /* bitsize */
987 FALSE, /* pc_relative */
988 0, /* bitpos */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
996
997 EMPTY_HOWTO (515),
998 EMPTY_HOWTO (516),
999 EMPTY_HOWTO (517),
1000 EMPTY_HOWTO (518),
1001 EMPTY_HOWTO (519),
1002 EMPTY_HOWTO (520),
1003 EMPTY_HOWTO (521),
1004 EMPTY_HOWTO (522),
1005 EMPTY_HOWTO (523),
1006 EMPTY_HOWTO (524),
1007 EMPTY_HOWTO (525),
1008 EMPTY_HOWTO (526),
1009 EMPTY_HOWTO (527),
1010 EMPTY_HOWTO (528),
1011 EMPTY_HOWTO (529),
1012 EMPTY_HOWTO (530),
1013 EMPTY_HOWTO (531),
1014 EMPTY_HOWTO (532),
1015 EMPTY_HOWTO (533),
1016 EMPTY_HOWTO (534),
1017 EMPTY_HOWTO (535),
1018 EMPTY_HOWTO (536),
1019 EMPTY_HOWTO (537),
1020 EMPTY_HOWTO (538),
1021
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 21, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1065 3, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
bb3f9ed8 1079 2, /* rightshift */
a06ea964
NC
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 21, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
bb3f9ed8 1093 32, /* rightshift */
a06ea964
NC
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 12, /* bitsize */
1096 FALSE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1105
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
bb3f9ed8 1107 16, /* rightshift */
a06ea964
NC
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 12, /* bitsize */
1110 FALSE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1119
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
bb3f9ed8 1121 16, /* rightshift */
a06ea964
NC
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 12, /* bitsize */
1124 FALSE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1133
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 12, /* bitsize */
1138 FALSE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1147
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
bb3f9ed8 1163 12, /* rightshift */
a06ea964
NC
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 12, /* bitsize */
1166 FALSE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1175
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 12, /* bitsize */
1194 FALSE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
1203};
1204
1205static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1206{
1207 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */
bb3f9ed8 1208 2, /* rightshift */
a06ea964
NC
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1210 21, /* bitsize */
1211 TRUE, /* pc_relative */
1212 0, /* bitpos */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1220
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1222 0, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 21, /* bitsize */
1225 TRUE, /* pc_relative */
1226 0, /* bitpos */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1234
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 21, /* bitsize */
1241 TRUE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1246 FALSE, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE), /* pcrel_offset */
1250
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1253 3, /* rightshift */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 12, /* bitsize */
1256 FALSE, /* pc_relative */
1257 0, /* bitpos */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE), /* pcrel_offset */
1265
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1268 0, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
bb3f9ed8 1282 16, /* rightshift */
a06ea964
NC
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 12, /* bitsize */
1285 FALSE, /* pc_relative */
1286 0, /* bitpos */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1296 0, /* rightshift */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1298 12, /* bitsize */
1299 FALSE, /* pc_relative */
1300 0, /* bitpos */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1308
1309 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1310 0, /* rightshift */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1312 12, /* bitsize */
1313 FALSE, /* pc_relative */
1314 0, /* bitpos */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE, /* partial_inplace */
1319 0x0, /* src_mask */
1320 0x0, /* dst_mask */
1321 FALSE), /* pcrel_offset */
1322
1323 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1324 0, /* rightshift */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1326 12, /* bitsize */
1327 FALSE, /* pc_relative */
1328 0, /* bitpos */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE, /* partial_inplace */
1333 0x0, /* src_mask */
1334 0x0, /* dst_mask */
1335 FALSE), /* pcrel_offset */
1336
1337 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1338 0, /* rightshift */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1340 12, /* bitsize */
1341 FALSE, /* pc_relative */
1342 0, /* bitpos */
1343 complain_overflow_dont, /* complain_on_overflow */
1344 bfd_elf_generic_reloc, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE, /* partial_inplace */
1347 0x0, /* src_mask */
1348 0x0, /* dst_mask */
1349 FALSE), /* pcrel_offset */
1350};
1351
1352static reloc_howto_type *
1353elf64_aarch64_howto_from_type (unsigned int r_type)
1354{
1355 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1356 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1357
1358 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1359 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1360
1361 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1362 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1363
1364 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1365 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1366
1367 switch (r_type)
1368 {
1369 case R_AARCH64_NONE:
1370 return &elf64_aarch64_howto_none;
1371
1372 }
1373 bfd_set_error (bfd_error_bad_value);
1374 return NULL;
1375}
1376
1377static void
1378elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1379 Elf_Internal_Rela *elf_reloc)
1380{
1381 unsigned int r_type;
1382
1383 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1384 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1385}
1386
/* Pairs a BFD relocation code with the corresponding ELF relocation
   number; used to build the lookup table below.  */
struct elf64_aarch64_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD_RELOC_* code.  */
  unsigned int elf_reloc_val;			/* R_AARCH64_* number.  */
};
1392
/* Table mapping BFD relocation codes to ELF relocation numbers,
   searched linearly by elf64_aarch64_reloc_type_lookup.  All entries
   in this list must also be present in
   elf64_aarch64_howto_table.  */
static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
{
  {BFD_RELOC_NONE, R_AARCH64_NONE},

  /* Basic data relocations.  */
  {BFD_RELOC_CTOR, R_AARCH64_ABS64},
  {BFD_RELOC_64, R_AARCH64_ABS64},
  {BFD_RELOC_32, R_AARCH64_ABS32},
  {BFD_RELOC_16, R_AARCH64_ABS16},
  {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
  {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
  {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},

  /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
     value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
  {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
  {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     signed value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     unsigned value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
  {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},

  /* Relocations to generate 19, 21 and 33 bit PC-relative load/store.  */
  {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
  {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
  {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
  {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
  {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},

  /* Relocations for control-flow instructions.  */
  {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
  {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
  {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
  {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},

  /* Relocations for PIC.  */
  {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
  {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
  {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},

  /* Relocations for TLS.  */
  {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
  {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
  {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
   R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
   R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
  {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
  {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
  {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
  {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
  {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
  {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
  {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
};
1489
1490static reloc_howto_type *
1491elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1492 bfd_reloc_code_real_type code)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1497 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1500
1501 bfd_set_error (bfd_error_bad_value);
1502 return NULL;
1503}
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 const char *r_name)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1512 if (elf64_aarch64_howto_table[i].name != NULL
1513 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1514 return &elf64_aarch64_howto_table[i];
1515
1516 return NULL;
1517}
1518
/* BFD target vectors and names for the little- and big-endian
   variants provided by this backend.  */
#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
#define TARGET_LITTLE_NAME "elf64-littleaarch64"
#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
#define TARGET_BIG_NAME "elf64-bigaarch64"

/* A 32-bit AArch64 instruction word.  */
typedef unsigned long int insn32;

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define STUB_ENTRY_NAME "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"

/* Displacement range of the B/BL signed 26-bit word offset
   (+/- 128MB), used by aarch64_valid_branch_p below.  */
#define AARCH64_MAX_FWD_BRANCH_OFFSET \
  (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))

/* Range of the signed 21-bit page-offset immediate of ADRP, used by
   aarch64_valid_for_adrp_p below.  */
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1543
1544static int
1545aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1546{
1547 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1548 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1549}
1550
1551static int
1552aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1553{
1554 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1555 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1556 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1557}
1558
/* Veneer template: materialise the target address with ADRP+ADD into
   ip0 and branch through it.  The noted relocations are applied
   against the stub when it is built.  */
static const uint32_t aarch64_adrp_branch_stub [] =
{
  0x90000010,			/* adrp	ip0, X */
				/* R_AARCH64_ADR_HI21_PCREL(X) */
  0x91000210,			/* add	ip0, ip0, :lo12:X */
				/* R_AARCH64_ADD_ABS_LO12_NC(X) */
  0xd61f0200,			/* br	ip0 */
};
1567
/* Veneer template: load a 64-bit displacement from the trailing
   literal (two words), add the stub's own address obtained with ADR,
   and branch through ip0.  */
static const uint32_t aarch64_long_branch_stub[] =
{
  0x58000090,			/* ldr	ip0, 1f */
  0x10000011,			/* adr	ip1, #0 */
  0x8b110210,			/* add	ip0, ip0, ip1 */
  0xd61f0200,			/* br	ip0 */
  0x00000000,			/* 1: .xword
				   R_AARCH64_PREL64(X) + 12
				 */
  0x00000000,
};
1579
/* Section name for stubs is the associated section name plus this
   string.  */
#define STUB_SUFFIX ".stub"

/* The kinds of branch veneer this backend emits (templates above).  */
enum elf64_aarch64_stub_type
{
  aarch64_stub_none,		/* No stub needed.  */
  aarch64_stub_adrp_branch,	/* ADRP/ADD/BR sequence.  */
  aarch64_stub_long_branch,	/* Load-literal/ADR/ADD/BR sequence.  */
};
1590
/* One entry in the stub hash table: describes a single branch veneer,
   where it lives, where it jumps to, and how to name it.  */
struct elf64_aarch64_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Which veneer template to emit.  */
  enum elf64_aarch64_stub_type stub_type;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf64_aarch64_link_hash_entry *h;

  /* Destination symbol type.  */
  unsigned char st_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
1624
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf64_elf_section_map
{
  bfd_vma vma;			/* Start address of the mapped region.  */
  char type;			/* Classification character for the region
				   (presumably a mapping-symbol letter —
				   TODO confirm against the users).  */
}
elf64_aarch64_section_map;


/* AArch64-specific per-section data: the generic ELF section data
   extended with the section map above.  */
typedef struct _aarch64_elf_section_data
{
  struct bfd_elf_section_data elf;	/* Generic ELF section data.  */
  unsigned int mapcount;		/* Entries used in MAP.  */
  unsigned int mapsize;			/* Entries allocated for MAP.  */
  elf64_aarch64_section_map *map;	/* The map entries.  */
}
_aarch64_elf_section_data;

/* Retrieve the AArch64 section data of SEC.  */
#define elf64_aarch64_section_data(sec) \
  ((_aarch64_elf_section_data *) elf_section_data (sec))
1647
/* The size of the thread control block.  */
#define TCB_SIZE 16

/* GOT bookkeeping kept per local symbol (see elf64_aarch64_locals).  */
struct elf_aarch64_local_symbol
{
  unsigned int got_type;	/* GOT_* flags for this symbol.  */
  bfd_signed_vma got_refcount;	/* Reference count for the GOT entry.  */
  bfd_vma got_offset;		/* Offset of the symbol's GOT entry.  */

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
     offset is from the end of the jump table and reserved entries
     within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not be
     allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
};
1665
/* AArch64-specific object file data, extending the generic ELF
   tdata.  */
struct elf_aarch64_obj_tdata
{
  struct elf_obj_tdata root;

  /* local symbol descriptors */
  struct elf_aarch64_local_symbol *locals;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

/* Access the AArch64 tdata of BFD.  */
#define elf_aarch64_tdata(bfd) \
  ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)

/* The local symbol descriptor array of BFD (may be NULL).  */
#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)

/* Non-zero if BFD is an ELF object carrying AArch64 target data.  */
#define is_aarch64_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == AARCH64_ELF_DATA)
1689
1690static bfd_boolean
1691elf64_aarch64_mkobject (bfd *abfd)
1692{
1693 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1694 AARCH64_ELF_DATA);
1695}
1696
/* The AArch64 linker needs to keep track of the number of relocs that it
   decides to copy in check_relocs for each symbol.  This is so that
   it can discard PC relative relocs if it doesn't need them when
   linking with -Bsymbolic.  We store the information in a field
   extending the regular ELF linker hash table.  */

/* This structure keeps track of the number of relocs we have copied
   for a given symbol.  */
struct elf64_aarch64_relocs_copied
{
  /* Next section.  */
  struct elf64_aarch64_relocs_copied *next;
  /* A section in dynobj.  */
  asection *section;
  /* Number of relocs copied in this section.  */
  bfd_size_type count;
  /* Number of PC-relative relocs copied in this section.  */
  bfd_size_type pc_count;
};
1716
/* Cast a generic linker hash entry to the AArch64-specific type.  */
#define elf64_aarch64_hash_entry(ent) \
  ((struct elf64_aarch64_link_hash_entry *)(ent))

/* Flag values for the got_type bitmask (see link hash entry below).  */
#define GOT_UNKNOWN 0		/* Not yet determined.  */
#define GOT_NORMAL 1		/* Ordinary GOT entry.  */
#define GOT_TLS_GD 2		/* Traditional TLS general dynamic.  */
#define GOT_TLS_IE 4		/* TLS initial exec.  */
#define GOT_TLSDESC_GD 8	/* TLS-descriptor general dynamic.  */

/* Non-zero if TYPE requests either flavour of general-dynamic TLS.  */
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1727
/* AArch64 ELF linker hash entry.  */
struct elf64_aarch64_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* Number of PC relative relocs copied for this symbol.  */
  struct elf64_aarch64_relocs_copied *relocs_copied;

  /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
  bfd_signed_vma plt_got_offset;

  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol (combination of the GOT_* flags above).  */
  unsigned int got_type;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf64_aarch64_stub_hash_entry *stub_cache;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The offset
     is from the end of the jump table and reserved entries within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not
     be allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
};
1759
1760static unsigned int
1761elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1762 bfd *abfd,
1763 unsigned long r_symndx)
1764{
1765 if (h)
1766 return elf64_aarch64_hash_entry (h)->got_type;
1767
1768 if (! elf64_aarch64_locals (abfd))
1769 return GOT_UNKNOWN;
1770
1771 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1772}
1773
/* Traverse an AArch64 ELF linker hash table.  */
#define elf64_aarch64_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the AArch64 elf linker hash table from a link_info structure.  */
#define elf64_aarch64_hash_table(info) \
  ((struct elf64_aarch64_link_hash_table *) ((info)->hash))

/* Typed lookup wrapper around bfd_hash_lookup for the stub table.  */
#define aarch64_stub_hash_lookup(table, string, create, copy) \
  ((struct elf64_aarch64_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
1788
/* AArch64 ELF linker hash table.  */
struct elf64_aarch64_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd *obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub
  {
    /* This is the section to which stubs in the group will be
       attached.  */
    asection *link_sec;
    /* The stub section.  */
    asection *stub_sec;
  } *stub_group;

  /* Assorted information used by elf64_aarch64_size_stubs.  */
  unsigned int bfd_count;
  int top_index;
  asection **input_list;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma tlsdesc_plt;

  /* The GOT offset for the lazy trampoline.  Communicated to the
     loader via DT_TLSDESC_GOT.  The magic value (bfd_vma) -1
     indicates an offset is not allocated.  */
  bfd_vma dt_tlsdesc_got;
};
1855
1856
1857/* Return non-zero if the indicated VALUE has overflowed the maximum
1858 range expressible by a unsigned number with the indicated number of
1859 BITS. */
1860
1861static bfd_reloc_status_type
1862aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1863{
1864 bfd_vma lim;
1865 if (bits >= sizeof (bfd_vma) * 8)
1866 return bfd_reloc_ok;
1867 lim = (bfd_vma) 1 << bits;
1868 if (value >= lim)
1869 return bfd_reloc_overflow;
1870 return bfd_reloc_ok;
1871}
1872
1873
1874/* Return non-zero if the indicated VALUE has overflowed the maximum
1875 range expressible by an signed number with the indicated number of
1876 BITS. */
1877
1878static bfd_reloc_status_type
1879aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1880{
1881 bfd_signed_vma svalue = (bfd_signed_vma) value;
1882 bfd_signed_vma lim;
1883
1884 if (bits >= sizeof (bfd_vma) * 8)
1885 return bfd_reloc_ok;
1886 lim = (bfd_signed_vma) 1 << (bits - 1);
1887 if (svalue < -lim || svalue >= lim)
1888 return bfd_reloc_overflow;
1889 return bfd_reloc_ok;
1890}
1891
1892/* Create an entry in an AArch64 ELF linker hash table.  Follows the
   bfd_hash_newfunc protocol: ENTRY may be NULL (allocate here) or
   pre-allocated by a subclass; STRING is the symbol name. */
1893
1894static struct bfd_hash_entry *
1895elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1896 struct bfd_hash_table *table,
1897 const char *string)
1898{
1899 struct elf64_aarch64_link_hash_entry *ret =
1900 (struct elf64_aarch64_link_hash_entry *) entry;
1901
1902 /* Allocate the structure if it has not already been allocated by a
1903 subclass. */
1904 if (ret == NULL)
1905 ret = bfd_hash_allocate (table,
1906 sizeof (struct elf64_aarch64_link_hash_entry));
1907 if (ret == NULL)
1908 return (struct bfd_hash_entry *) ret;
1909
1910 /* Call the allocation method of the superclass. */
1911 ret = ((struct elf64_aarch64_link_hash_entry *)
1912 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1913 table, string));
1914 if (ret != NULL)
1915 {
      /* Initialise the AArch64-specific fields.  (bfd_vma) -1 is the
         "not allocated" sentinel for the two offsets below.  */
1916 ret->dyn_relocs = NULL;
1917 ret->relocs_copied = NULL;
1918 ret->got_type = GOT_UNKNOWN;
1919 ret->plt_got_offset = (bfd_vma) - 1;
1920 ret->stub_cache = NULL;
1921 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1922 }
1923
1924 return (struct bfd_hash_entry *) ret;
1925}
1926
1927/* Initialize an entry in the stub hash table.  bfd_hash_newfunc
   protocol, as for elf64_aarch64_link_hash_newfunc above. */
1928
1929static struct bfd_hash_entry *
1930stub_hash_newfunc (struct bfd_hash_entry *entry,
1931 struct bfd_hash_table *table, const char *string)
1932{
1933 /* Allocate the structure if it has not already been allocated by a
1934 subclass. */
1935 if (entry == NULL)
1936 {
1937 entry = bfd_hash_allocate (table,
1938 sizeof (struct
1939 elf64_aarch64_stub_hash_entry));
1940 if (entry == NULL)
1941 return entry;
1942 }
1943
1944 /* Call the allocation method of the superclass. */
1945 entry = bfd_hash_newfunc (entry, table, string);
1946 if (entry != NULL)
1947 {
1948 struct elf64_aarch64_stub_hash_entry *eh;
1949
1950 /* Initialize the local fields. */
1951 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1952 eh->stub_sec = NULL;
1953 eh->stub_offset = 0;
1954 eh->target_value = 0;
1955 eh->target_section = NULL;
1956 eh->stub_type = aarch64_stub_none;
1957 eh->h = NULL;
1958 eh->id_sec = NULL;
1959 }
1960
1961 return entry;
1962}
1963
1964
1965/* Copy the extra info we tack onto an elf_link_hash_entry.  Called
   when symbol DIR becomes the target of indirect/warning symbol IND:
   moves IND's dynamic reloc and copied-reloc lists onto DIR, merging
   per-section counts, then defers to the generic ELF copy. */
1966
1967static void
1968elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
1969 struct elf_link_hash_entry *dir,
1970 struct elf_link_hash_entry *ind)
1971{
1972 struct elf64_aarch64_link_hash_entry *edir, *eind;
1973
1974 edir = (struct elf64_aarch64_link_hash_entry *) dir;
1975 eind = (struct elf64_aarch64_link_hash_entry *) ind;
1976
1977 if (eind->dyn_relocs != NULL)
1978 {
1979 if (edir->dyn_relocs != NULL)
1980 {
1981 struct elf_dyn_relocs **pp;
1982 struct elf_dyn_relocs *p;
1983
1984 /* Add reloc counts against the indirect sym to the direct sym
1985 list. Merge any entries against the same section. */
1986 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
1987 {
1988 struct elf_dyn_relocs *q;
1989
1990 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1991 if (q->sec == p->sec)
1992 {
1993 q->pc_count += p->pc_count;
1994 q->count += p->count;
                      /* Unlink the merged node from IND's list.  */
1995 *pp = p->next;
1996 break;
1997 }
1998 if (q == NULL)
1999 pp = &p->next;
2000 }
              /* Append DIR's old list after IND's surviving nodes.  */
2001 *pp = edir->dyn_relocs;
2002 }
2003
2004 edir->dyn_relocs = eind->dyn_relocs;
2005 eind->dyn_relocs = NULL;
2006 }
2007
2008 if (eind->relocs_copied != NULL)
2009 {
2010 if (edir->relocs_copied != NULL)
2011 {
2012 struct elf64_aarch64_relocs_copied **pp;
2013 struct elf64_aarch64_relocs_copied *p;
2014
2015 /* Add reloc counts against the indirect sym to the direct sym
2016 list. Merge any entries against the same section. */
2017 for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
2018 {
2019 struct elf64_aarch64_relocs_copied *q;
2020
2021 for (q = edir->relocs_copied; q != NULL; q = q->next)
2022 if (q->section == p->section)
2023 {
2024 q->pc_count += p->pc_count;
2025 q->count += p->count;
2026 *pp = p->next;
2027 break;
2028 }
2029 if (q == NULL)
2030 pp = &p->next;
2031 }
2032 *pp = edir->relocs_copied;
2033 }
2034
2035 edir->relocs_copied = eind->relocs_copied;
2036 eind->relocs_copied = NULL;
2037 }
2038
2039 if (ind->root.type == bfd_link_hash_indirect)
2040 {
2041 /* Copy over PLT info.  Only take IND's GOT type if DIR has no
         GOT references of its own. */
2042 if (dir->got.refcount <= 0)
2043 {
2044 edir->got_type = eind->got_type;
2045 eind->got_type = GOT_UNKNOWN;
2046 }
2047 }
2048
2049 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2050}
2051
2052/* Create an AArch64 elf linker hash table.  Returns the generic
   bfd_link_hash_table embedded in the new table, or NULL on
   allocation failure. */
2053
2054static struct bfd_link_hash_table *
2055elf64_aarch64_link_hash_table_create (bfd *abfd)
2056{
2057 struct elf64_aarch64_link_hash_table *ret;
2058 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2059
2060 ret = bfd_malloc (amt);
2061 if (ret == NULL)
2062 return NULL;
2063
2064 if (!_bfd_elf_link_hash_table_init
2065 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2066 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2067 {
2068 free (ret);
2069 return NULL;
2070 }
2071
2072 ret->sdynbss = NULL;
2073 ret->srelbss = NULL;
2074
2075 ret->plt_header_size = PLT_ENTRY_SIZE;
2076 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2077
2078 ret->sym_cache.abfd = NULL;
2079 ret->obfd = abfd;
2080
2081 ret->stub_bfd = NULL;
2082 ret->add_stub_section = NULL;
2083 ret->layout_sections_again = NULL;
2084 ret->stub_group = NULL;
2085 ret->bfd_count = 0;
2086 ret->top_index = 0;
2087 ret->input_list = NULL;
2088 ret->tlsdesc_plt = 0;
2089 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2090
  /* NOTE(review): on this failure path only RET itself is freed; the
     root ELF hash table initialised above is not explicitly torn
     down -- presumably its storage is owned elsewhere.  Confirm.  */
2091 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2092 sizeof (struct elf64_aarch64_stub_hash_entry)))
2093 {
2094 free (ret);
2095 return NULL;
2096 }
2097
2098 return &ret->root.root;
2099}
2100
2101/* Free the derived linker hash table. */
2102
2103static void
2104elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2105{
2106 struct elf64_aarch64_link_hash_table *ret
2107 = (struct elf64_aarch64_link_hash_table *) hash;
2108
2109 bfd_hash_table_free (&ret->stub_hash_table);
2110 _bfd_generic_link_hash_table_free (hash);
2111}
2112
2113static bfd_vma
2114aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2115 bfd_vma addend, bfd_boolean weak_undef_p)
2116{
2117 switch (r_type)
2118 {
2119 case R_AARCH64_TLSDESC_CALL:
2120 case R_AARCH64_NONE:
2121 case R_AARCH64_NULL:
2122 break;
2123
2124 case R_AARCH64_ADR_PREL_LO21:
2125 case R_AARCH64_CONDBR19:
2126 case R_AARCH64_LD_PREL_LO19:
2127 case R_AARCH64_PREL16:
2128 case R_AARCH64_PREL32:
2129 case R_AARCH64_PREL64:
2130 case R_AARCH64_TSTBR14:
2131 if (weak_undef_p)
2132 value = place;
2133 value = value + addend - place;
2134 break;
2135
2136 case R_AARCH64_CALL26:
2137 case R_AARCH64_JUMP26:
2138 value = value + addend - place;
2139 break;
2140
2141 case R_AARCH64_ABS16:
2142 case R_AARCH64_ABS32:
2143 case R_AARCH64_MOVW_SABS_G0:
2144 case R_AARCH64_MOVW_SABS_G1:
2145 case R_AARCH64_MOVW_SABS_G2:
2146 case R_AARCH64_MOVW_UABS_G0:
2147 case R_AARCH64_MOVW_UABS_G0_NC:
2148 case R_AARCH64_MOVW_UABS_G1:
2149 case R_AARCH64_MOVW_UABS_G1_NC:
2150 case R_AARCH64_MOVW_UABS_G2:
2151 case R_AARCH64_MOVW_UABS_G2_NC:
2152 case R_AARCH64_MOVW_UABS_G3:
2153 value = value + addend;
2154 break;
2155
2156 case R_AARCH64_ADR_PREL_PG_HI21:
2157 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2158 if (weak_undef_p)
2159 value = PG (place);
2160 value = PG (value + addend) - PG (place);
2161 break;
2162
f41aef5f
RE
2163 case R_AARCH64_GOT_LD_PREL19:
2164 value = value + addend - place;
2165 break;
2166
a06ea964
NC
2167 case R_AARCH64_ADR_GOT_PAGE:
2168 case R_AARCH64_TLSDESC_ADR_PAGE:
2169 case R_AARCH64_TLSGD_ADR_PAGE21:
2170 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2171 value = PG (value + addend) - PG (place);
2172 break;
2173
2174 case R_AARCH64_ADD_ABS_LO12_NC:
2175 case R_AARCH64_LD64_GOT_LO12_NC:
2176 case R_AARCH64_LDST8_ABS_LO12_NC:
2177 case R_AARCH64_LDST16_ABS_LO12_NC:
2178 case R_AARCH64_LDST32_ABS_LO12_NC:
2179 case R_AARCH64_LDST64_ABS_LO12_NC:
2180 case R_AARCH64_LDST128_ABS_LO12_NC:
2181 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2182 case R_AARCH64_TLSDESC_ADD:
2183 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2184 case R_AARCH64_TLSDESC_LDR:
2185 case R_AARCH64_TLSGD_ADD_LO12_NC:
2186 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2187 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2188 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2189 value = PG_OFFSET (value + addend);
2190 break;
2191
2192 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2193 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2194 value = (value + addend) & (bfd_vma) 0xffff0000;
2195 break;
2196 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2197 value = (value + addend) & (bfd_vma) 0xfff000;
2198 break;
2199
2200 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2201 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2202 value = (value + addend) & (bfd_vma) 0xffff;
2203 break;
2204
2205 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2206 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2207 value -= place & ~(bfd_vma) 0xffffffff;
2208 break;
2209 }
2210 return value;
2211}
2212
2213static bfd_boolean
2214aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2215 bfd_vma offset, bfd_vma value)
2216{
2217 reloc_howto_type *howto;
2218 bfd_vma place;
2219
2220 howto = elf64_aarch64_howto_from_type (r_type);
2221 place = (input_section->output_section->vma + input_section->output_offset
2222 + offset);
2223 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2224 return bfd_elf_aarch64_put_addend (input_bfd,
2225 input_section->contents + offset,
2226 howto, value);
2227}
2228
2229static enum elf64_aarch64_stub_type
2230aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2231{
2232 if (aarch64_valid_for_adrp_p (value, place))
2233 return aarch64_stub_adrp_branch;
2234 return aarch64_stub_long_branch;
2235}
2236
2237/* Determine the type of stub needed, if any, for a call.  Returns
   aarch64_stub_none when no stub is required (non-function target,
   call routed through the PLT, or branch already in range). */
2238
2239static enum elf64_aarch64_stub_type
2240aarch64_type_of_stub (struct bfd_link_info *info,
2241 asection *input_sec,
2242 const Elf_Internal_Rela *rel,
2243 unsigned char st_type,
2244 struct elf64_aarch64_link_hash_entry *hash,
2245 bfd_vma destination)
2246{
2247 bfd_vma location;
2248 bfd_signed_vma branch_offset;
2249 unsigned int r_type;
2250 struct elf64_aarch64_link_hash_table *globals;
2251 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2252 bfd_boolean via_plt_p;
2253
2254 if (st_type != STT_FUNC)
2255 return stub_type;
2256
2257 globals = elf64_aarch64_hash_table (info);
  /* A call that goes via the PLT does not need a stub of its own.  */
2258 via_plt_p = (globals->root.splt != NULL && hash != NULL
2259 && hash->root.plt.offset != (bfd_vma) - 1);
2260
2261 if (via_plt_p)
2262 return stub_type;
2263
2264 /* Determine where the call point is. */
2265 location = (input_sec->output_offset
2266 + input_sec->output_section->vma + rel->r_offset);
2267
2268 branch_offset = (bfd_signed_vma) (destination - location);
2269
2270 r_type = ELF64_R_TYPE (rel->r_info);
2271
2272 /* We don't want to redirect any old unconditional jump in this way,
2273 only one which is being used for a sibcall, where it is
2274 acceptable for the IP0 and IP1 registers to be clobbered. */
2275 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2276 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2277 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2278 {
2279 stub_type = aarch64_stub_long_branch;
2280 }
2281
2282 return stub_type;
2283}
2284
2285/* Build a name for an entry in the stub hash table.  The name encodes
   the (grouped) input section id plus either the global symbol name
   or the target section id and local symbol index, and the addend, so
   that distinct stubs to the same symbol get distinct entries.
   Returns a malloc'd string (caller frees), or NULL on OOM. */
2286
2287static char *
2288elf64_aarch64_stub_name (const asection *input_section,
2289 const asection *sym_sec,
2290 const struct elf64_aarch64_link_hash_entry *hash,
2291 const Elf_Internal_Rela *rel)
2292{
2293 char *stub_name;
2294 bfd_size_type len;
2295
2296 if (hash)
2297 {
      /* 8 hex digits + '_' + name + '+' + up to 16 hex digits + NUL.  */
2298 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2299 stub_name = bfd_malloc (len);
2300 if (stub_name != NULL)
2301 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2302 (unsigned int) input_section->id,
2303 hash->root.root.root.string,
2304 rel->r_addend);
2305 }
2306 else
2307 {
2308 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2309 stub_name = bfd_malloc (len);
2310 if (stub_name != NULL)
2311 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2312 (unsigned int) input_section->id,
2313 (unsigned int) sym_sec->id,
2314 (unsigned int) ELF64_R_SYM (rel->r_info),
2315 rel->r_addend);
2316 }
2317
2318 return stub_name;
2319}
2320
2321/* Look up an entry in the stub hash. Stub entries are cached because
2322 creating the stub name takes a bit of time.  Returns NULL for
   non-code sections or if no matching stub exists. */
2323
2324static struct elf64_aarch64_stub_hash_entry *
2325elf64_aarch64_get_stub_entry (const asection *input_section,
2326 const asection *sym_sec,
2327 struct elf_link_hash_entry *hash,
2328 const Elf_Internal_Rela *rel,
2329 struct elf64_aarch64_link_hash_table *htab)
2330{
2331 struct elf64_aarch64_stub_hash_entry *stub_entry;
2332 struct elf64_aarch64_link_hash_entry *h =
2333 (struct elf64_aarch64_link_hash_entry *) hash;
2334 const asection *id_sec;
2335
2336 if ((input_section->flags & SEC_CODE) == 0)
2337 return NULL;
2338
2339 /* If this input section is part of a group of sections sharing one
2340 stub section, then use the id of the first section in the group.
2341 Stub names need to include a section id, as there may well be
2342 more than one stub used to reach say, printf, and we need to
2343 distinguish between them. */
2344 id_sec = htab->stub_group[input_section->id].link_sec;
2345
  /* Fast path: the per-symbol one-entry cache, valid only for the
     same symbol and the same stub group. */
2346 if (h != NULL && h->stub_cache != NULL
2347 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2348 {
2349 stub_entry = h->stub_cache;
2350 }
2351 else
2352 {
2353 char *stub_name;
2354
2355 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2356 if (stub_name == NULL)
2357 return NULL;
2358
2359 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2360 stub_name, FALSE, FALSE);
2361 if (h != NULL)
2362 h->stub_cache = stub_entry;
2363
2364 free (stub_name);
2365 }
2366
2367 return stub_entry;
2368}
2369
2370/* Add a new stub entry to the stub hash. Not all fields of the new
2371 stub entry are initialised. */
2372
2373static struct elf64_aarch64_stub_hash_entry *
2374elf64_aarch64_add_stub (const char *stub_name,
2375 asection *section,
2376 struct elf64_aarch64_link_hash_table *htab)
2377{
2378 asection *link_sec;
2379 asection *stub_sec;
2380 struct elf64_aarch64_stub_hash_entry *stub_entry;
2381
2382 link_sec = htab->stub_group[section->id].link_sec;
2383 stub_sec = htab->stub_group[section->id].stub_sec;
2384 if (stub_sec == NULL)
2385 {
2386 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2387 if (stub_sec == NULL)
2388 {
2389 size_t namelen;
2390 bfd_size_type len;
2391 char *s_name;
2392
2393 namelen = strlen (link_sec->name);
2394 len = namelen + sizeof (STUB_SUFFIX);
2395 s_name = bfd_alloc (htab->stub_bfd, len);
2396 if (s_name == NULL)
2397 return NULL;
2398
2399 memcpy (s_name, link_sec->name, namelen);
2400 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2401 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2402 if (stub_sec == NULL)
2403 return NULL;
2404 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2405 }
2406 htab->stub_group[section->id].stub_sec = stub_sec;
2407 }
2408
2409 /* Enter this entry into the linker stub hash table. */
2410 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2411 TRUE, FALSE);
2412 if (stub_entry == NULL)
2413 {
2414 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2415 section->owner, stub_name);
2416 return NULL;
2417 }
2418
2419 stub_entry->stub_sec = stub_sec;
2420 stub_entry->stub_offset = 0;
2421 stub_entry->id_sec = link_sec;
2422
2423 return stub_entry;
2424}
2425
/* Stub-hash traversal callback: emit the code for one stub into its
   stub section at the current end of that section, relaxing a long
   branch to an adrp/add form when the target is in adrp range.  */
2426static bfd_boolean
2427aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2428 void *in_arg ATTRIBUTE_UNUSED)
2429{
2430 struct elf64_aarch64_stub_hash_entry *stub_entry;
2431 asection *stub_sec;
2432 bfd *stub_bfd;
2433 bfd_byte *loc;
2434 bfd_vma sym_value;
2435 unsigned int template_size;
2436 const uint32_t *template;
2437 unsigned int i;
2438
2439 /* Massage our args to the form they really have. */
2440 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2441
2442 stub_sec = stub_entry->stub_sec;
2443
2444 /* Make a note of the offset within the stubs for this entry. */
2445 stub_entry->stub_offset = stub_sec->size;
2446 loc = stub_sec->contents + stub_entry->stub_offset;
2447
2448 stub_bfd = stub_sec->owner;
2449
2450 /* This is the address of the stub destination. */
2451 sym_value = (stub_entry->target_value
2452 + stub_entry->target_section->output_offset
2453 + stub_entry->target_section->output_section->vma);
2454
2455 if (stub_entry->stub_type == aarch64_stub_long_branch)
2456 {
2457 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2458 + stub_sec->output_offset);
2459
2460 /* See if we can relax the stub. */
2461 if (aarch64_valid_for_adrp_p (sym_value, place))
2462 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2463 }
2464
2465 switch (stub_entry->stub_type)
2466 {
2467 case aarch64_stub_adrp_branch:
2468 template = aarch64_adrp_branch_stub;
2469 template_size = sizeof (aarch64_adrp_branch_stub);
2470 break;
2471 case aarch64_stub_long_branch:
2472 template = aarch64_long_branch_stub;
2473 template_size = sizeof (aarch64_long_branch_stub);
2474 break;
2475 default:
2476 BFD_FAIL ();
2477 return FALSE;
2478 }
2479
  /* Copy the 32-bit template instructions, little-endian.  */
2480 for (i = 0; i < (template_size / sizeof template[0]); i++)
2481 {
2482 bfd_putl32 (template[i], loc);
2483 loc += 4;
2484 }
2485
  /* Keep the stub section size 8-byte aligned.  */
2486 template_size = (template_size + 7) & ~7;
2487 stub_sec->size += template_size;
2488
2489 switch (stub_entry->stub_type)
2490 {
2491 case aarch64_stub_adrp_branch:
      /* NOTE(review): BFD_FAIL fires when aarch64_relocate returns
         nonzero; this assumes a nonzero return from
         bfd_elf_aarch64_put_addend signals overflow -- confirm its
         return convention (later sources negate this test). */
2492 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2493 stub_entry->stub_offset, sym_value))
2494 /* The stub would not have been relaxed if the offset was out
2495 of range. */
2496 BFD_FAIL ();
2497
2498 _bfd_final_link_relocate
2499 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2500 stub_bfd,
2501 stub_sec,
2502 stub_sec->contents,
2503 stub_entry->stub_offset + 4,
2504 sym_value,
2505 0);
2506 break;
2507
2508 case aarch64_stub_long_branch:
2509 /* We want the value relative to the address 12 bytes back from the
2510 value itself. */
2511 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2512 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2513 stub_sec->contents,
2514 stub_entry->stub_offset + 16,
2515 sym_value + 12, 0);
2516 break;
2517 default:
2518 break;
2519 }
2520
2521 return TRUE;
2522}
2523
2524/* As above, but don't actually build the stub. Just bump offset so
2525 we know stub section sizes. */
2526
2527static bfd_boolean
2528aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2529 void *in_arg ATTRIBUTE_UNUSED)
2530{
2531 struct elf64_aarch64_stub_hash_entry *stub_entry;
2532 int size;
2533
2534 /* Massage our args to the form they really have. */
2535 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2536
2537 switch (stub_entry->stub_type)
2538 {
2539 case aarch64_stub_adrp_branch:
2540 size = sizeof (aarch64_adrp_branch_stub);
2541 break;
2542 case aarch64_stub_long_branch:
2543 size = sizeof (aarch64_long_branch_stub);
2544 break;
2545 default:
2546 BFD_FAIL ();
2547 return FALSE;
2548 break;
2549 }
2550
2551 size = (size + 7) & ~7;
2552 stub_entry->stub_sec->size += size;
2553 return TRUE;
2554}
2555
2556/* External entry points for sizing and building linker stubs. */
2557
2558/* Set up various things so that we can make a list of input sections
2559 for each output section included in the link. Returns -1 on error,
2560 0 when no stubs will be needed, and 1 on success. */
2561
2562int
2563elf64_aarch64_setup_section_lists (bfd *output_bfd,
2564 struct bfd_link_info *info)
2565{
2566 bfd *input_bfd;
2567 unsigned int bfd_count;
2568 int top_id, top_index;
2569 asection *section;
2570 asection **input_list, **list;
2571 bfd_size_type amt;
2572 struct elf64_aarch64_link_hash_table *htab =
2573 elf64_aarch64_hash_table (info);
2574
2575 if (!is_elf_hash_table (htab))
2576 return 0;
2577
2578 /* Count the number of input BFDs and find the top input section id. */
2579 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2580 input_bfd != NULL; input_bfd = input_bfd->link_next)
2581 {
2582 bfd_count += 1;
2583 for (section = input_bfd->sections;
2584 section != NULL; section = section->next)
2585 {
2586 if (top_id < section->id)
2587 top_id = section->id;
2588 }
2589 }
2590 htab->bfd_count = bfd_count;
2591
  /* stub_group is indexed by input section id; zeroed so every
     link_sec/stub_sec starts out NULL. */
2592 amt = sizeof (struct map_stub) * (top_id + 1);
2593 htab->stub_group = bfd_zmalloc (amt);
2594 if (htab->stub_group == NULL)
2595 return -1;
2596
2597 /* We can't use output_bfd->section_count here to find the top output
2598 section index as some sections may have been removed, and
2599 _bfd_strip_section_from_output doesn't renumber the indices. */
2600 for (section = output_bfd->sections, top_index = 0;
2601 section != NULL; section = section->next)
2602 {
2603 if (top_index < section->index)
2604 top_index = section->index;
2605 }
2606
2607 htab->top_index = top_index;
2608 amt = sizeof (asection *) * (top_index + 1);
2609 input_list = bfd_malloc (amt);
2610 htab->input_list = input_list;
2611 if (input_list == NULL)
2612 return -1;
2613
2614 /* For sections we aren't interested in, mark their entries with a
2615 value we can check later.  bfd_abs_section_ptr is the sentinel;
    code sections are reset to NULL (empty list) just below. */
2616 list = input_list + top_index;
2617 do
2618 *list = bfd_abs_section_ptr;
2619 while (list-- != input_list);
2620
2621 for (section = output_bfd->sections;
2622 section != NULL; section = section->next)
2623 {
2624 if ((section->flags & SEC_CODE) != 0)
2625 input_list[section->index] = NULL;
2626 }
2627
2628 return 1;
2629}
2630
2631/* Used by elf64_aarch64_next_input_section and group_sections.
   Repurposes the (not yet assigned) link_sec slot of a section's
   stub_group entry as the "previous section" link of the per-output
   section input list. */
2632#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2633
2634/* The linker repeatedly calls this function for each input section,
2635 in the order that input sections are linked into output sections.
2636 Build lists of input sections to determine groupings between which
2637 we may insert linker stubs. */
2638
2639void
2640elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2641{
2642 struct elf64_aarch64_link_hash_table *htab =
2643 elf64_aarch64_hash_table (info);
2644
2645 if (isec->output_section->index <= htab->top_index)
2646 {
2647 asection **list = htab->input_list + isec->output_section->index;
2648
      /* bfd_abs_section_ptr marks output sections we ignore (set up in
         elf64_aarch64_setup_section_lists). */
2649 if (*list != bfd_abs_section_ptr)
2650 {
2651 /* Steal the link_sec pointer for our list. */
2652 /* This happens to make the list in reverse order,
2653 which is what we want. */
2654 PREV_SEC (isec) = *list;
2655 *list = isec;
2656 }
2657 }
2658}
2659
2660/* See whether we can group stub sections together. Grouping stub
2661 sections may result in fewer stubs. More importantly, we need to
2662 put all .init* and .fini* stubs at the beginning of the .init or
2663 .fini output sections respectively, because glibc splits the
2664 _init and _fini functions into multiple parts. Putting a stub in
2665 the middle of a function is not a good idea.

   Walks the per-output-section input lists built by
   elf64_aarch64_next_input_section (linked via PREV_SEC, in reverse
   link order) and assigns each input section's stub_group link_sec to
   its group leader CURR.  Frees htab->input_list when done. */
2666
2667static void
2668group_sections (struct elf64_aarch64_link_hash_table *htab,
2669 bfd_size_type stub_group_size,
2670 bfd_boolean stubs_always_before_branch)
2671{
2672 asection **list = htab->input_list + htab->top_index;
2673
2674 do
2675 {
2676 asection *tail = *list;
2677
2678 if (tail == bfd_abs_section_ptr)
2679 continue;
2680
2681 while (tail != NULL)
2682 {
2683 asection *curr;
2684 asection *prev;
2685 bfd_size_type total;
2686
      /* Walk backwards from TAIL while the span still fits in
         stub_group_size; CURR ends up as the group leader. */
2687 curr = tail;
2688 total = tail->size;
2689 while ((prev = PREV_SEC (curr)) != NULL
2690 && ((total += curr->output_offset - prev->output_offset)
2691 < stub_group_size))
2692 curr = prev;
2693
2694 /* OK, the size from the start of CURR to the end is less
2695 than stub_group_size and thus can be handled by one stub
2696 section. (Or the tail section is itself larger than
2697 stub_group_size, in which case we may be toast.)
2698 We should really be keeping track of the total size of
2699 stubs added here, as stubs contribute to the final output
2700 section size. */
2701 do
2702 {
2703 prev = PREV_SEC (tail);
2704 /* Set up this stub group. */
2705 htab->stub_group[tail->id].link_sec = curr;
2706 }
2707 while (tail != curr && (tail = prev) != NULL);
2708
2709 /* But wait, there's more! Input sections up to stub_group_size
2710 bytes before the stub section can be handled by it too. */
2711 if (!stubs_always_before_branch)
2712 {
2713 total = 0;
2714 while (prev != NULL
2715 && ((total += tail->output_offset - prev->output_offset)
2716 < stub_group_size))
2717 {
2718 tail = prev;
2719 prev = PREV_SEC (tail);
2720 htab->stub_group[tail->id].link_sec = curr;
2721 }
2722 }
2723 tail = prev;
2724 }
2725 }
2726 while (list-- != htab->input_list);
2727
2728 free (htab->input_list);
2729}
2730
2731#undef PREV_SEC
2732
2733/* Determine and set the size of the stub section for a final link.
2734
2735 The basic idea here is to examine all the relocations looking for
2736 PC-relative calls to a target that is unreachable with a "bl"
2737 instruction. */
2738
2739bfd_boolean
2740elf64_aarch64_size_stubs (bfd *output_bfd,
2741 bfd *stub_bfd,
2742 struct bfd_link_info *info,
2743 bfd_signed_vma group_size,
2744 asection * (*add_stub_section) (const char *,
2745 asection *),
2746 void (*layout_sections_again) (void))
2747{
2748 bfd_size_type stub_group_size;
2749 bfd_boolean stubs_always_before_branch;
2750 bfd_boolean stub_changed = 0;
2751 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2752
2753 /* Propagate mach to stub bfd, because it may not have been
2754 finalized when we created stub_bfd. */
2755 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2756 bfd_get_mach (output_bfd));
2757
2758 /* Stash our params away. */
2759 htab->stub_bfd = stub_bfd;
2760 htab->add_stub_section = add_stub_section;
2761 htab->layout_sections_again = layout_sections_again;
2762 stubs_always_before_branch = group_size < 0;
2763 if (group_size < 0)
2764 stub_group_size = -group_size;
2765 else
2766 stub_group_size = group_size;
2767
2768 if (stub_group_size == 1)
2769 {
2770 /* Default values. */
2771 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2772 stub_group_size = 127 * 1024 * 1024;
2773 }
2774
2775 group_sections (htab, stub_group_size, stubs_always_before_branch);
2776
2777 while (1)
2778 {
2779 bfd *input_bfd;
2780 unsigned int bfd_indx;
2781 asection *stub_sec;
2782
2783 for (input_bfd = info->input_bfds, bfd_indx = 0;
2784 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2785 {
2786 Elf_Internal_Shdr *symtab_hdr;
2787 asection *section;
2788 Elf_Internal_Sym *local_syms = NULL;
2789
2790 /* We'll need the symbol table in a second. */
2791 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2792 if (symtab_hdr->sh_info == 0)
2793 continue;
2794
2795 /* Walk over each section attached to the input bfd. */
2796 for (section = input_bfd->sections;
2797 section != NULL; section = section->next)
2798 {
2799 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2800
2801 /* If there aren't any relocs, then there's nothing more
2802 to do. */
2803 if ((section->flags & SEC_RELOC) == 0
2804 || section->reloc_count == 0
2805 || (section->flags & SEC_CODE) == 0)
2806 continue;
2807
2808 /* If this section is a link-once section that will be
2809 discarded, then don't create any stubs. */
2810 if (section->output_section == NULL
2811 || section->output_section->owner != output_bfd)
2812 continue;
2813
2814 /* Get the relocs. */
2815 internal_relocs
2816 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2817 NULL, info->keep_memory);
2818 if (internal_relocs == NULL)
2819 goto error_ret_free_local;
2820
2821 /* Now examine each relocation. */
2822 irela = internal_relocs;
2823 irelaend = irela + section->reloc_count;
2824 for (; irela < irelaend; irela++)
2825 {
2826 unsigned int r_type, r_indx;
2827 enum elf64_aarch64_stub_type stub_type;
2828 struct elf64_aarch64_stub_hash_entry *stub_entry;
2829 asection *sym_sec;
2830 bfd_vma sym_value;
2831 bfd_vma destination;
2832 struct elf64_aarch64_link_hash_entry *hash;
2833 const char *sym_name;
2834 char *stub_name;
2835 const asection *id_sec;
2836 unsigned char st_type;
2837 bfd_size_type len;
2838
2839 r_type = ELF64_R_TYPE (irela->r_info);
2840 r_indx = ELF64_R_SYM (irela->r_info);
2841
2842 if (r_type >= (unsigned int) R_AARCH64_end)
2843 {
2844 bfd_set_error (bfd_error_bad_value);
2845 error_ret_free_internal:
2846 if (elf_section_data (section)->relocs == NULL)
2847 free (internal_relocs);
2848 goto error_ret_free_local;
2849 }
2850
2851 /* Only look for stubs on unconditional branch and
2852 branch and link instructions. */
2853 if (r_type != (unsigned int) R_AARCH64_CALL26
2854 && r_type != (unsigned int) R_AARCH64_JUMP26)
2855 continue;
2856
2857 /* Now determine the call target, its name, value,
2858 section. */
2859 sym_sec = NULL;
2860 sym_value = 0;
2861 destination = 0;
2862 hash = NULL;
2863 sym_name = NULL;
2864 if (r_indx < symtab_hdr->sh_info)
2865 {
2866 /* It's a local symbol. */
2867 Elf_Internal_Sym *sym;
2868 Elf_Internal_Shdr *hdr;
2869
2870 if (local_syms == NULL)
2871 {
2872 local_syms
2873 = (Elf_Internal_Sym *) symtab_hdr->contents;
2874 if (local_syms == NULL)
2875 local_syms
2876 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2877 symtab_hdr->sh_info, 0,
2878 NULL, NULL, NULL);
2879 if (local_syms == NULL)
2880 goto error_ret_free_internal;
2881 }
2882
2883 sym = local_syms + r_indx;
2884 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2885 sym_sec = hdr->bfd_section;
2886 if (!sym_sec)
2887 /* This is an undefined symbol. It can never
2888 be resolved. */
2889 continue;
2890
2891 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2892 sym_value = sym->st_value;
2893 destination = (sym_value + irela->r_addend
2894 + sym_sec->output_offset
2895 + sym_sec->output_section->vma);
2896 st_type = ELF_ST_TYPE (sym->st_info);
2897 sym_name
2898 = bfd_elf_string_from_elf_section (input_bfd,
2899 symtab_hdr->sh_link,
2900 sym->st_name);
2901 }
2902 else
2903 {
2904 int e_indx;
2905
2906 e_indx = r_indx - symtab_hdr->sh_info;
2907 hash = ((struct elf64_aarch64_link_hash_entry *)
2908 elf_sym_hashes (input_bfd)[e_indx]);
2909
2910 while (hash->root.root.type == bfd_link_hash_indirect
2911 || hash->root.root.type == bfd_link_hash_warning)
2912 hash = ((struct elf64_aarch64_link_hash_entry *)
2913 hash->root.root.u.i.link);
2914
2915 if (hash->root.root.type == bfd_link_hash_defined
2916 || hash->root.root.type == bfd_link_hash_defweak)
2917 {
2918 struct elf64_aarch64_link_hash_table *globals =
2919 elf64_aarch64_hash_table (info);
2920 sym_sec = hash->root.root.u.def.section;
2921 sym_value = hash->root.root.u.def.value;
2922 /* For a destination in a shared library,
2923 use the PLT stub as target address to
2924 decide whether a branch stub is
2925 needed. */
2926 if (globals->root.splt != NULL && hash != NULL
2927 && hash->root.plt.offset != (bfd_vma) - 1)
2928 {
2929 sym_sec = globals->root.splt;
2930 sym_value = hash->root.plt.offset;
2931 if (sym_sec->output_section != NULL)
2932 destination = (sym_value
2933 + sym_sec->output_offset
2934 +
2935 sym_sec->output_section->vma);
2936 }
2937 else if (sym_sec->output_section != NULL)
2938 destination = (sym_value + irela->r_addend
2939 + sym_sec->output_offset
2940 + sym_sec->output_section->vma);
2941 }
2942 else if (hash->root.root.type == bfd_link_hash_undefined
2943 || (hash->root.root.type
2944 == bfd_link_hash_undefweak))
2945 {
2946 /* For a shared library, use the PLT stub as
2947 target address to decide whether a long
2948 branch stub is needed.
2949 For absolute code, they cannot be handled. */
2950 struct elf64_aarch64_link_hash_table *globals =
2951 elf64_aarch64_hash_table (info);
2952
2953 if (globals->root.splt != NULL && hash != NULL
2954 && hash->root.plt.offset != (bfd_vma) - 1)
2955 {
2956 sym_sec = globals->root.splt;
2957 sym_value = hash->root.plt.offset;
2958 if (sym_sec->output_section != NULL)
2959 destination = (sym_value
2960 + sym_sec->output_offset
2961 +
2962 sym_sec->output_section->vma);
2963 }
2964 else
2965 continue;
2966 }
2967 else
2968 {
2969 bfd_set_error (bfd_error_bad_value);
2970 goto error_ret_free_internal;
2971 }
2972 st_type = ELF_ST_TYPE (hash->root.type);
2973 sym_name = hash->root.root.root.string;
2974 }
2975
2976 /* Determine what (if any) linker stub is needed. */
2977 stub_type = aarch64_type_of_stub
2978 (info, section, irela, st_type, hash, destination);
2979 if (stub_type == aarch64_stub_none)
2980 continue;
2981
2982 /* Support for grouping stub sections. */
2983 id_sec = htab->stub_group[section->id].link_sec;
2984
2985 /* Get the name of this stub. */
2986 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
2987 irela);
2988 if (!stub_name)
2989 goto error_ret_free_internal;
2990
2991 stub_entry =
2992 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2993 stub_name, FALSE, FALSE);
2994 if (stub_entry != NULL)
2995 {
2996 /* The proper stub has already been created. */
2997 free (stub_name);
2998 continue;
2999 }
3000
3001 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3002 htab);
3003 if (stub_entry == NULL)
3004 {
3005 free (stub_name);
3006 goto error_ret_free_internal;
3007 }
3008
3009 stub_entry->target_value = sym_value;
3010 stub_entry->target_section = sym_sec;
3011 stub_entry->stub_type = stub_type;
3012 stub_entry->h = hash;
3013 stub_entry->st_type = st_type;
3014
3015 if (sym_name == NULL)
3016 sym_name = "unnamed";
3017 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3018 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3019 if (stub_entry->output_name == NULL)
3020 {
3021 free (stub_name);
3022 goto error_ret_free_internal;
3023 }
3024
3025 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3026 sym_name);
3027
3028 stub_changed = TRUE;
3029 }
3030
3031 /* We're done with the internal relocs, free them. */
3032 if (elf_section_data (section)->relocs == NULL)
3033 free (internal_relocs);
3034 }
3035 }
3036
3037 if (!stub_changed)
3038 break;
3039
3040 /* OK, we've added some stubs. Find out the new size of the
3041 stub sections. */
3042 for (stub_sec = htab->stub_bfd->sections;
3043 stub_sec != NULL; stub_sec = stub_sec->next)
3044 stub_sec->size = 0;
3045
3046 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3047
3048 /* Ask the linker to do its stuff. */
3049 (*htab->layout_sections_again) ();
3050 stub_changed = FALSE;
3051 }
3052
3053 return TRUE;
3054
3055error_ret_free_local:
3056 return FALSE;
3057}
3058
3059/* Build all the stubs associated with the current output file. The
3060 stubs are kept in a hash table attached to the main linker hash
3061 table. We also set up the .plt entries for statically linked PIC
3062 functions here. This function is called via aarch64_elf_finish in the
3063 linker. */
3064
3065bfd_boolean
3066elf64_aarch64_build_stubs (struct bfd_link_info *info)
3067{
3068 asection *stub_sec;
3069 struct bfd_hash_table *table;
3070 struct elf64_aarch64_link_hash_table *htab;
3071
3072 htab = elf64_aarch64_hash_table (info);
3073
3074 for (stub_sec = htab->stub_bfd->sections;
3075 stub_sec != NULL; stub_sec = stub_sec->next)
3076 {
3077 bfd_size_type size;
3078
3079 /* Ignore non-stub sections. */
3080 if (!strstr (stub_sec->name, STUB_SUFFIX))
3081 continue;
3082
3083 /* Allocate memory to hold the linker stubs. */
3084 size = stub_sec->size;
3085 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3086 if (stub_sec->contents == NULL && size != 0)
3087 return FALSE;
3088 stub_sec->size = 0;
3089 }
3090
3091 /* Build the stubs as directed by the stub hash table. */
3092 table = &htab->stub_hash_table;
3093 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3094
3095 return TRUE;
3096}
3097
3098
3099/* Add an entry to the code/data map for section SEC. */
3100
3101static void
3102elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3103{
3104 struct _aarch64_elf_section_data *sec_data =
3105 elf64_aarch64_section_data (sec);
3106 unsigned int newidx;
3107
3108 if (sec_data->map == NULL)
3109 {
3110 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3111 sec_data->mapcount = 0;
3112 sec_data->mapsize = 1;
3113 }
3114
3115 newidx = sec_data->mapcount++;
3116
3117 if (sec_data->mapcount > sec_data->mapsize)
3118 {
3119 sec_data->mapsize *= 2;
3120 sec_data->map = bfd_realloc_or_free
3121 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3122 }
3123
3124 if (sec_data->map)
3125 {
3126 sec_data->map[newidx].vma = vma;
3127 sec_data->map[newidx].type = type;
3128 }
3129}
3130
3131
/* Initialise maps of insn/data for input BFDs.  Walks the local symbols
   of ABFD and records every special mapping symbol into the per-section
   code/data map via elf64_aarch64_section_map_add.  */
void
bfd_elf64_aarch64_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* Make sure that we are dealing with an AArch64 elf binary.  */
  if (!is_aarch64_elf (abfd))
    return;

  /* Skip BFDs flagged as dynamic objects.  */
  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = &elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols. Mapping symbols are always local. */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);

  /* No internal symbols read? Skip this BFD. */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link,
						  isym->st_name);

	  /* Record the symbol's type character (the character following
	     the '$' prefix) together with its value in SEC's map.  */
	  if (bfd_is_aarch64_special_symbol_name
	      (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
	    elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
	}
    }
}
3177
3178/* Set option values needed during linking. */
3179void
3180bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3181 struct bfd_link_info *link_info,
3182 int no_enum_warn,
3183 int no_wchar_warn, int pic_veneer)
3184{
3185 struct elf64_aarch64_link_hash_table *globals;
3186
3187 globals = elf64_aarch64_hash_table (link_info);
3188 globals->pic_veneer = pic_veneer;
3189
3190 BFD_ASSERT (is_aarch64_elf (output_bfd));
3191 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3192 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3193}
3194
/* A mask with the low N bits set; N must be less than 32.  */
#define MASK(n) ((1u << (n)) - 1)
3196
/* Decode the 26-bit offset of unconditional branch.  The offset field
   occupies bits [25:0] of the instruction word.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & 0x03ffffffu;
}
3203
/* Decode the 19-bit offset of conditional branch and compare & branch.
   The offset field occupies bits [23:5] of the instruction word.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffffu;
}
3210
/* Decode the 19-bit offset of load literal.  The offset field occupies
   bits [23:5] of the instruction word.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffffu;
}
3217
/* Decode the 14-bit offset of test & branch.  The offset field occupies
   bits [18:5] of the instruction word.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  return (insn >> 5) & 0x3fffu;
}
3224
/* Decode the 16-bit imm of move wide.  The immediate field occupies
   bits [20:5] of the instruction word.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & 0xffffu;
}
3231
/* Decode the 21-bit imm of adr.  The immediate is split across the
   instruction word: immlo in bits [30:29], immhi in bits [23:5].  */
static inline uint32_t
decode_adr_imm (uint32_t insn)
{
  uint32_t immlo = (insn >> 29) & 0x3u;
  uint32_t immhi = (insn >> 3) & 0x1ffffcu;
  return immhi | immlo;
}
3238
/* Decode the 12-bit imm of add immediate.  The immediate field occupies
   bits [21:10] of the instruction word.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  return (insn >> 10) & 0xfffu;
}
3245
3246
/* Encode the 26-bit offset of unconditional branch: replace bits [25:0]
   of INSN with the low 26 bits of OFS.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~0x03ffffffu) | (ofs & 0x03ffffffu);
}
3253
/* Encode the 19-bit offset of conditional branch and compare & branch:
   replace bits [23:5] of INSN with the low 19 bits of OFS.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~0x00ffffe0u) | ((ofs & 0x7ffffu) << 5);
}
3260
/* Encode the 19-bit offset of load literal.  (The original comment said
   "Decode"; this is the encode direction.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}
3267
/* Encode the 14-bit offset of test & branch: replace bits [18:5] of
   INSN with the low 14 bits of OFS.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~0x0007ffe0u) | ((ofs & 0x3fffu) << 5);
}
3274
/* Reencode the imm field of move wide: replace bits [20:5] of INSN with
   the low 16 bits of IMM.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~0x001fffe0u) | ((imm & 0xffffu) << 5);
}
3281
/* Reencode the imm field of adr.  The 21-bit immediate is split: its
   low two bits go into bits [30:29] (immlo), the remaining 19 bits into
   bits [23:5] (immhi).  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  uint32_t fields = ((imm & 0x3u) << 29) | ((imm & 0x1ffffcu) << 3);
  return (insn & ~0x60ffffe0u) | fields;
}
3289
/* Reencode the imm field of ld/st pos immediate: replace bits [21:10]
   of INSN with the low 12 bits of IMM.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~0x003ffc00u) | ((imm & 0xfffu) << 10);
}
3296
/* Reencode the imm field of add immediate: replace bits [21:10] of INSN
   with the low 12 bits of IMM.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~0x003ffc00u) | ((imm & 0xfffu) << 10);
}
3303
/* Reencode mov[zn] to movz by setting opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
3310
/* Reencode mov[zn] to movn by clearing opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
3317
/* Insert the addend/value into the instruction or data object being
   relocated.

   ABFD supplies the endianness for data (as opposed to instruction)
   accesses.  ADDRESS points at the item to patch within the loaded
   section contents.  HOWTO describes the relocation being applied and
   ADDEND is the fully computed value to insert.

   Returns bfd_reloc_ok on success, bfd_reloc_overflow when the value
   does not fit the field (or has low bits that would be lost by the
   field's right shift), or bfd_reloc_notsupported for an unhandled
   howto.  */
static bfd_reloc_status_type
bfd_elf_aarch64_put_addend (bfd *abfd,
			    bfd_byte *address,
			    reloc_howto_type *howto, bfd_signed_vma addend)
{
  bfd_reloc_status_type status = bfd_reloc_ok;
  /* Keep the unshifted value for the low-bit checks below.  */
  bfd_signed_vma old_addend = addend;
  bfd_vma contents;
  int size;

  /* Fetch the object being patched.  AArch64 instructions are always
     little-endian; data items follow the BFD's endianness.  */
  size = bfd_get_reloc_size (howto);
  switch (size)
    {
    case 2:
      contents = bfd_get_16 (abfd, address);
      break;
    case 4:
      if (howto->src_mask != 0xffffffff)
	/* Must be 32-bit instruction, always little-endian.  */
	contents = bfd_getl32 (address);
      else
	/* Must be 32-bit data (endianness dependent).  */
	contents = bfd_get_32 (abfd, address);
      break;
    case 8:
      contents = bfd_get_64 (abfd, address);
      break;
    default:
      abort ();
    }

  /* Range-check the unshifted addend against the effective field
     width (bitsize plus the implicit scaling shift).  */
  switch (howto->complain_on_overflow)
    {
    case complain_overflow_dont:
      break;
    case complain_overflow_signed:
      status = aarch64_signed_overflow (addend,
					howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_unsigned:
      status = aarch64_unsigned_overflow (addend,
					  howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_bitfield:
    default:
      abort ();
    }

  addend >>= howto->rightshift;

  /* Splice the (shifted) addend into the immediate field appropriate
     for this relocation type.  */
  switch (howto->type)
    {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
      contents = reencode_branch_ofs_26 (contents, addend);
      break;

    case R_AARCH64_CONDBR19:
      contents = reencode_cond_branch_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TSTBR14:
      contents = reencode_tst_branch_ofs_14 (contents, addend);
      break;

    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_GOT_LD_PREL19:
      /* Reject a value whose low bits would be discarded by the
	 field's scaling shift.  */
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      contents = reencode_ld_lit_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TLSDESC_CALL:
      /* No bits to patch; the relocation only marks the call site.  */
      break;

    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      contents = reencode_adr_imm (contents, addend);
      break;

    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_ADD_ABS_LO12_NC:
      /* Corresponds to: add rd, rn, #uimm12 to provide the low order
	 12 bits of the page offset following
	 R_AARCH64_ADR_PREL_PG_HI21 which computes the
	 (pc-relative) page base.  */
      contents = reencode_add_imm (contents, addend);
      break;

    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
      /* Reject a value whose low bits would be discarded by the
	 field's scaling shift.  */
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
	 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
	 which computes the (pc-relative) page base.  */
      contents = reencode_ldst_pos_imm (contents, addend);
      break;

      /* Group relocations to create high bits of a 16, 32, 48 or 64
	 bit signed data or abs address inline.  Will change
	 instruction to MOVN or MOVZ depending on sign of calculated
	 value.  */

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
      /* NOTE: We can only come here with movz or movn.  */
      if (addend < 0)
	{
	  /* Force use of MOVN.  */
	  addend = ~addend;
	  contents = reencode_movzn_to_movn (contents);
	}
      else
	{
	  /* Force use of MOVZ.  */
	  contents = reencode_movzn_to_movz (contents);
	}
      /* fall through */

      /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
	 data or abs address inline.  */

    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      contents = reencode_movw_imm (contents, addend);
      break;

    default:
      /* Repack simple data: only handled when dst_mask is a
	 contiguous low-bit mask.  */
      if (howto->dst_mask & (howto->dst_mask + 1))
	return bfd_reloc_notsupported;

      contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
      break;
    }

  /* Write the patched item back, mirroring the endianness rules used
     when it was read.  */
  switch (size)
    {
    case 2:
      bfd_put_16 (abfd, contents, address);
      break;
    case 4:
      if (howto->dst_mask != 0xffffffff)
	/* must be 32-bit instruction, always little-endian */
	bfd_putl32 (contents, address);
      else
	/* must be 32-bit data (endianness dependent) */
	bfd_put_32 (abfd, contents, address);
      break;
    case 8:
      bfd_put_64 (abfd, contents, address);
      break;
    default:
      abort ();
    }

  return status;
}
3505
/* Return the VMA of the GOT entry for symbol H, initialising the entry
   with VALUE the first time round when the link resolves it locally.
   The low bit of h->got.offset is used as an "already initialised"
   marker (GOT entries are 8-byte aligned, so the bit is free).
   *UNRESOLVED_RELOC_P is cleared when the dynamic linker will supply
   the value instead.
   NOTE(review): only the H != NULL path fills in OFF; with H == NULL
   the initial (bfd_vma) -1 is returned unchanged — confirm callers
   guard against that.  */
static bfd_vma
aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
				 struct elf64_aarch64_link_hash_table
				 *globals, struct bfd_link_info *info,
				 bfd_vma value, bfd *output_bfd,
				 bfd_boolean *unresolved_reloc_p)
{
  bfd_vma off = (bfd_vma) - 1;
  asection *basegot = globals->root.sgot;
  bfd_boolean dyn = globals->root.dynamic_sections_created;

  if (h != NULL)
    {
      off = h->got.offset;
      BFD_ASSERT (off != (bfd_vma) - 1);
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
	  || (info->shared
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	  || (ELF_ST_VISIBILITY (h->other)
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This is actually a static link, or it is a -Bsymbolic link
	     and the symbol is defined locally.  We must initialize this
	     entry in the global offset table.  Since the offset must
	     always be a multiple of 8, we use the least significant bit
	     to record whether we have initialized it already.
	     When doing a dynamic link, we create a .rel(a).got relocation
	     entry to initialize the value.  This is done in the
	     finish_dynamic_symbol routine.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      bfd_put_64 (output_bfd, value, basegot->contents + off);
	      h->got.offset |= 1;
	    }
	}
      else
	*unresolved_reloc_p = FALSE;

      /* Convert the section-relative offset into an absolute VMA.  */
      off = off + basegot->output_section->vma + basegot->output_offset;
    }

  return off;
}
3551
3552/* Change R_TYPE to a more efficient access model where possible,
3553 return the new reloc type. */
3554
3555static unsigned int
3556aarch64_tls_transition_without_check (unsigned int r_type,
3557 struct elf_link_hash_entry *h)
3558{
3559 bfd_boolean is_local = h == NULL;
3560 switch (r_type)
3561 {
3562 case R_AARCH64_TLSGD_ADR_PAGE21:
3563 case R_AARCH64_TLSDESC_ADR_PAGE:
3564 return is_local
3565 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3566
3567 case R_AARCH64_TLSGD_ADD_LO12_NC:
3568 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3569 return is_local
3570 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3571 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3572
3573 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3574 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3575
3576 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3577 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3578
3579 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3580 case R_AARCH64_TLSDESC_CALL:
3581 /* Instructions with these relocations will become NOPs. */
3582 return R_AARCH64_NONE;
3583 }
3584
3585 return r_type;
3586}
3587
3588static unsigned int
3589aarch64_reloc_got_type (unsigned int r_type)
3590{
3591 switch (r_type)
3592 {
3593 case R_AARCH64_LD64_GOT_LO12_NC:
3594 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3595 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
3596 return GOT_NORMAL;
3597
3598 case R_AARCH64_TLSGD_ADR_PAGE21:
3599 case R_AARCH64_TLSGD_ADD_LO12_NC:
3600 return GOT_TLS_GD;
3601
3602 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3603 case R_AARCH64_TLSDESC_ADR_PAGE:
3604 case R_AARCH64_TLSDESC_CALL:
3605 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3606 return GOT_TLSDESC_GD;
3607
3608 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3609 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3610 return GOT_TLS_IE;
3611
3612 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3613 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3614 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3615 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3616 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3617 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3618 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3619 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3620 return GOT_UNKNOWN;
3621 }
3622 return GOT_UNKNOWN;
3623}
3624
3625static bfd_boolean
3626aarch64_can_relax_tls (bfd *input_bfd,
3627 struct bfd_link_info *info,
3628 unsigned int r_type,
3629 struct elf_link_hash_entry *h,
3630 unsigned long r_symndx)
3631{
3632 unsigned int symbol_got_type;
3633 unsigned int reloc_got_type;
3634
3635 if (! IS_AARCH64_TLS_RELOC (r_type))
3636 return FALSE;
3637
3638 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3639 reloc_got_type = aarch64_reloc_got_type (r_type);
3640
3641 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3642 return TRUE;
3643
3644 if (info->shared)
3645 return FALSE;
3646
3647 if (h && h->root.type == bfd_link_hash_undefweak)
3648 return FALSE;
3649
3650 return TRUE;
3651}
3652
3653static unsigned int
3654aarch64_tls_transition (bfd *input_bfd,
3655 struct bfd_link_info *info,
3656 unsigned int r_type,
3657 struct elf_link_hash_entry *h,
3658 unsigned long r_symndx)
3659{
3660 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3661 return r_type;
3662
3663 return aarch64_tls_transition_without_check (r_type, h);
3664}
3665
3666/* Return the base VMA address which should be subtracted from real addresses
3667 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3668
3669static bfd_vma
3670dtpoff_base (struct bfd_link_info *info)
3671{
3672 /* If tls_sec is NULL, we should have signalled an error already. */
3673 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3674 return elf_hash_table (info)->tls_sec->vma;
3675}
3676
3677
3678/* Return the base VMA address which should be subtracted from real addresses
3679 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3680
3681static bfd_vma
3682tpoff_base (struct bfd_link_info *info)
3683{
3684 struct elf_link_hash_table *htab = elf_hash_table (info);
3685
3686 /* If tls_sec is NULL, we should have signalled an error already. */
3687 if (htab->tls_sec == NULL)
3688 return 0;
3689
3690 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3691 htab->tls_sec->alignment_power);
3692 return htab->tls_sec->vma - base;
3693}
3694
3695static bfd_vma *
3696symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3697 unsigned long r_symndx)
3698{
3699 /* Calculate the address of the GOT entry for symbol
3700 referred to in h. */
3701 if (h != NULL)
3702 return &h->got.offset;
3703 else
3704 {
3705 /* local symbol */
3706 struct elf_aarch64_local_symbol *l;
3707
3708 l = elf64_aarch64_locals (input_bfd);
3709 return &l[r_symndx].got_offset;
3710 }
3711}
3712
3713static void
3714symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3715 unsigned long r_symndx)
3716{
3717 bfd_vma *p;
3718 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3719 *p |= 1;
3720}
3721
3722static int
3723symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3724 unsigned long r_symndx)
3725{
3726 bfd_vma value;
3727 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3728 return value & 1;
3729}
3730
3731static bfd_vma
3732symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3733 unsigned long r_symndx)
3734{
3735 bfd_vma value;
3736 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3737 value &= ~1;
3738 return value;
3739}
3740
3741static bfd_vma *
3742symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3743 unsigned long r_symndx)
3744{
3745 /* Calculate the address of the GOT entry for symbol
3746 referred to in h. */
3747 if (h != NULL)
3748 {
3749 struct elf64_aarch64_link_hash_entry *eh;
3750 eh = (struct elf64_aarch64_link_hash_entry *) h;
3751 return &eh->tlsdesc_got_jump_table_offset;
3752 }
3753 else
3754 {
3755 /* local symbol */
3756 struct elf_aarch64_local_symbol *l;
3757
3758 l = elf64_aarch64_locals (input_bfd);
3759 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3760 }
3761}
3762
3763static void
3764symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3765 unsigned long r_symndx)
3766{
3767 bfd_vma *p;
3768 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3769 *p |= 1;
3770}
3771
3772static int
3773symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3774 struct elf_link_hash_entry *h,
3775 unsigned long r_symndx)
3776{
3777 bfd_vma value;
3778 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3779 return value & 1;
3780}
3781
3782static bfd_vma
3783symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3784 unsigned long r_symndx)
3785{
3786 bfd_vma value;
3787 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3788 value &= ~1;
3789 return value;
3790}
3791
3792/* Perform a relocation as part of a final link. */
3793static bfd_reloc_status_type
3794elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3795 bfd *input_bfd,
3796 bfd *output_bfd,
3797 asection *input_section,
3798 bfd_byte *contents,
3799 Elf_Internal_Rela *rel,
3800 bfd_vma value,
3801 struct bfd_link_info *info,
3802 asection *sym_sec,
3803 struct elf_link_hash_entry *h,
3804 bfd_boolean *unresolved_reloc_p,
3805 bfd_boolean save_addend,
3806 bfd_vma *saved_addend)
3807{
3808 unsigned int r_type = howto->type;
3809 unsigned long r_symndx;
3810 bfd_byte *hit_data = contents + rel->r_offset;
3811 bfd_vma place;
3812 bfd_signed_vma signed_addend;
3813 struct elf64_aarch64_link_hash_table *globals;
3814 bfd_boolean weak_undef_p;
3815
3816 globals = elf64_aarch64_hash_table (info);
3817
3818 BFD_ASSERT (is_aarch64_elf (input_bfd));
3819
3820 r_symndx = ELF64_R_SYM (rel->r_info);
3821
3822 /* It is possible to have linker relaxations on some TLS access
3823 models. Update our information here. */
3824 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3825
3826 if (r_type != howto->type)
3827 howto = elf64_aarch64_howto_from_type (r_type);
3828
3829 place = input_section->output_section->vma
3830 + input_section->output_offset + rel->r_offset;
3831
3832 /* Get addend, accumulating the addend for consecutive relocs
3833 which refer to the same offset. */
3834 signed_addend = saved_addend ? *saved_addend : 0;
3835 signed_addend += rel->r_addend;
3836
3837 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3838 : bfd_is_und_section (sym_sec));
3839 switch (r_type)
3840 {
3841 case R_AARCH64_NONE:
3842 case R_AARCH64_NULL:
3843 case R_AARCH64_TLSDESC_CALL:
3844 *unresolved_reloc_p = FALSE;
3845 return bfd_reloc_ok;
3846
3847 case R_AARCH64_ABS64:
3848
3849 /* When generating a shared object or relocatable executable, these
3850 relocations are copied into the output file to be resolved at
3851 run time. */
3852 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3853 && (input_section->flags & SEC_ALLOC)
3854 && (h == NULL
3855 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3856 || h->root.type != bfd_link_hash_undefweak))
3857 {
3858 Elf_Internal_Rela outrel;
3859 bfd_byte *loc;
3860 bfd_boolean skip, relocate;
3861 asection *sreloc;
3862
3863 *unresolved_reloc_p = FALSE;
3864
3865 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3866 input_section, 1);
3867 if (sreloc == NULL)
3868 return bfd_reloc_notsupported;
3869
3870 skip = FALSE;
3871 relocate = FALSE;
3872
3873 outrel.r_addend = signed_addend;
3874 outrel.r_offset =
3875 _bfd_elf_section_offset (output_bfd, info, input_section,
3876 rel->r_offset);
3877 if (outrel.r_offset == (bfd_vma) - 1)
3878 skip = TRUE;
3879 else if (outrel.r_offset == (bfd_vma) - 2)
3880 {
3881 skip = TRUE;
3882 relocate = TRUE;
3883 }
3884
3885 outrel.r_offset += (input_section->output_section->vma
3886 + input_section->output_offset);
3887
3888 if (skip)
3889 memset (&outrel, 0, sizeof outrel);
3890 else if (h != NULL
3891 && h->dynindx != -1
3892 && (!info->shared || !info->symbolic || !h->def_regular))
3893 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3894 else
3895 {
3896 int symbol;
3897
3898 /* On SVR4-ish systems, the dynamic loader cannot
3899 relocate the text and data segments independently,
3900 so the symbol does not matter. */
3901 symbol = 0;
3902 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3903 outrel.r_addend += value;
3904 }
3905
3906 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3907 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3908
3909 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3910 {
3911 /* Sanity to check that we have previously allocated
3912 sufficient space in the relocation section for the
3913 number of relocations we actually want to emit. */
3914 abort ();
3915 }
3916
3917 /* If this reloc is against an external symbol, we do not want to
3918 fiddle with the addend. Otherwise, we need to include the symbol
3919 value so that it becomes an addend for the dynamic reloc. */
3920 if (!relocate)
3921 return bfd_reloc_ok;
3922
3923 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3924 contents, rel->r_offset, value,
3925 signed_addend);
3926 }
3927 else
3928 value += signed_addend;
3929 break;
3930
3931 case R_AARCH64_JUMP26:
3932 case R_AARCH64_CALL26:
3933 {
3934 asection *splt = globals->root.splt;
3935 bfd_boolean via_plt_p =
3936 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3937
3938 /* A call to an undefined weak symbol is converted to a jump to
3939 the next instruction unless a PLT entry will be created.
3940 The jump to the next instruction is optimized as a NOP.
3941 Do the same for local undefined symbols. */
3942 if (weak_undef_p && ! via_plt_p)
3943 {
3944 bfd_putl32 (INSN_NOP, hit_data);
3945 return bfd_reloc_ok;
3946 }
3947
3948 /* If the call goes through a PLT entry, make sure to
3949 check distance to the right destination address. */
3950 if (via_plt_p)
3951 {
3952 value = (splt->output_section->vma
3953 + splt->output_offset + h->plt.offset);
3954 *unresolved_reloc_p = FALSE;
3955 }
3956
3957 /* If the target symbol is global and marked as a function the
3958 relocation applies a function call or a tail call. In this
3959 situation we can veneer out of range branches. The veneers
3960 use IP0 and IP1 hence cannot be used arbitrary out of range
3961 branches that occur within the body of a function. */
3962 if (h && h->type == STT_FUNC)
3963 {
3964 /* Check if a stub has to be inserted because the destination
3965 is too far away. */
3966 if (! aarch64_valid_branch_p (value, place))
3967 {
3968 /* The target is out of reach, so redirect the branch to
3969 the local stub for this function. */
3970 struct elf64_aarch64_stub_hash_entry *stub_entry;
3971 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3972 sym_sec, h,
3973 rel, globals);
3974 if (stub_entry != NULL)
3975 value = (stub_entry->stub_offset
3976 + stub_entry->stub_sec->output_offset
3977 + stub_entry->stub_sec->output_section->vma);
3978 }
3979 }
3980 }
3981 value = aarch64_resolve_relocation (r_type, place, value,
3982 signed_addend, weak_undef_p);
3983 break;
3984
3985 case R_AARCH64_ABS16:
3986 case R_AARCH64_ABS32:
3987 case R_AARCH64_ADD_ABS_LO12_NC:
3988 case R_AARCH64_ADR_PREL_LO21:
3989 case R_AARCH64_ADR_PREL_PG_HI21:
3990 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3991 case R_AARCH64_CONDBR19:
3992 case R_AARCH64_LD_PREL_LO19:
3993 case R_AARCH64_LDST8_ABS_LO12_NC:
3994 case R_AARCH64_LDST16_ABS_LO12_NC:
3995 case R_AARCH64_LDST32_ABS_LO12_NC:
3996 case R_AARCH64_LDST64_ABS_LO12_NC:
3997 case R_AARCH64_LDST128_ABS_LO12_NC:
3998 case R_AARCH64_MOVW_SABS_G0:
3999 case R_AARCH64_MOVW_SABS_G1:
4000 case R_AARCH64_MOVW_SABS_G2:
4001 case R_AARCH64_MOVW_UABS_G0:
4002 case R_AARCH64_MOVW_UABS_G0_NC:
4003 case R_AARCH64_MOVW_UABS_G1:
4004 case R_AARCH64_MOVW_UABS_G1_NC:
4005 case R_AARCH64_MOVW_UABS_G2:
4006 case R_AARCH64_MOVW_UABS_G2_NC:
4007 case R_AARCH64_MOVW_UABS_G3:
4008 case R_AARCH64_PREL16:
4009 case R_AARCH64_PREL32:
4010 case R_AARCH64_PREL64:
4011 case R_AARCH64_TSTBR14:
4012 value = aarch64_resolve_relocation (r_type, place, value,
4013 signed_addend, weak_undef_p);
4014 break;
4015
4016 case R_AARCH64_LD64_GOT_LO12_NC:
4017 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 4018 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
4019 if (globals->root.sgot == NULL)
4020 BFD_ASSERT (h != NULL);
4021
4022 if (h != NULL)
4023 {
4024 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4025 output_bfd,
4026 unresolved_reloc_p);
4027 value = aarch64_resolve_relocation (r_type, place, value,
4028 0, weak_undef_p);
4029 }
4030 break;
4031
4032 case R_AARCH64_TLSGD_ADR_PAGE21:
4033 case R_AARCH64_TLSGD_ADD_LO12_NC:
4034 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4035 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4036 if (globals->root.sgot == NULL)
4037 return bfd_reloc_notsupported;
4038
4039 value = (symbol_got_offset (input_bfd, h, r_symndx)
4040 + globals->root.sgot->output_section->vma
4041 + globals->root.sgot->output_section->output_offset);
4042
4043 value = aarch64_resolve_relocation (r_type, place, value,
4044 0, weak_undef_p);
4045 *unresolved_reloc_p = FALSE;
4046 break;
4047
4048 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4049 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4050 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4051 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4052 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4053 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4054 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4055 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4056 value = aarch64_resolve_relocation (r_type, place, value,
bb3f9ed8 4057 signed_addend - tpoff_base (info), weak_undef_p);
a06ea964
NC
4058 *unresolved_reloc_p = FALSE;
4059 break;
4060
4061 case R_AARCH64_TLSDESC_ADR_PAGE:
4062 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4063 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4064 case R_AARCH64_TLSDESC_ADD:
4065 case R_AARCH64_TLSDESC_LDR:
4066 if (globals->root.sgot == NULL)
4067 return bfd_reloc_notsupported;
4068
4069 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4070 + globals->root.sgotplt->output_section->vma
4071 + globals->root.sgotplt->output_section->output_offset
4072 + globals->sgotplt_jump_table_size);
4073
4074 value = aarch64_resolve_relocation (r_type, place, value,
4075 0, weak_undef_p);
4076 *unresolved_reloc_p = FALSE;
4077 break;
4078
4079 default:
4080 return bfd_reloc_notsupported;
4081 }
4082
4083 if (saved_addend)
4084 *saved_addend = value;
4085
4086 /* Only apply the final relocation in a sequence. */
4087 if (save_addend)
4088 return bfd_reloc_continue;
4089
4090 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4091}
4092
4093/* Handle TLS relaxations. Relaxing is possible for symbols that use
4094 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4095 link.
4096
4097 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4098 is to then call final_link_relocate. Return other values in the
4099 case of error. */
4100
4101static bfd_reloc_status_type
4102elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4103 bfd *input_bfd, bfd_byte *contents,
4104 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4105{
4106 bfd_boolean is_local = h == NULL;
4107 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4108 unsigned long insn;
4109
4110 BFD_ASSERT (globals && input_bfd && contents && rel);
4111
4112 switch (r_type)
4113 {
4114 case R_AARCH64_TLSGD_ADR_PAGE21:
4115 case R_AARCH64_TLSDESC_ADR_PAGE:
4116 if (is_local)
4117 {
4118 /* GD->LE relaxation:
4119 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4120 or
4121 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4122 */
4123 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4124 return bfd_reloc_continue;
4125 }
4126 else
4127 {
4128 /* GD->IE relaxation:
4129 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4130 or
4131 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4132 */
4133 insn = bfd_getl32 (contents + rel->r_offset);
4134 return bfd_reloc_continue;
4135 }
4136
4137 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4138 if (is_local)
4139 {
4140 /* GD->LE relaxation:
4141 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4142 */
4143 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4144 return bfd_reloc_continue;
4145 }
4146 else
4147 {
4148 /* GD->IE relaxation:
4149 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4150 */
4151 insn = bfd_getl32 (contents + rel->r_offset);
4152 insn &= 0xfffffff0;
4153 bfd_putl32 (insn, contents + rel->r_offset);
4154 return bfd_reloc_continue;
4155 }
4156
4157 case R_AARCH64_TLSGD_ADD_LO12_NC:
4158 if (is_local)
4159 {
4160 /* GD->LE relaxation
4161 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4162 bl __tls_get_addr => mrs x1, tpidr_el0
4163 nop => add x0, x1, x0
4164 */
4165
4166 /* First kill the tls_get_addr reloc on the bl instruction. */
4167 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4168 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4169
4170 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4171 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4172 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4173 return bfd_reloc_continue;
4174 }
4175 else
4176 {
4177 /* GD->IE relaxation
4178 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4179 BL __tls_get_addr => mrs x1, tpidr_el0
4180 R_AARCH64_CALL26
4181 NOP => add x0, x1, x0
4182 */
4183
4184 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4185
4186 /* Remove the relocation on the BL instruction. */
4187 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4188
4189 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4190
4191 /* We choose to fixup the BL and NOP instructions using the
4192 offset from the second relocation to allow flexibility in
4193 scheduling instructions between the ADD and BL. */
4194 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4195 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4196 return bfd_reloc_continue;
4197 }
4198
4199 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4200 case R_AARCH64_TLSDESC_CALL:
4201 /* GD->IE/LE relaxation:
4202 add x0, x0, #:tlsdesc_lo12:var => nop
4203 blr xd => nop
4204 */
4205 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4206 return bfd_reloc_ok;
4207
4208 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4209 /* IE->LE relaxation:
4210 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4211 */
4212 if (is_local)
4213 {
4214 insn = bfd_getl32 (contents + rel->r_offset);
4215 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4216 }
4217 return bfd_reloc_continue;
4218
4219 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4220 /* IE->LE relaxation:
4221 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4222 */
4223 if (is_local)
4224 {
4225 insn = bfd_getl32 (contents + rel->r_offset);
4226 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4227 }
4228 return bfd_reloc_continue;
4229
4230 default:
4231 return bfd_reloc_continue;
4232 }
4233
4234 return bfd_reloc_ok;
4235}
4236
/* Relocate an AArch64 ELF section.

   Walk every relocation in INPUT_SECTION, optionally relax TLS
   sequences, apply the relocation to CONTENTS, and emit any dynamic
   (GOT/TLS) relocations and GOT entries that become due the first
   time a symbol's GOT slot is reached.  Returns FALSE on hard error
   (bad reloc, unresolvable reloc, or a callback requesting abort).  */

static bfd_boolean
elf64_aarch64_relocate_section (bfd *output_bfd,
				struct bfd_link_info *info,
				bfd *input_bfd,
				asection *input_section,
				bfd_byte *contents,
				Elf_Internal_Rela *relocs,
				Elf_Internal_Sym *local_syms,
				asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf64_aarch64_link_hash_table *globals;
  /* save_addend/addend implement chaining of the computed value
     between consecutive relocations at the same offset.  */
  bfd_boolean save_addend = FALSE;
  bfd_vma addend = 0;

  globals = elf64_aarch64_hash_table (info);

  symtab_hdr = &elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      unsigned int r_type;
      unsigned int relaxed_r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF64_R_SYM (rel->r_info);
      r_type = ELF64_R_TYPE (rel->r_info);

      bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      /* Resolve the symbol: local symbols come from local_syms /
	 local_sections, globals via the hash table.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sym_type = ELF64_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  */
	  if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section, rel->r_offset, TRUE))
		return FALSE;
	    }

	  /* Reject relocation numbers outside the known range.  */
	  if (r_type >= R_AARCH64_dyn_max)
	    {
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  bfd_boolean warned;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    rel->r_addend += sec->output_offset;
	  continue;
	}

      /* Pick a symbol name for diagnostics; fall back to the section
	 name for nameless local symbols.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Warn when a TLS reloc is used with a non-TLS symbol or vice
	 versa (only for defined symbols).  */
      if (r_symndx != 0
	  && r_type != R_AARCH64_NONE
	  && r_type != R_AARCH64_NULL
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section, (long) rel->r_offset, howto->name, name);
	}


      /* We relax only if we can see that there can be a valid transition
	 from a reloc type to another.
	 We call elf64_aarch64_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want.  */

      relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
					       h, r_symndx);
      if (relaxed_r_type != r_type)
	{
	  r_type = relaxed_r_type;
	  howto = elf64_aarch64_howto_from_type (r_type);

	  r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      /* There may be multiple consecutive relocations for the
	 same offset.  In that case we are supposed to treat the
	 output of each relocation as the addend for the next.  */
      if (rel + 1 < relend
	  && rel->r_offset == rel[1].r_offset
	  && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
	  && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
	save_addend = TRUE;
      else
	save_addend = FALSE;

      if (r == bfd_reloc_continue)
	r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
					       input_section, contents, rel,
					       relocation, info, sec,
					       h, &unresolved_reloc,
					       save_addend, &addend);

      /* Post-processing: on first sight of a symbol's GOT slot, fill
	 the GOT entry and emit any required dynamic relocations.  No
	 default case: non-TLS reloc types need no post-processing.  */
      switch (r_type)
	{
	case R_AARCH64_TLSGD_ADR_PAGE21:
	case R_AARCH64_TLSGD_ADD_LO12_NC:
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);
	      /* indx == 0 means the symbol resolves locally.  */
	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      /* Dynamic relocs are needed for shared links or truly
		 dynamic symbols, except undefined-weak hidden ones.  */
	      need_relocs =
		(info->shared || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;
		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
		  rela.r_addend = 0;
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;


		  /* NOTE(review): RELOC_SIZE is written with 'htab'
		     here but 'globals' below; no 'htab' local exists
		     in this function, so this only compiles if the
		     macro ignores its argument -- confirm and unify.  */
		  loc = globals->root.srelgot->contents;
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);
		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  if (indx == 0)
		    {
		      /* Locally-resolved: the DTPREL half of the GOT
			 pair can be written directly.  */
		      bfd_put_64 (output_bfd,
				  relocation - dtpoff_base (info),
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		  else
		    {
		      /* This TLS symbol is global.  We emit a
			 relocation to fixup the tls offset at load
			 time.  */
		      rela.r_info =
			ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
		      rela.r_addend = 0;
		      rela.r_offset =
			(globals->root.sgot->output_section->vma
			 + globals->root.sgot->output_offset + off
			 + GOT_ENTRY_SIZE);

		      loc = globals->root.srelgot->contents;
		      loc += globals->root.srelgot->reloc_count++
			* RELOC_SIZE (globals);
		      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
		      bfd_put_64 (output_bfd, (bfd_vma) 0,
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		}
	      else
		{
		  /* Static link: module id is always 1 and the offset
		     is known now.  */
		  bfd_put_64 (output_bfd, (bfd_vma) 1,
			      globals->root.sgot->contents + off);
		  bfd_put_64 (output_bfd,
			      relocation - dtpoff_base (info),
			      globals->root.sgot->contents + off
			      + GOT_ENTRY_SIZE);
		}

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
	case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);

	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      need_relocs =
		(info->shared || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);
		  else
		    rela.r_addend = 0;

		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;

		  /* NOTE(review): 'htab' again -- see note above.  */
		  loc = globals->root.srelgot->contents;
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);

		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  bfd_put_64 (output_bfd, rela.r_addend,
			      globals->root.sgot->contents + off);
		}
	      else
		bfd_put_64 (output_bfd, relocation - tpoff_base (info),
			    globals->root.sgot->contents + off);

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case R_AARCH64_TLSLE_ADD_TPREL_LO12:
	case R_AARCH64_TLSLE_ADD_TPREL_HI12:
	case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G2:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
	  /* Local-exec needs no GOT entry or dynamic reloc.  */
	  break;

	case R_AARCH64_TLSDESC_ADR_PAGE:
	case R_AARCH64_TLSDESC_LD64_LO12_NC:
	case R_AARCH64_TLSDESC_ADD_LO12_NC:
	  if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      int indx = h && h->dynindx != -1 ? h->dynindx : 0;
	      bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);

	      need_relocs = (h == NULL
			     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			     || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);
	      BFD_ASSERT (globals->root.sgot != NULL);

	      if (need_relocs)
		{
		  bfd_byte *loc;
		  Elf_Internal_Rela rela;
		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
		  rela.r_addend = 0;
		  rela.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + off + globals->sgotplt_jump_table_size);

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);

		  /* Allocate the next available slot in the PLT reloc
		     section to hold our R_AARCH64_TLSDESC, the next
		     available slot is determined from reloc_count,
		     which we step.  But note, reloc_count was
		     artifically moved down while allocating slots for
		     real PLT relocs such that all of the PLT relocs
		     will fit above the initial reloc_count and the
		     extra stuff will fit below.  */
		  loc = globals->root.srelplt->contents;
		  loc += globals->root.srelplt->reloc_count++
		    * RELOC_SIZE (globals);

		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  /* Zero the descriptor pair; ld.so fills it in.  */
		  bfd_put_64 (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size);
		  bfd_put_64 (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size +
			      GOT_ENTRY_SIZE);
		}

	      symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;
	}

      /* The chained addend only survives across same-offset relocs.  */
      if (!save_addend)
	addend = 0;


      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      +rel->r_offset) != (bfd_vma) - 1)
	{
	  (*_bfd_error_handler)
	    (_
	     ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd, input_section, (long) rel->r_offset, howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      /* Report any failure status through the linker callbacks.  */
      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((!h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
4680
4681/* Set the right machine number. */
4682
4683static bfd_boolean
4684elf64_aarch64_object_p (bfd *abfd)
4685{
4686 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4687 return TRUE;
4688}
4689
4690/* Function to keep AArch64 specific flags in the ELF header. */
4691
4692static bfd_boolean
4693elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4694{
4695 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4696 {
4697 }
4698 else
4699 {
4700 elf_elfheader (abfd)->e_flags = flags;
4701 elf_flags_init (abfd) = TRUE;
4702 }
4703
4704 return TRUE;
4705}
4706
4707/* Copy backend specific data from one object module to another. */
4708
4709static bfd_boolean
4710elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4711{
4712 flagword in_flags;
4713
4714 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4715 return TRUE;
4716
4717 in_flags = elf_elfheader (ibfd)->e_flags;
4718
4719 elf_elfheader (obfd)->e_flags = in_flags;
4720 elf_flags_init (obfd) = TRUE;
4721
4722 /* Also copy the EI_OSABI field. */
4723 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4724 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4725
4726 /* Copy object attributes. */
4727 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4728
4729 return TRUE;
4730}
4731
/* Merge backend specific data from an object file to the output
   object file when linking.

   Returns FALSE only on an endianness mismatch; flag incompatibility
   is tracked in FLAGS_COMPATIBLE (currently never cleared, so flag
   differences are tolerated).  */

static bfd_boolean
elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianess.  */
  if (!_bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
    return TRUE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
				  bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if ((bfd_get_section_flags (ibfd, sec)
	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	    only_data_sections = FALSE;

	  null_input_bfd = FALSE;
	  /* NOTE(review): this break is unconditional, so only the
	     FIRST section is examined and ONLY_DATA_SECTIONS reflects
	     just that section -- confirm whether the break was meant
	     to fire only once a code section is found.  */
	  break;
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  return flags_compatible;
}
4818
4819/* Display the flags field. */
4820
4821static bfd_boolean
4822elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4823{
4824 FILE *file = (FILE *) ptr;
4825 unsigned long flags;
4826
4827 BFD_ASSERT (abfd != NULL && ptr != NULL);
4828
4829 /* Print normal ELF private data. */
4830 _bfd_elf_print_private_bfd_data (abfd, ptr);
4831
4832 flags = elf_elfheader (abfd)->e_flags;
4833 /* Ignore init flag - it may not be set, despite the flags field
4834 containing valid data. */
4835
4836 /* xgettext:c-format */
4837 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4838
4839 if (flags)
4840 fprintf (file, _("<Unrecognised flag bits set>"));
4841
4842 fputc ('\n', file);
4843
4844 return TRUE;
4845}
4846
4847/* Update the got entry reference counts for the section being removed. */
4848
4849static bfd_boolean
4850elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED,
4851 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4852 asection *sec ATTRIBUTE_UNUSED,
4853 const Elf_Internal_Rela *
4854 relocs ATTRIBUTE_UNUSED)
4855{
4856 return TRUE;
4857}
4858
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.

   Decides between: keeping/dropping a PLT entry for functions,
   aliasing weak symbols to their real definition, or (for data in a
   non-shared link) reserving .dynbss space and a copy reloc.  */

static bfd_boolean
elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
				     struct elf_link_hash_entry *h)
{
  struct elf64_aarch64_link_hash_table *htab;
  asection *s;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->needs_plt)
    {
      /* Drop the PLT entry if it is unreferenced, the symbol binds
	 locally, or it is an undefined weak with non-default
	 visibility.  */
      if (h->plt.refcount <= 0
	  || SYMBOL_CALLS_LOCAL (info, h)
	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This case can occur if we saw a CALL26 reloc in
	     an input file, but the symbol wasn't referred to
	     by a dynamic object or all references were
	     garbage collected.  In which case we can end up
	     resolving.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    /* It's possible that we incorrectly decided a .plt reloc was
       needed for an R_X86_64_PC32 reloc to a non-function sym in
       check_relocs.  We can't decide accurately between function and
       non-function syms in check-relocs; Objects loaded later in
       the link may change h->type.  So fix it now.
       NOTE(review): the R_X86_64_PC32 mention looks copied from the
       x86-64 backend -- presumably the AArch64 analogue is meant.  */
    h->plt.offset = (bfd_vma) - 1;


  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	h->non_got_ref = h->u.weakdef->non_got_ref;
      return TRUE;
    }

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (info->shared)
    return TRUE;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return TRUE;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return TRUE;
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf64_aarch64_hash_table (info);

  /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      htab->srelbss->size += RELOC_SIZE (htab);
      h->needs_copy = 1;
    }

  s = htab->sdynbss;

  return _bfd_elf_adjust_dynamic_copy (h, s);

}
4961
4962static bfd_boolean
4963elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
4964{
4965 struct elf_aarch64_local_symbol *locals;
4966 locals = elf64_aarch64_locals (abfd);
4967 if (locals == NULL)
4968 {
4969 locals = (struct elf_aarch64_local_symbol *)
4970 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
4971 if (locals == NULL)
4972 return FALSE;
4973 elf64_aarch64_locals (abfd) = locals;
4974 }
4975 return TRUE;
4976}
4977
4978/* Look through the relocs for a section during the first phase. */
4979
4980static bfd_boolean
4981elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
4982 asection *sec, const Elf_Internal_Rela *relocs)
4983{
4984 Elf_Internal_Shdr *symtab_hdr;
4985 struct elf_link_hash_entry **sym_hashes;
4986 const Elf_Internal_Rela *rel;
4987 const Elf_Internal_Rela *rel_end;
4988 asection *sreloc;
4989
4990 struct elf64_aarch64_link_hash_table *htab;
4991
4992 unsigned long nsyms;
4993
4994 if (info->relocatable)
4995 return TRUE;
4996
4997 BFD_ASSERT (is_aarch64_elf (abfd));
4998
4999 htab = elf64_aarch64_hash_table (info);
5000 sreloc = NULL;
5001
5002 symtab_hdr = &elf_symtab_hdr (abfd);
5003 sym_hashes = elf_sym_hashes (abfd);
5004 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
5005
5006 rel_end = relocs + sec->reloc_count;
5007 for (rel = relocs; rel < rel_end; rel++)
5008 {
5009 struct elf_link_hash_entry *h;
5010 unsigned long r_symndx;
5011 unsigned int r_type;
5012
5013 r_symndx = ELF64_R_SYM (rel->r_info);
5014 r_type = ELF64_R_TYPE (rel->r_info);
5015
5016 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5017 {
5018 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5019 r_symndx);
5020 return FALSE;
5021 }
5022
5023 if (r_symndx >= nsyms
5024 /* PR 9934: It is possible to have relocations that do not
5025 refer to symbols, thus it is also possible to have an
5026 object file containing relocations but no symbol table. */
5027 && (r_symndx > 0 || nsyms > 0))
5028 {
5029 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5030 r_symndx);
5031 return FALSE;
5032 }
5033
5034 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
5035 h = NULL;
5036 else
5037 {
5038 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5039 while (h->root.type == bfd_link_hash_indirect
5040 || h->root.type == bfd_link_hash_warning)
5041 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5042 }
5043
5044 /* Could be done earlier, if h were already available. */
5045 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5046
5047 switch (r_type)
5048 {
5049 case R_AARCH64_ABS64:
5050
5051 /* We don't need to handle relocs into sections not going into
5052 the "real" output. */
5053 if ((sec->flags & SEC_ALLOC) == 0)
5054 break;
5055
5056 if (h != NULL)
5057 {
5058 if (!info->shared)
5059 h->non_got_ref = 1;
5060
5061 h->plt.refcount += 1;
5062 h->pointer_equality_needed = 1;
5063 }
5064
5065 /* No need to do anything if we're not creating a shared
5066 object. */
5067 if (! info->shared)
5068 break;
5069
5070 {
5071 struct elf_dyn_relocs *p;
5072 struct elf_dyn_relocs **head;
5073
5074 /* We must copy these reloc types into the output file.
5075 Create a reloc section in dynobj and make room for
5076 this reloc. */
5077 if (sreloc == NULL)
5078 {
5079 if (htab->root.dynobj == NULL)
5080 htab->root.dynobj = abfd;
5081
5082 sreloc = _bfd_elf_make_dynamic_reloc_section
5083 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5084
5085 if (sreloc == NULL)
5086 return FALSE;
5087 }
5088
5089 /* If this is a global symbol, we count the number of
5090 relocations we need for this symbol. */
5091 if (h != NULL)
5092 {
5093 struct elf64_aarch64_link_hash_entry *eh;
5094 eh = (struct elf64_aarch64_link_hash_entry *) h;
5095 head = &eh->dyn_relocs;
5096 }
5097 else
5098 {
5099 /* Track dynamic relocs needed for local syms too.
5100 We really need local syms available to do this
5101 easily. Oh well. */
5102
5103 asection *s;
5104 void **vpp;
5105 Elf_Internal_Sym *isym;
5106
5107 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5108 abfd, r_symndx);
5109 if (isym == NULL)
5110 return FALSE;
5111
5112 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5113 if (s == NULL)
5114 s = sec;
5115
5116 /* Beware of type punned pointers vs strict aliasing
5117 rules. */
5118 vpp = &(elf_section_data (s)->local_dynrel);
5119 head = (struct elf_dyn_relocs **) vpp;
5120 }
5121
5122 p = *head;
5123 if (p == NULL || p->sec != sec)
5124 {
5125 bfd_size_type amt = sizeof *p;
5126 p = ((struct elf_dyn_relocs *)
5127 bfd_zalloc (htab->root.dynobj, amt));
5128 if (p == NULL)
5129 return FALSE;
5130 p->next = *head;
5131 *head = p;
5132 p->sec = sec;
5133 }
5134
5135 p->count += 1;
5136
5137 }
5138 break;
5139
5140 /* RR: We probably want to keep a consistency check that
5141 there are no dangling GOT_PAGE relocs. */
5142 case R_AARCH64_LD64_GOT_LO12_NC:
f41aef5f 5143 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
5144 case R_AARCH64_ADR_GOT_PAGE:
5145 case R_AARCH64_TLSGD_ADR_PAGE21:
5146 case R_AARCH64_TLSGD_ADD_LO12_NC:
5147 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5148 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5149 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5150 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5151 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5152 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5153 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5154 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5155 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5156 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5157 case R_AARCH64_TLSDESC_ADR_PAGE:
5158 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5159 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5160 {
5161 unsigned got_type;
5162 unsigned old_got_type;
5163
5164 got_type = aarch64_reloc_got_type (r_type);
5165
5166 if (h)
5167 {
5168 h->got.refcount += 1;
5169 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5170 }
5171 else
5172 {
5173 struct elf_aarch64_local_symbol *locals;
5174
5175 if (!elf64_aarch64_allocate_local_symbols
5176 (abfd, symtab_hdr->sh_info))
5177 return FALSE;
5178
5179 locals = elf64_aarch64_locals (abfd);
5180 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5181 locals[r_symndx].got_refcount += 1;
5182 old_got_type = locals[r_symndx].got_type;
5183 }
5184
5185 /* If a variable is accessed with both general dynamic TLS
5186 methods, two slots may be created. */
5187 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5188 got_type |= old_got_type;
5189
5190 /* We will already have issued an error message if there
5191 is a TLS/non-TLS mismatch, based on the symbol type.
5192 So just combine any TLS types needed. */
5193 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5194 && got_type != GOT_NORMAL)
5195 got_type |= old_got_type;
5196
5197 /* If the symbol is accessed by both IE and GD methods, we
5198 are able to relax. Turn off the GD flag, without
5199 messing up with any other kind of TLS types that may be
5200 involved. */
5201 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5202 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5203
5204 if (old_got_type != got_type)
5205 {
5206 if (h != NULL)
5207 elf64_aarch64_hash_entry (h)->got_type = got_type;
5208 else
5209 {
5210 struct elf_aarch64_local_symbol *locals;
5211 locals = elf64_aarch64_locals (abfd);
5212 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5213 locals[r_symndx].got_type = got_type;
5214 }
5215 }
5216
5217 if (htab->root.sgot == NULL)
5218 {
5219 if (htab->root.dynobj == NULL)
5220 htab->root.dynobj = abfd;
5221 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5222 return FALSE;
5223 }
5224 break;
5225 }
5226
5227 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5228 case R_AARCH64_ADR_PREL_PG_HI21:
f41aef5f 5229 case R_AARCH64_ADR_PREL_LO21:
a06ea964
NC
5230 if (h != NULL && info->executable)
5231 {
5232 /* If this reloc is in a read-only section, we might
5233 need a copy reloc. We can't check reliably at this
5234 stage whether the section is read-only, as input
5235 sections have not yet been mapped to output sections.
5236 Tentatively set the flag for now, and correct in
5237 adjust_dynamic_symbol. */
5238 h->non_got_ref = 1;
5239 h->plt.refcount += 1;
5240 h->pointer_equality_needed = 1;
5241 }
5242 /* FIXME:: RR need to handle these in shared libraries
5243 and essentially bomb out as these being non-PIC
5244 relocations in shared libraries. */
5245 break;
5246
5247 case R_AARCH64_CALL26:
5248 case R_AARCH64_JUMP26:
5249 /* If this is a local symbol then we resolve it
5250 directly without creating a PLT entry. */
5251 if (h == NULL)
5252 continue;
5253
5254 h->needs_plt = 1;
5255 h->plt.refcount += 1;
5256 break;
5257 }
5258 }
5259 return TRUE;
5260}
5261
5262/* Treat mapping symbols as special target symbols. */
5263
5264static bfd_boolean
5265elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5266 asymbol *sym)
5267{
5268 return bfd_is_aarch64_special_symbol_name (sym->name,
5269 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5270}
5271
5272/* This is a copy of elf_find_function () from elf.c except that
5273 AArch64 mapping symbols are ignored when looking for function names. */
5274
5275static bfd_boolean
5276aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5277 asection *section,
5278 asymbol **symbols,
5279 bfd_vma offset,
5280 const char **filename_ptr,
5281 const char **functionname_ptr)
5282{
5283 const char *filename = NULL;
5284 asymbol *func = NULL;
5285 bfd_vma low_func = 0;
5286 asymbol **p;
5287
5288 for (p = symbols; *p != NULL; p++)
5289 {
5290 elf_symbol_type *q;
5291
5292 q = (elf_symbol_type *) * p;
5293
5294 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5295 {
5296 default:
5297 break;
5298 case STT_FILE:
5299 filename = bfd_asymbol_name (&q->symbol);
5300 break;
5301 case STT_FUNC:
5302 case STT_NOTYPE:
5303 /* Skip mapping symbols. */
5304 if ((q->symbol.flags & BSF_LOCAL)
5305 && (bfd_is_aarch64_special_symbol_name
5306 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5307 continue;
5308 /* Fall through. */
5309 if (bfd_get_section (&q->symbol) == section
5310 && q->symbol.value >= low_func && q->symbol.value <= offset)
5311 {
5312 func = (asymbol *) q;
5313 low_func = q->symbol.value;
5314 }
5315 break;
5316 }
5317 }
5318
5319 if (func == NULL)
5320 return FALSE;
5321
5322 if (filename_ptr)
5323 *filename_ptr = filename;
5324 if (functionname_ptr)
5325 *functionname_ptr = bfd_asymbol_name (func);
5326
5327 return TRUE;
5328}
5329
5330
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses aarch64_elf_find_function.  Tries DWARF2 first, then
   stabs, then falls back to the symbol table alone (no line number).  */

static bfd_boolean
elf64_aarch64_find_nearest_line (bfd *abfd,
				 asection *section,
				 asymbol **symbols,
				 bfd_vma offset,
				 const char **filename_ptr,
				 const char **functionname_ptr,
				 unsigned int *line_ptr)
{
  bfd_boolean found = FALSE;

  /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
     toolchain uses it.  */

  if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
				     section, symbols, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, NULL, 0,
				     &elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may supply file/line but no function name; fill it in
	 from the symbol table, ignoring AArch64 mapping symbols.  */
      if (!*functionname_ptr)
	aarch64_elf_find_function (abfd, section, symbols, offset,
				   *filename_ptr ? NULL : filename_ptr,
				   functionname_ptr);

      return TRUE;
    }

  /* Fall back to stabs debugging information.  */
  if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					    &found, filename_ptr,
					    functionname_ptr, line_ptr,
					    &elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: symbol table only; no line number is available.  */
  if (!aarch64_elf_find_function (abfd, section, symbols, offset,
				  filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
5382
5383static bfd_boolean
5384elf64_aarch64_find_inliner_info (bfd *abfd,
5385 const char **filename_ptr,
5386 const char **functionname_ptr,
5387 unsigned int *line_ptr)
5388{
5389 bfd_boolean found;
5390 found = _bfd_dwarf2_find_inliner_info
5391 (abfd, filename_ptr,
5392 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5393 return found;
5394}
5395
5396
5397static void
5398elf64_aarch64_post_process_headers (bfd *abfd,
5399 struct bfd_link_info *link_info
5400 ATTRIBUTE_UNUSED)
5401{
5402 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5403
5404 i_ehdrp = elf_elfheader (abfd);
5405 i_ehdrp->e_ident[EI_OSABI] = 0;
5406 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5407}
5408
5409static enum elf_reloc_type_class
5410elf64_aarch64_reloc_type_class (const Elf_Internal_Rela *rela)
5411{
5412 switch ((int) ELF64_R_TYPE (rela->r_info))
5413 {
5414 case R_AARCH64_RELATIVE:
5415 return reloc_class_relative;
5416 case R_AARCH64_JUMP_SLOT:
5417 return reloc_class_plt;
5418 case R_AARCH64_COPY:
5419 return reloc_class_copy;
5420 default:
5421 return reloc_class_normal;
5422 }
5423}
5424
/* Adjust the BFD flags of a section from its ELF section header.  NOTE
   sections are marked SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS
   so that identical notes from several inputs are emitted only once.
   (The old comment here about "setting the machine number" was a
   copy-paste error - this hook does not touch the machine number.)  */

static bfd_boolean
elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
{
  if (hdr->sh_type == SHT_NOTE)
    *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;

  return TRUE;
}
5435
5436/* Handle an AArch64 specific section when reading an object file. This is
5437 called when bfd_section_from_shdr finds a section with an unknown
5438 type. */
5439
5440static bfd_boolean
5441elf64_aarch64_section_from_shdr (bfd *abfd,
5442 Elf_Internal_Shdr *hdr,
5443 const char *name, int shindex)
5444{
5445 /* There ought to be a place to keep ELF backend specific flags, but
5446 at the moment there isn't one. We just keep track of the
5447 sections by their name, instead. Fortunately, the ABI gives
5448 names for all the AArch64 specific sections, so we will probably get
5449 away with this. */
5450 switch (hdr->sh_type)
5451 {
5452 case SHT_AARCH64_ATTRIBUTES:
5453 break;
5454
5455 default:
5456 return FALSE;
5457 }
5458
5459 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5460 return FALSE;
5461
5462 return TRUE;
5463}
5464
/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection *sec;		/* The section being tracked.  */
  struct section_list *next;	/* Next entry in the list, or NULL.  */
  struct section_list *prev;	/* Previous entry in the list, or NULL.  */
}
section_list;

/* Unfortunately we need to keep a list of sections for which
   an _aarch64_elf_section_data structure has been allocated.  This
   is because it is possible for functions like elf64_aarch64_write_section
   to be called on a section which has had an elf_data_structure
   allocated for it (and so the used_by_bfd field is valid) but
   for which the AArch64 extended version of this structure - the
   _aarch64_elf_section_data structure - has not been allocated.  */
static section_list *sections_with_aarch64_elf_section_data = NULL;
5483
5484static void
5485record_section_with_aarch64_elf_section_data (asection *sec)
5486{
5487 struct section_list *entry;
5488
5489 entry = bfd_malloc (sizeof (*entry));
5490 if (entry == NULL)
5491 return;
5492 entry->sec = sec;
5493 entry->next = sections_with_aarch64_elf_section_data;
5494 entry->prev = NULL;
5495 if (entry->next != NULL)
5496 entry->next->prev = entry;
5497 sections_with_aarch64_elf_section_data = entry;
5498}
5499
5500static struct section_list *
5501find_aarch64_elf_section_entry (asection *sec)
5502{
5503 struct section_list *entry;
5504 static struct section_list *last_entry = NULL;
5505
5506 /* This is a short cut for the typical case where the sections are added
5507 to the sections_with_aarch64_elf_section_data list in forward order and
5508 then looked up here in backwards order. This makes a real difference
5509 to the ld-srec/sec64k.exp linker test. */
5510 entry = sections_with_aarch64_elf_section_data;
5511 if (last_entry != NULL)
5512 {
5513 if (last_entry->sec == sec)
5514 entry = last_entry;
5515 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5516 entry = last_entry->next;
5517 }
5518
5519 for (; entry; entry = entry->next)
5520 if (entry->sec == sec)
5521 break;
5522
5523 if (entry)
5524 /* Record the entry prior to this one - it is the entry we are
5525 most likely to want to locate next time. Also this way if we
5526 have been called from
5527 unrecord_section_with_aarch64_elf_section_data () we will not
5528 be caching a pointer that is about to be freed. */
5529 last_entry = entry->prev;
5530
5531 return entry;
5532}
5533
5534static void
5535unrecord_section_with_aarch64_elf_section_data (asection *sec)
5536{
5537 struct section_list *entry;
5538
5539 entry = find_aarch64_elf_section_entry (sec);
5540
5541 if (entry)
5542 {
5543 if (entry->prev != NULL)
5544 entry->prev->next = entry->next;
5545 if (entry->next != NULL)
5546 entry->next->prev = entry->prev;
5547 if (entry == sections_with_aarch64_elf_section_data)
5548 sections_with_aarch64_elf_section_data = entry->next;
5549 free (entry);
5550 }
5551}
5552
5553
/* Context carried through the mapping-symbol output walks below.  */
typedef struct
{
  void *finfo;			/* Opaque argument passed through to FUNC.  */
  struct bfd_link_info *info;	/* The current link.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback used to emit one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* Kinds of mapping symbol: $x marks code, $d marks data.  */
enum map_symbol_type
{
  AARCH64_MAP_INSN,
  AARCH64_MAP_DATA
};
5569
5570
5571/* Output a single mapping symbol. */
5572
5573static bfd_boolean
5574elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5575 enum map_symbol_type type, bfd_vma offset)
5576{
5577 static const char *names[2] = { "$x", "$d" };
5578 Elf_Internal_Sym sym;
5579
5580 sym.st_value = (osi->sec->output_section->vma
5581 + osi->sec->output_offset + offset);
5582 sym.st_size = 0;
5583 sym.st_other = 0;
5584 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5585 sym.st_shndx = osi->sec_shndx;
5586 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5587}
5588
5589
5590
/* Output mapping symbols for PLT entries associated with H.
   Traversal callback for elf_link_hash_traverse.  */

static bfd_boolean
elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  bfd_vma addr;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal. So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Symbols with no PLT entry are not interesting here.  */
  if (h->plt.offset == (bfd_vma) - 1)
    return TRUE;

  addr = h->plt.offset;
  /* NOTE(review): only the entry immediately following the PLT header
     gets a $x mapping symbol; the magic 32 presumably matches
     htab->plt_header_size - confirm against the PLT layout code.  */
  if (addr == 32)
    {
      if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return FALSE;
    }
  return TRUE;
}
5619
5620
5621/* Output a single local symbol for a generated stub. */
5622
5623static bfd_boolean
5624elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5625 bfd_vma offset, bfd_vma size)
5626{
5627 Elf_Internal_Sym sym;
5628
5629 sym.st_value = (osi->sec->output_section->vma
5630 + osi->sec->output_offset + offset);
5631 sym.st_size = size;
5632 sym.st_other = 0;
5633 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5634 sym.st_shndx = osi->sec_shndx;
5635 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5636}
5637
5638static bfd_boolean
5639aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5640{
5641 struct elf64_aarch64_stub_hash_entry *stub_entry;
5642 asection *stub_sec;
5643 bfd_vma addr;
5644 char *stub_name;
5645 output_arch_syminfo *osi;
5646
5647 /* Massage our args to the form they really have. */
5648 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5649 osi = (output_arch_syminfo *) in_arg;
5650
5651 stub_sec = stub_entry->stub_sec;
5652
5653 /* Ensure this stub is attached to the current section being
5654 processed. */
5655 if (stub_sec != osi->sec)
5656 return TRUE;
5657
5658 addr = (bfd_vma) stub_entry->stub_offset;
5659
5660 stub_name = stub_entry->output_name;
5661
5662 switch (stub_entry->stub_type)
5663 {
5664 case aarch64_stub_adrp_branch:
5665 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5666 sizeof (aarch64_adrp_branch_stub)))
5667 return FALSE;
5668 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5669 return FALSE;
5670 break;
5671 case aarch64_stub_long_branch:
5672 if (!elf64_aarch64_output_stub_sym
5673 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5674 return FALSE;
5675 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5676 return FALSE;
5677 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5678 return FALSE;
5679 break;
5680 default:
5681 BFD_FAIL ();
5682 }
5683
5684 return TRUE;
5685}
5686
5687/* Output mapping symbols for linker generated sections. */
5688
5689static bfd_boolean
5690elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5691 struct bfd_link_info *info,
5692 void *finfo,
5693 int (*func) (void *, const char *,
5694 Elf_Internal_Sym *,
5695 asection *,
5696 struct elf_link_hash_entry
5697 *))
5698{
5699 output_arch_syminfo osi;
5700 struct elf64_aarch64_link_hash_table *htab;
5701
5702 htab = elf64_aarch64_hash_table (info);
5703
5704 osi.finfo = finfo;
5705 osi.info = info;
5706 osi.func = func;
5707
5708 /* Long calls stubs. */
5709 if (htab->stub_bfd && htab->stub_bfd->sections)
5710 {
5711 asection *stub_sec;
5712
5713 for (stub_sec = htab->stub_bfd->sections;
5714 stub_sec != NULL; stub_sec = stub_sec->next)
5715 {
5716 /* Ignore non-stub sections. */
5717 if (!strstr (stub_sec->name, STUB_SUFFIX))
5718 continue;
5719
5720 osi.sec = stub_sec;
5721
5722 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5723 (output_bfd, osi.sec->output_section);
5724
5725 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5726 &osi);
5727 }
5728 }
5729
5730 /* Finally, output mapping symbols for the PLT. */
5731 if (!htab->root.splt || htab->root.splt->size == 0)
5732 return TRUE;
5733
5734 /* For now live without mapping symbols for the plt. */
5735 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5736 (output_bfd, htab->root.splt->output_section);
5737 osi.sec = htab->root.splt;
5738
5739 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5740 (void *) &osi);
5741
5742 return TRUE;
5743
5744}
5745
5746/* Allocate target specific section data. */
5747
5748static bfd_boolean
5749elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5750{
5751 if (!sec->used_by_bfd)
5752 {
5753 _aarch64_elf_section_data *sdata;
5754 bfd_size_type amt = sizeof (*sdata);
5755
5756 sdata = bfd_zalloc (abfd, amt);
5757 if (sdata == NULL)
5758 return FALSE;
5759 sec->used_by_bfd = sdata;
5760 }
5761
5762 record_section_with_aarch64_elf_section_data (sec);
5763
5764 return _bfd_elf_new_section_hook (abfd, sec);
5765}
5766
5767
/* Callback for bfd_map_over_sections: drop SEC from the list of
   sections known to carry AArch64 specific section data.  */
static void
unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
					asection *sec,
					void *ignore ATTRIBUTE_UNUSED)
{
  unrecord_section_with_aarch64_elf_section_data (sec);
}
5775
5776static bfd_boolean
5777elf64_aarch64_close_and_cleanup (bfd *abfd)
5778{
5779 if (abfd->sections)
5780 bfd_map_over_sections (abfd,
5781 unrecord_section_via_map_over_sections, NULL);
5782
5783 return _bfd_elf_close_and_cleanup (abfd);
5784}
5785
5786static bfd_boolean
5787elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5788{
5789 if (abfd->sections)
5790 bfd_map_over_sections (abfd,
5791 unrecord_section_via_map_over_sections, NULL);
5792
5793 return _bfd_free_cached_info (abfd);
5794}
5795
/* Return TRUE iff the ELF symbol type TYPE denotes a function.  Only
   STT_FUNC qualifies (STT_GNU_IFUNC is not handled by this backend).  */
static bfd_boolean
elf64_aarch64_is_function_type (unsigned int type)
{
  return type == STT_FUNC;
}
5801
5802/* Create dynamic sections. This is different from the ARM backend in that
5803 the got, plt, gotplt and their relocation sections are all created in the
5804 standard part of the bfd elf backend. */
5805
5806static bfd_boolean
5807elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5808 struct bfd_link_info *info)
5809{
5810 struct elf64_aarch64_link_hash_table *htab;
5811 struct elf_link_hash_entry *h;
5812
5813 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5814 return FALSE;
5815
5816 htab = elf64_aarch64_hash_table (info);
5817 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5818 if (!info->shared)
5819 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5820
5821 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5822 abort ();
5823
5824 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5825 dynobj's .got section. We don't do this in the linker script
5826 because we don't want to define the symbol if we are not creating
5827 a global offset table. */
5828 h = _bfd_elf_define_linkage_sym (dynobj, info,
5829 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5830 elf_hash_table (info)->hgot = h;
5831 if (h == NULL)
5832 return FALSE;
5833
5834 return TRUE;
5835}
5836
5837
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Traversal callback over the link hash table; INF
   is the bfd_link_info.  Returns FALSE on failure to record a
   dynamic symbol.  */

static bfd_boolean
elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
  struct bfd_link_info *info;
  struct elf64_aarch64_link_hash_table *htab;
  struct elf64_aarch64_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* An example of a bfd_link_hash_indirect symbol is versioned
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elf64_aarch64_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.
   */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf64_aarch64_hash_table (info);

  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
         Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  asection *s = htab->root.splt;

	  /* If this is the first .plt entry, make room for the special
	     first entry.  */
	  if (s->size == 0)
	    s->size += htab->plt_header_size;

	  h->plt.offset = s->size;

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (!info->shared && !h->def_regular)
	    {
	      h->root.u.def.section = s;
	      h->root.u.def.value = h->plt.offset;
	    }

	  /* Make room for this entry. For now we only create the
	     small model PLT entries. We later need to find a way
	     of relaxing into these from the large model PLT entries.  */
	  s->size += PLT_SMALL_ENTRY_SIZE;

	  /* We also need to make an entry in the .got.plt section, which
	     will be placed in the .got section by the linker script.  */
	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;

	  /* We also need to make an entry in the .rela.plt section.  */
	  htab->root.srelplt->size += RELOC_SIZE (htab);

	  /* We need to ensure that all GOT entries that serve the PLT
	     are consecutive with the special GOT slots [0] [1] and
	     [2]. Any addtional relocations, such as
	     R_AARCH64_TLSDESC, must be placed after the PLT related
	     entries.  We abuse the reloc_count such that during
	     sizing we adjust reloc_count to indicate the number of
	     PLT related reserved entries.  In subsequent phases when
	     filling in the contents of the reloc entries, PLT related
	     entries are placed by computing their PLT index (0
	     .. reloc_count). While other none PLT relocs are placed
	     at the slot indicated by reloc_count and reloc_count is
	     updated.  */

	  htab->root.srelplt->reloc_count++;
	}
      else
	{
	  /* No PLT entry after all; mark the slot unused.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) - 1;
      h->needs_plt = 0;
    }

  eh = (struct elf64_aarch64_link_hash_entry *) h;
  /* -1 marks "no TLSDESC GOT jump-table slot allocated".  */
  eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;

  if (h->got.refcount > 0)
    {
      bfd_boolean dyn;
      unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;

      h->got.offset = (bfd_vma) - 1;

      dyn = htab->root.dynamic_sections_created;

      /* Make sure this symbol is output as a dynamic symbol.
         Undefined weak syms won't yet be marked as dynamic.  */
      if (dyn && h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (got_type == GOT_UNKNOWN)
	{
	  /* No GOT slot is required for this symbol.  */
	}
      else if (got_type == GOT_NORMAL)
	{
	  /* One data GOT slot, plus a dynamic reloc when the entry
	     cannot be resolved at static link time.  */
	  h->got.offset = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (info->shared
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
      else
	{
	  int indx;
	  /* TLS GOT slots; got_type is a bit-set, so a symbol may
	     need several kinds of slot at once.  */
	  if (got_type & GOT_TLSDESC_GD)
	    {
	      eh->tlsdesc_got_jump_table_offset =
		(htab->root.sgotplt->size
		 - aarch64_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
	      /* -2 is a sentinel distinct from the -1 "no entry"
		 value; the real slot lives in .got.plt above.  */
	      h->got.offset = (bfd_vma) - 2;
	    }

	  if (got_type & GOT_TLS_GD)
	    {
	      /* GD needs a module-id/offset pair.  */
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
	    }

	  if (got_type & GOT_TLS_IE)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE;
	    }

	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (info->shared
		  || indx != 0
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      if (got_type & GOT_TLSDESC_GD)
		{
		  htab->root.srelplt->size += RELOC_SIZE (htab);
		  /* Note reloc_count not incremented here! We have
		     already adjusted reloc_count for this relocation
		     type.  */

		  /* TLSDESC PLT is now needed, but not yet determined.  */
		  htab->tlsdesc_plt = (bfd_vma) - 1;
		}

	      if (got_type & GOT_TLS_GD)
		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

	      if (got_type & GOT_TLS_IE)
		htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
    }
  else
    {
      h->got.offset = (bfd_vma) - 1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (info->shared)
    {
      /* Relocs that use pc_count are those that appear on a call
         insn, or certain REL relocs that can generated via assembly.
         We want calls to protected symbols to resolve directly to the
         function rather than going via the plt.  If people want
         function pointer comparisons to work as expected then they
         should avoid writing weird assembly.  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
         visibility.  */
      if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local
		   && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else if (ELIMINATE_COPY_RELOCS)
    {
      /* For the non-shared case, discard space for relocs against
         symbols which turn out to need copy relocs or are not
         dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local
	      && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep:;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc;

      sreloc = elf_section_data (p->sec)->sreloc;

      BFD_ASSERT (sreloc != NULL);

      sreloc->size += p->count * RELOC_SIZE (htab);
    }

  return TRUE;
}
6122
6123
6124
/* This is the most important function of all.  Innocuously named
   though!  */
6128static bfd_boolean
6129elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6130 struct bfd_link_info *info)
6131{
6132 struct elf64_aarch64_link_hash_table *htab;
6133 bfd *dynobj;
6134 asection *s;
6135 bfd_boolean relocs;
6136 bfd *ibfd;
6137
6138 htab = elf64_aarch64_hash_table ((info));
6139 dynobj = htab->root.dynobj;
6140
6141 BFD_ASSERT (dynobj != NULL);
6142
6143 if (htab->root.dynamic_sections_created)
6144 {
6145 if (info->executable)
6146 {
6147 s = bfd_get_linker_section (dynobj, ".interp");
6148 if (s == NULL)
6149 abort ();
6150 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6151 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6152 }
6153 }
6154
6155 /* Set up .got offsets for local syms, and space for local dynamic
6156 relocs. */
6157 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6158 {
6159 struct elf_aarch64_local_symbol *locals = NULL;
6160 Elf_Internal_Shdr *symtab_hdr;
6161 asection *srel;
6162 unsigned int i;
6163
6164 if (!is_aarch64_elf (ibfd))
6165 continue;
6166
6167 for (s = ibfd->sections; s != NULL; s = s->next)
6168 {
6169 struct elf_dyn_relocs *p;
6170
6171 for (p = (struct elf_dyn_relocs *)
6172 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6173 {
6174 if (!bfd_is_abs_section (p->sec)
6175 && bfd_is_abs_section (p->sec->output_section))
6176 {
6177 /* Input section has been discarded, either because
6178 it is a copy of a linkonce section or due to
6179 linker script /DISCARD/, so we'll be discarding
6180 the relocs too. */
6181 }
6182 else if (p->count != 0)
6183 {
6184 srel = elf_section_data (p->sec)->sreloc;
6185 srel->size += p->count * RELOC_SIZE (htab);
6186 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6187 info->flags |= DF_TEXTREL;
6188 }
6189 }
6190 }
6191
6192 locals = elf64_aarch64_locals (ibfd);
6193 if (!locals)
6194 continue;
6195
6196 symtab_hdr = &elf_symtab_hdr (ibfd);
6197 srel = htab->root.srelgot;
6198 for (i = 0; i < symtab_hdr->sh_info; i++)
6199 {
6200 locals[i].got_offset = (bfd_vma) - 1;
6201 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6202 if (locals[i].got_refcount > 0)
6203 {
6204 unsigned got_type = locals[i].got_type;
6205 if (got_type & GOT_TLSDESC_GD)
6206 {
6207 locals[i].tlsdesc_got_jump_table_offset =
6208 (htab->root.sgotplt->size
6209 - aarch64_compute_jump_table_size (htab));
6210 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6211 locals[i].got_offset = (bfd_vma) - 2;
6212 }
6213
6214 if (got_type & GOT_TLS_GD)
6215 {
6216 locals[i].got_offset = htab->root.sgot->size;
6217 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6218 }
6219
6220 if (got_type & GOT_TLS_IE)
6221 {
6222 locals[i].got_offset = htab->root.sgot->size;
6223 htab->root.sgot->size += GOT_ENTRY_SIZE;
6224 }
6225
6226 if (got_type == GOT_UNKNOWN)
6227 {
6228 }
6229
6230 if (got_type == GOT_NORMAL)
6231 {
6232 }
6233
6234 if (info->shared)
6235 {
6236 if (got_type & GOT_TLSDESC_GD)
6237 {
6238 htab->root.srelplt->size += RELOC_SIZE (htab);
6239 /* Note RELOC_COUNT not incremented here! */
6240 htab->tlsdesc_plt = (bfd_vma) - 1;
6241 }
6242
6243 if (got_type & GOT_TLS_GD)
6244 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6245
6246 if (got_type & GOT_TLS_IE)
6247 htab->root.srelgot->size += RELOC_SIZE (htab);
6248 }
6249 }
6250 else
6251 {
6252 locals[i].got_refcount = (bfd_vma) - 1;
6253 }
6254 }
6255 }
6256
6257
6258 /* Allocate global sym .plt and .got entries, and space for global
6259 sym dynamic relocs. */
6260 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6261 info);
6262
6263
6264 /* For every jump slot reserved in the sgotplt, reloc_count is
6265 incremented. However, when we reserve space for TLS descriptors,
6266 it's not incremented, so in order to compute the space reserved
6267 for them, it suffices to multiply the reloc count by the jump
6268 slot size. */
6269
6270 if (htab->root.srelplt)
6271 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6272
6273 if (htab->tlsdesc_plt)
6274 {
6275 if (htab->root.splt->size == 0)
6276 htab->root.splt->size += PLT_ENTRY_SIZE;
6277
6278 htab->tlsdesc_plt = htab->root.splt->size;
6279 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6280
6281 /* If we're not using lazy TLS relocations, don't generate the
6282 GOT entry required. */
6283 if (!(info->flags & DF_BIND_NOW))
6284 {
6285 htab->dt_tlsdesc_got = htab->root.sgot->size;
6286 htab->root.sgot->size += GOT_ENTRY_SIZE;
6287 }
6288 }
6289
6290 /* We now have determined the sizes of the various dynamic sections.
6291 Allocate memory for them. */
6292 relocs = FALSE;
6293 for (s = dynobj->sections; s != NULL; s = s->next)
6294 {
6295 if ((s->flags & SEC_LINKER_CREATED) == 0)
6296 continue;
6297
6298 if (s == htab->root.splt
6299 || s == htab->root.sgot
6300 || s == htab->root.sgotplt
6301 || s == htab->root.iplt
6302 || s == htab->root.igotplt || s == htab->sdynbss)
6303 {
6304 /* Strip this section if we don't need it; see the
6305 comment below. */
6306 }
6307 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6308 {
6309 if (s->size != 0 && s != htab->root.srelplt)
6310 relocs = TRUE;
6311
6312 /* We use the reloc_count field as a counter if we need
6313 to copy relocs into the output file. */
6314 if (s != htab->root.srelplt)
6315 s->reloc_count = 0;
6316 }
6317 else
6318 {
6319 /* It's not one of our sections, so don't allocate space. */
6320 continue;
6321 }
6322
6323 if (s->size == 0)
6324 {
6325 /* If we don't need this section, strip it from the
6326 output file. This is mostly to handle .rela.bss and
6327 .rela.plt. We must create both sections in
6328 create_dynamic_sections, because they must be created
6329 before the linker maps input sections to output
6330 sections. The linker does that before
6331 adjust_dynamic_symbol is called, and it is that
6332 function which decides whether anything needs to go
6333 into these sections. */
6334
6335 s->flags |= SEC_EXCLUDE;
6336 continue;
6337 }
6338
6339 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6340 continue;
6341
6342 /* Allocate memory for the section contents. We use bfd_zalloc
6343 here in case unused entries are not reclaimed before the
6344 section's contents are written out. This should not happen,
6345 but this way if it does, we get a R_AARCH64_NONE reloc instead
6346 of garbage. */
6347 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6348 if (s->contents == NULL)
6349 return FALSE;
6350 }
6351
6352 if (htab->root.dynamic_sections_created)
6353 {
6354 /* Add some entries to the .dynamic section. We fill in the
6355 values later, in elf64_aarch64_finish_dynamic_sections, but we
6356 must add the entries now so that we get the correct size for
6357 the .dynamic section. The DT_DEBUG entry is filled in by the
6358 dynamic linker and used by the debugger. */
6359#define add_dynamic_entry(TAG, VAL) \
6360 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6361
6362 if (info->executable)
6363 {
6364 if (!add_dynamic_entry (DT_DEBUG, 0))
6365 return FALSE;
6366 }
6367
6368 if (htab->root.splt->size != 0)
6369 {
6370 if (!add_dynamic_entry (DT_PLTGOT, 0)
6371 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6372 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6373 || !add_dynamic_entry (DT_JMPREL, 0))
6374 return FALSE;
6375
6376 if (htab->tlsdesc_plt
6377 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6378 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6379 return FALSE;
6380 }
6381
6382 if (relocs)
6383 {
6384 if (!add_dynamic_entry (DT_RELA, 0)
6385 || !add_dynamic_entry (DT_RELASZ, 0)
6386 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6387 return FALSE;
6388
6389 /* If any dynamic relocs apply to a read-only section,
6390 then we need a DT_TEXTREL entry. */
6391 if ((info->flags & DF_TEXTREL) != 0)
6392 {
6393 if (!add_dynamic_entry (DT_TEXTREL, 0))
6394 return FALSE;
6395 }
6396 }
6397 }
6398#undef add_dynamic_entry
6399
6400 return TRUE;
6401
6402
6403}
6404
6405static inline void
6406elf64_aarch64_update_plt_entry (bfd *output_bfd,
6407 unsigned int r_type,
6408 bfd_byte *plt_entry, bfd_vma value)
6409{
6410 reloc_howto_type *howto;
6411 howto = elf64_aarch64_howto_from_type (r_type);
6412 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6413}
6414
6415static void
6416elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6417 struct elf64_aarch64_link_hash_table
6418 *htab, bfd *output_bfd)
6419{
6420 bfd_byte *plt_entry;
6421 bfd_vma plt_index;
6422 bfd_vma got_offset;
6423 bfd_vma gotplt_entry_address;
6424 bfd_vma plt_entry_address;
6425 Elf_Internal_Rela rela;
6426 bfd_byte *loc;
6427
6428 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6429
6430 /* Offset in the GOT is PLT index plus got GOT headers(3)
6431 times 8. */
6432 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6433 plt_entry = htab->root.splt->contents + h->plt.offset;
6434 plt_entry_address = htab->root.splt->output_section->vma
6435 + htab->root.splt->output_section->output_offset + h->plt.offset;
6436 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6437 htab->root.sgotplt->output_offset + got_offset;
6438
6439 /* Copy in the boiler-plate for the PLTn entry. */
6440 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6441
6442 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6443 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6444 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6445 plt_entry,
6446 PG (gotplt_entry_address) -
6447 PG (plt_entry_address));
6448
6449 /* Fill in the lo12 bits for the load from the pltgot. */
6450 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6451 plt_entry + 4,
6452 PG_OFFSET (gotplt_entry_address));
6453
6454 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6455 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6456 plt_entry + 8,
6457 PG_OFFSET (gotplt_entry_address));
6458
6459 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6460 bfd_put_64 (output_bfd,
6461 (htab->root.splt->output_section->vma
6462 + htab->root.splt->output_offset),
6463 htab->root.sgotplt->contents + got_offset);
6464
6465 /* Fill in the entry in the .rela.plt section. */
6466 rela.r_offset = gotplt_entry_address;
6467 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6468 rela.r_addend = 0;
6469
6470 /* Compute the relocation entry to used based on PLT index and do
6471 not adjust reloc_count. The reloc_count has already been adjusted
6472 to account for this entry. */
6473 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6474 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6475}
6476
6477/* Size sections even though they're not dynamic. We use it to setup
6478 _TLS_MODULE_BASE_, if needed. */
6479
6480static bfd_boolean
6481elf64_aarch64_always_size_sections (bfd *output_bfd,
6482 struct bfd_link_info *info)
6483{
6484 asection *tls_sec;
6485
6486 if (info->relocatable)
6487 return TRUE;
6488
6489 tls_sec = elf_hash_table (info)->tls_sec;
6490
6491 if (tls_sec)
6492 {
6493 struct elf_link_hash_entry *tlsbase;
6494
6495 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6496 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6497
6498 if (tlsbase)
6499 {
6500 struct bfd_link_hash_entry *h = NULL;
6501 const struct elf_backend_data *bed =
6502 get_elf_backend_data (output_bfd);
6503
6504 if (!(_bfd_generic_link_add_one_symbol
6505 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6506 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6507 return FALSE;
6508
6509 tlsbase->type = STT_TLS;
6510 tlsbase = (struct elf_link_hash_entry *) h;
6511 tlsbase->def_regular = 1;
6512 tlsbase->other = STV_HIDDEN;
6513 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6514 }
6515 }
6516
6517 return TRUE;
6518}
6519
6520/* Finish up dynamic symbol handling. We set the contents of various
6521 dynamic sections here. */
6522static bfd_boolean
6523elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6524 struct bfd_link_info *info,
6525 struct elf_link_hash_entry *h,
6526 Elf_Internal_Sym *sym)
6527{
6528 struct elf64_aarch64_link_hash_table *htab;
6529 htab = elf64_aarch64_hash_table (info);
6530
6531 if (h->plt.offset != (bfd_vma) - 1)
6532 {
6533 /* This symbol has an entry in the procedure linkage table. Set
6534 it up. */
6535
6536 if (h->dynindx == -1
6537 || htab->root.splt == NULL
6538 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6539 abort ();
6540
6541 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6542 if (!h->def_regular)
6543 {
6544 /* Mark the symbol as undefined, rather than as defined in
6545 the .plt section. Leave the value alone. This is a clue
6546 for the dynamic linker, to make function pointer
6547 comparisons work between an application and shared
6548 library. */
6549 sym->st_shndx = SHN_UNDEF;
6550 }
6551 }
6552
6553 if (h->got.offset != (bfd_vma) - 1
6554 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6555 {
6556 Elf_Internal_Rela rela;
6557 bfd_byte *loc;
6558
6559 /* This symbol has an entry in the global offset table. Set it
6560 up. */
6561 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6562 abort ();
6563
6564 rela.r_offset = (htab->root.sgot->output_section->vma
6565 + htab->root.sgot->output_offset
6566 + (h->got.offset & ~(bfd_vma) 1));
6567
6568 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6569 {
6570 if (!h->def_regular)
6571 return FALSE;
6572
6573 BFD_ASSERT ((h->got.offset & 1) != 0);
6574 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6575 rela.r_addend = (h->root.u.def.value
6576 + h->root.u.def.section->output_section->vma
6577 + h->root.u.def.section->output_offset);
6578 }
6579 else
6580 {
6581 BFD_ASSERT ((h->got.offset & 1) == 0);
6582 bfd_put_64 (output_bfd, (bfd_vma) 0,
6583 htab->root.sgot->contents + h->got.offset);
6584 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6585 rela.r_addend = 0;
6586 }
6587
6588 loc = htab->root.srelgot->contents;
6589 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6590 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6591 }
6592
6593 if (h->needs_copy)
6594 {
6595 Elf_Internal_Rela rela;
6596 bfd_byte *loc;
6597
6598 /* This symbol needs a copy reloc. Set it up. */
6599
6600 if (h->dynindx == -1
6601 || (h->root.type != bfd_link_hash_defined
6602 && h->root.type != bfd_link_hash_defweak)
6603 || htab->srelbss == NULL)
6604 abort ();
6605
6606 rela.r_offset = (h->root.u.def.value
6607 + h->root.u.def.section->output_section->vma
6608 + h->root.u.def.section->output_offset);
6609 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6610 rela.r_addend = 0;
6611 loc = htab->srelbss->contents;
6612 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6613 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6614 }
6615
6616 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6617 be NULL for local symbols. */
6618 if (sym != NULL
9637f6ef 6619 && (h == elf_hash_table (info)->hdynamic
a06ea964
NC
6620 || h == elf_hash_table (info)->hgot))
6621 sym->st_shndx = SHN_ABS;
6622
6623 return TRUE;
6624}
6625
6626static void
6627elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6628 struct elf64_aarch64_link_hash_table
6629 *htab)
6630{
6631 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6632 small and large plts and at the minute just generates
6633 the small PLT. */
6634
6635 /* PLT0 of the small PLT looks like this -
6636 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6637 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6638 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6639 // symbol resolver
6640 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6641 // GOTPLT entry for this.
6642 br x17
6643 */
6644 bfd_vma plt_got_base;
6645 bfd_vma plt_base;
6646
6647
6648 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6649 PLT_ENTRY_SIZE);
6650 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6651 PLT_ENTRY_SIZE;
6652
6653 plt_got_base = (htab->root.sgotplt->output_section->vma
6654 + htab->root.sgotplt->output_offset);
6655
6656 plt_base = htab->root.splt->output_section->vma +
6657 htab->root.splt->output_section->output_offset;
6658
6659 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6660 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6661 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6662 htab->root.splt->contents + 4,
6663 PG (plt_got_base + 16) - PG (plt_base + 4));
6664
6665 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6666 htab->root.splt->contents + 8,
6667 PG_OFFSET (plt_got_base + 16));
6668
6669 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6670 htab->root.splt->contents + 12,
6671 PG_OFFSET (plt_got_base + 16));
6672}
6673
6674static bfd_boolean
6675elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6676 struct bfd_link_info *info)
6677{
6678 struct elf64_aarch64_link_hash_table *htab;
6679 bfd *dynobj;
6680 asection *sdyn;
6681
6682 htab = elf64_aarch64_hash_table (info);
6683 dynobj = htab->root.dynobj;
6684 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6685
6686 if (htab->root.dynamic_sections_created)
6687 {
6688 Elf64_External_Dyn *dyncon, *dynconend;
6689
6690 if (sdyn == NULL || htab->root.sgot == NULL)
6691 abort ();
6692
6693 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6694 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6695 for (; dyncon < dynconend; dyncon++)
6696 {
6697 Elf_Internal_Dyn dyn;
6698 asection *s;
6699
6700 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6701
6702 switch (dyn.d_tag)
6703 {
6704 default:
6705 continue;
6706
6707 case DT_PLTGOT:
6708 s = htab->root.sgotplt;
6709 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6710 break;
6711
6712 case DT_JMPREL:
6713 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6714 break;
6715
6716 case DT_PLTRELSZ:
6717 s = htab->root.srelplt->output_section;
6718 dyn.d_un.d_val = s->size;
6719 break;
6720
6721 case DT_RELASZ:
6722 /* The procedure linkage table relocs (DT_JMPREL) should
6723 not be included in the overall relocs (DT_RELA).
6724 Therefore, we override the DT_RELASZ entry here to
6725 make it not include the JMPREL relocs. Since the
6726 linker script arranges for .rela.plt to follow all
6727 other relocation sections, we don't have to worry
6728 about changing the DT_RELA entry. */
6729 if (htab->root.srelplt != NULL)
6730 {
6731 s = htab->root.srelplt->output_section;
6732 dyn.d_un.d_val -= s->size;
6733 }
6734 break;
6735
6736 case DT_TLSDESC_PLT:
6737 s = htab->root.splt;
6738 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6739 + htab->tlsdesc_plt;
6740 break;
6741
6742 case DT_TLSDESC_GOT:
6743 s = htab->root.sgot;
6744 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6745 + htab->dt_tlsdesc_got;
6746 break;
6747 }
6748
6749 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6750 }
6751
6752 }
6753
6754 /* Fill in the special first entry in the procedure linkage table. */
6755 if (htab->root.splt && htab->root.splt->size > 0)
6756 {
6757 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6758
6759 elf_section_data (htab->root.splt->output_section)->
6760 this_hdr.sh_entsize = htab->plt_entry_size;
6761
6762
6763 if (htab->tlsdesc_plt)
6764 {
6765 bfd_put_64 (output_bfd, (bfd_vma) 0,
6766 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6767
6768 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6769 elf64_aarch64_tlsdesc_small_plt_entry,
6770 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6771
6772 {
6773 bfd_vma adrp1_addr =
6774 htab->root.splt->output_section->vma
6775 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6776
6777 bfd_vma adrp2_addr =
6778 htab->root.splt->output_section->vma
6779 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6780
6781 bfd_vma got_addr =
6782 htab->root.sgot->output_section->vma
6783 + htab->root.sgot->output_offset;
6784
6785 bfd_vma pltgot_addr =
6786 htab->root.sgotplt->output_section->vma
6787 + htab->root.sgotplt->output_offset;
6788
6789 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6790 bfd_vma opcode;
6791
6792 /* adrp x2, DT_TLSDESC_GOT */
6793 opcode = bfd_get_32 (output_bfd,
6794 htab->root.splt->contents
6795 + htab->tlsdesc_plt + 4);
6796 opcode = reencode_adr_imm
6797 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6798 bfd_put_32 (output_bfd, opcode,
6799 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6800
6801 /* adrp x3, 0 */
6802 opcode = bfd_get_32 (output_bfd,
6803 htab->root.splt->contents
6804 + htab->tlsdesc_plt + 8);
6805 opcode = reencode_adr_imm
6806 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6807 bfd_put_32 (output_bfd, opcode,
6808 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6809
6810 /* ldr x2, [x2, #0] */
6811 opcode = bfd_get_32 (output_bfd,
6812 htab->root.splt->contents
6813 + htab->tlsdesc_plt + 12);
6814 opcode = reencode_ldst_pos_imm (opcode,
6815 PG_OFFSET (dt_tlsdesc_got) >> 3);
6816 bfd_put_32 (output_bfd, opcode,
6817 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6818
6819 /* add x3, x3, 0 */
6820 opcode = bfd_get_32 (output_bfd,
6821 htab->root.splt->contents
6822 + htab->tlsdesc_plt + 16);
6823 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6824 bfd_put_32 (output_bfd, opcode,
6825 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6826 }
6827 }
6828 }
6829
6830 if (htab->root.sgotplt)
6831 {
6832 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6833 {
6834 (*_bfd_error_handler)
6835 (_("discarded output section: `%A'"), htab->root.sgotplt);
6836 return FALSE;
6837 }
6838
6839 /* Fill in the first three entries in the global offset table. */
6840 if (htab->root.sgotplt->size > 0)
6841 {
6842 /* Set the first entry in the global offset table to the address of
6843 the dynamic section. */
6844 if (sdyn == NULL)
6845 bfd_put_64 (output_bfd, (bfd_vma) 0,
6846 htab->root.sgotplt->contents);
6847 else
6848 bfd_put_64 (output_bfd,
6849 sdyn->output_section->vma + sdyn->output_offset,
6850 htab->root.sgotplt->contents);
6851 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6852 bfd_put_64 (output_bfd,
6853 (bfd_vma) 0,
6854 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6855 bfd_put_64 (output_bfd,
6856 (bfd_vma) 0,
6857 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6858 }
6859
6860 elf_section_data (htab->root.sgotplt->output_section)->
6861 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6862 }
6863
6864 if (htab->root.sgot && htab->root.sgot->size > 0)
6865 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6866 = GOT_ENTRY_SIZE;
6867
6868 return TRUE;
6869}
6870
6871/* Return address for Ith PLT stub in section PLT, for relocation REL
6872 or (bfd_vma) -1 if it should not be included. */
6873
6874static bfd_vma
6875elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6876 const arelent *rel ATTRIBUTE_UNUSED)
6877{
6878 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6879}
6880
6881
6882/* We use this so we can override certain functions
6883 (though currently we don't). */
6884
6885const struct elf_size_info elf64_aarch64_size_info =
6886{
6887 sizeof (Elf64_External_Ehdr),
6888 sizeof (Elf64_External_Phdr),
6889 sizeof (Elf64_External_Shdr),
6890 sizeof (Elf64_External_Rel),
6891 sizeof (Elf64_External_Rela),
6892 sizeof (Elf64_External_Sym),
6893 sizeof (Elf64_External_Dyn),
6894 sizeof (Elf_External_Note),
6895 4, /* Hash table entry size. */
6896 1, /* Internal relocs per external relocs. */
6897 64, /* Arch size. */
6898 3, /* Log_file_align. */
6899 ELFCLASS64, EV_CURRENT,
6900 bfd_elf64_write_out_phdrs,
6901 bfd_elf64_write_shdrs_and_ehdr,
6902 bfd_elf64_checksum_contents,
6903 bfd_elf64_write_relocs,
6904 bfd_elf64_swap_symbol_in,
6905 bfd_elf64_swap_symbol_out,
6906 bfd_elf64_slurp_reloc_table,
6907 bfd_elf64_slurp_symbol_table,
6908 bfd_elf64_swap_dyn_in,
6909 bfd_elf64_swap_dyn_out,
6910 bfd_elf64_swap_reloc_in,
6911 bfd_elf64_swap_reloc_out,
6912 bfd_elf64_swap_reloca_in,
6913 bfd_elf64_swap_reloca_out
6914};
6915
6916#define ELF_ARCH bfd_arch_aarch64
6917#define ELF_MACHINE_CODE EM_AARCH64
6918#define ELF_MAXPAGESIZE 0x10000
6919#define ELF_MINPAGESIZE 0x1000
6920#define ELF_COMMONPAGESIZE 0x1000
6921
6922#define bfd_elf64_close_and_cleanup \
6923 elf64_aarch64_close_and_cleanup
6924
6925#define bfd_elf64_bfd_copy_private_bfd_data \
6926 elf64_aarch64_copy_private_bfd_data
6927
6928#define bfd_elf64_bfd_free_cached_info \
6929 elf64_aarch64_bfd_free_cached_info
6930
6931#define bfd_elf64_bfd_is_target_special_symbol \
6932 elf64_aarch64_is_target_special_symbol
6933
6934#define bfd_elf64_bfd_link_hash_table_create \
6935 elf64_aarch64_link_hash_table_create
6936
6937#define bfd_elf64_bfd_link_hash_table_free \
6938 elf64_aarch64_hash_table_free
6939
6940#define bfd_elf64_bfd_merge_private_bfd_data \
6941 elf64_aarch64_merge_private_bfd_data
6942
6943#define bfd_elf64_bfd_print_private_bfd_data \
6944 elf64_aarch64_print_private_bfd_data
6945
6946#define bfd_elf64_bfd_reloc_type_lookup \
6947 elf64_aarch64_reloc_type_lookup
6948
6949#define bfd_elf64_bfd_reloc_name_lookup \
6950 elf64_aarch64_reloc_name_lookup
6951
6952#define bfd_elf64_bfd_set_private_flags \
6953 elf64_aarch64_set_private_flags
6954
6955#define bfd_elf64_find_inliner_info \
6956 elf64_aarch64_find_inliner_info
6957
6958#define bfd_elf64_find_nearest_line \
6959 elf64_aarch64_find_nearest_line
6960
6961#define bfd_elf64_mkobject \
6962 elf64_aarch64_mkobject
6963
6964#define bfd_elf64_new_section_hook \
6965 elf64_aarch64_new_section_hook
6966
6967#define elf_backend_adjust_dynamic_symbol \
6968 elf64_aarch64_adjust_dynamic_symbol
6969
6970#define elf_backend_always_size_sections \
6971 elf64_aarch64_always_size_sections
6972
6973#define elf_backend_check_relocs \
6974 elf64_aarch64_check_relocs
6975
6976#define elf_backend_copy_indirect_symbol \
6977 elf64_aarch64_copy_indirect_symbol
6978
6979/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
6980 to them in our hash. */
6981#define elf_backend_create_dynamic_sections \
6982 elf64_aarch64_create_dynamic_sections
6983
6984#define elf_backend_init_index_section \
6985 _bfd_elf_init_2_index_sections
6986
6987#define elf_backend_is_function_type \
6988 elf64_aarch64_is_function_type
6989
6990#define elf_backend_finish_dynamic_sections \
6991 elf64_aarch64_finish_dynamic_sections
6992
6993#define elf_backend_finish_dynamic_symbol \
6994 elf64_aarch64_finish_dynamic_symbol
6995
6996#define elf_backend_gc_sweep_hook \
6997 elf64_aarch64_gc_sweep_hook
6998
6999#define elf_backend_object_p \
7000 elf64_aarch64_object_p
7001
7002#define elf_backend_output_arch_local_syms \
7003 elf64_aarch64_output_arch_local_syms
7004
7005#define elf_backend_plt_sym_val \
7006 elf64_aarch64_plt_sym_val
7007
7008#define elf_backend_post_process_headers \
7009 elf64_aarch64_post_process_headers
7010
7011#define elf_backend_relocate_section \
7012 elf64_aarch64_relocate_section
7013
7014#define elf_backend_reloc_type_class \
7015 elf64_aarch64_reloc_type_class
7016
7017#define elf_backend_section_flags \
7018 elf64_aarch64_section_flags
7019
7020#define elf_backend_section_from_shdr \
7021 elf64_aarch64_section_from_shdr
7022
7023#define elf_backend_size_dynamic_sections \
7024 elf64_aarch64_size_dynamic_sections
7025
7026#define elf_backend_size_info \
7027 elf64_aarch64_size_info
7028
7029#define elf_backend_can_refcount 1
7030#define elf_backend_can_gc_sections 0
7031#define elf_backend_plt_readonly 1
7032#define elf_backend_want_got_plt 1
7033#define elf_backend_want_plt_sym 0
7034#define elf_backend_may_use_rel_p 0
7035#define elf_backend_may_use_rela_p 1
7036#define elf_backend_default_use_rela_p 1
7037#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7038
7039#undef elf_backend_obj_attrs_section
7040#define elf_backend_obj_attrs_section ".ARM.attributes"
7041
7042#include "elf64-target.h"