/* ELF support for AArch64.
   Copyright 2009-2013 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

/* Notes on implementation:

   Thread Local Store (TLS)

   Overview:

   The implementation currently supports both traditional TLS and TLS
   descriptors, but only general dynamic (GD).

   For traditional TLS the assembler will present us with code
   fragments of the form:

     adrp x0, :tlsgd:foo
			R_AARCH64_TLSGD_ADR_PAGE21(foo)
     add  x0, :tlsgd_lo12:foo
			R_AARCH64_TLSGD_ADD_LO12_NC(foo)
     bl   __tls_get_addr
     nop

   For TLS descriptors the assembler will present us with code
   fragments of the form:

     adrp x0, :tlsdesc:foo		R_AARCH64_TLSDESC_ADR_PAGE(foo)
     ldr  x1, [x0, #:tlsdesc_lo12:foo]	R_AARCH64_TLSDESC_LD64_LO12(foo)
     add  x0, x0, #:tlsdesc_lo12:foo	R_AARCH64_TLSDESC_ADD_LO12(foo)
     .tlsdesccall foo
     blr  x1				R_AARCH64_TLSDESC_CALL(foo)

   The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
   indicate that foo is thread local and should be accessed via the
   traditional TLS mechanism.

   The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
   against foo indicate that 'foo' is thread local and should be accessed
   via a TLS descriptor mechanism.

   The precise instruction sequence is only relevant from the
   perspective of linker relaxation which is currently not implemented.

   The static linker must detect that 'foo' is a TLS object and
   allocate a double GOT entry.  The GOT entry must be created for both
   global and local TLS symbols.  Note that this is different to non-TLS
   local objects which do not need a GOT entry.

   In the traditional TLS mechanism, the double GOT entry is used to
   provide the tls_index structure, containing module and offset
   entries.  The static linker places the relocation R_AARCH64_TLS_DTPMOD64
   on the module entry.  The loader will subsequently fixup this
   relocation with the module identity.

   For global traditional TLS symbols the static linker places an
   R_AARCH64_TLS_DTPREL64 relocation on the offset entry.  The loader
   will subsequently fixup the offset.  For local TLS symbols the static
   linker fixes up the offset.

   In the TLS descriptor mechanism the double GOT entry is used to
   provide the descriptor.  The static linker places the relocation
   R_AARCH64_TLSDESC on the first GOT slot.  The loader will
   subsequently fix this up.

   Implementation:

   The handling of TLS symbols is implemented across a number of
   different backend functions.  The following is a top level view of
   what processing is performed where.

   The TLS implementation maintains state information for each TLS
   symbol.  The state information for local and global symbols is kept
   in different places.  Global symbols use generic BFD structures while
   local symbols use backend specific structures that are allocated and
   maintained entirely by the backend.

   The flow:

   aarch64_check_relocs()

   This function is invoked for each relocation.

   The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
   R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
   spotted.  One time creation of local symbol data structures are
   created when the first local symbol is seen.

   The reference count for a symbol is incremented.  The GOT type for
   each symbol is marked as general dynamic.

   elf64_aarch64_allocate_dynrelocs ()

   For each global with positive reference count we allocate a double
   GOT slot.  For a traditional TLS symbol we allocate space for two
   relocation entries on the GOT, for a TLS descriptor symbol we
   allocate space for one relocation on the slot.  Record the GOT offset
   for this symbol.

   elf64_aarch64_size_dynamic_sections ()

   Iterate all input BFDs, look in the local symbol data structure
   constructed earlier for local TLS symbols and allocate them double
   GOT slots along with space for a single GOT relocation.  Update the
   local symbol structure to record the GOT offset allocated.

   elf64_aarch64_relocate_section ()

   Calls elf64_aarch64_final_link_relocate ()

   Emit the relevant TLS relocations against the GOT for each TLS
   symbol.  For local TLS symbols emit the GOT offset directly.  The GOT
   relocations are emitted once the first time a TLS symbol is
   encountered.  The implementation uses the LSB of the GOT offset to
   flag that the relevant GOT relocations for a symbol have been
   emitted.  All of the TLS code that uses the GOT offset needs to take
   care to mask out this flag bit before using the offset.

   elf64_aarch64_final_link_relocate ()

   Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations.  */

#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "bfd_stdint.h"
#include "elf-bfd.h"
#include "bfdlink.h"
#include "elf/aarch64.h"

147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
/* Non-zero if R_TYPE is one of the TLS descriptor relocations.
   Defined before IS_AARCH64_TLS_RELOC, which expands it.  */
#define IS_AARCH64_TLSDESC_RELOC(R_TYPE)		\
  ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC		\
   || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC	\
   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1		\
   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == R_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == R_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == R_AARCH64_TLSDESC)

/* Non-zero if R_TYPE is any AArch64 TLS relocation handled by this
   backend: general dynamic, initial exec, local exec, the dynamic
   TLS relocations and all TLS descriptor forms.  */
#define IS_AARCH64_TLS_RELOC(R_TYPE)			\
  ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21		\
   || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC		\
   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1	\
   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC	\
   || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
   || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC	\
   || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12	\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12	\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC	\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2		\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1		\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC	\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0		\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC	\
   || (R_TYPE) == R_AARCH64_TLS_DTPMOD64		\
   || (R_TYPE) == R_AARCH64_TLS_DTPREL64		\
   || (R_TYPE) == R_AARCH64_TLS_TPREL64			\
   || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))

/* When non-zero the backend tries to avoid emitting COPY relocations;
   currently disabled.  */
#define ELIMINATE_COPY_RELOCS 0

/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf64_aarch64_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)

/* GOT Entry size - 8 bytes.  */
#define GOT_ENTRY_SIZE		(8)
#define PLT_ENTRY_SIZE		(32)
#define PLT_SMALL_ENTRY_SIZE	(16)
#define PLT_TLSDESC_ENTRY_SIZE	(32)

/* Take the PAGE component of an address or offset (4K pages).  */
#define PG(x) ((x) & ~ 0xfff)
#define PG_OFFSET(x) ((x) & 0xfff)

/* Encoding of the nop instruction.  */
#define INSN_NOP 0xd503201f

/* Size of the PLT jump table, inferred from the number of PLT
   relocations; zero when no .rela.plt section exists.  */
#define aarch64_compute_jump_table_size(htab)		\
  (((htab)->root.srelplt == NULL) ? 0			\
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)

222/* The first entry in a procedure linkage table looks like this
223 if the distance between the PLTGOT and the PLT is < 4GB use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
226 [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
238
239/* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
#define elf_info_to_howto		elf64_aarch64_info_to_howto
#define elf_info_to_howto_rel		elf64_aarch64_info_to_howto

#define AARCH64_ELF_ABI_VERSION		0
#define AARCH64_ELF_OS_ABI_VERSION	0

/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
#define ALL_ONES (~ (bfd_vma) 0)

272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pc_reloffset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
403/* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
f41aef5f
RE
911
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
915 2, /* rightshift */
916 2, /* size (0 = byte,1 = short,2 = long) */
917 19, /* bitsize */
918 TRUE, /* pc_relative */
919 0, /* bitpos */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
927
a06ea964
NC
928 EMPTY_HOWTO (310),
929
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
933 12, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
948 3, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
960};
961
/* HOWTO descriptors for the general-dynamic and initial-exec/local-exec
   TLS relocations.  Indexed by (r_type - R_AARCH64_tls_min); the
   EMPTY_HOWTO slots keep the indexing dense for unassigned type codes.  */
static reloc_howto_type elf64_aarch64_tls_howto_table[] =
{
  EMPTY_HOWTO (512),

  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (R_AARCH64_TLSGD_ADR_PAGE21,	/* type */
	 12,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSGD_ADR_PAGE21",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* ADD: GOT offset G(S) & 0xfff [no overflow check].  */
  HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSGD_ADD_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  EMPTY_HOWTO (515),
  EMPTY_HOWTO (516),
  EMPTY_HOWTO (517),
  EMPTY_HOWTO (518),
  EMPTY_HOWTO (519),
  EMPTY_HOWTO (520),
  EMPTY_HOWTO (521),
  EMPTY_HOWTO (522),
  EMPTY_HOWTO (523),
  EMPTY_HOWTO (524),
  EMPTY_HOWTO (525),
  EMPTY_HOWTO (526),
  EMPTY_HOWTO (527),
  EMPTY_HOWTO (528),
  EMPTY_HOWTO (529),
  EMPTY_HOWTO (530),
  EMPTY_HOWTO (531),
  EMPTY_HOWTO (532),
  EMPTY_HOWTO (533),
  EMPTY_HOWTO (534),
  EMPTY_HOWTO (535),
  EMPTY_HOWTO (536),
  EMPTY_HOWTO (537),
  EMPTY_HOWTO (538),

  /* MOVZ/MOVK: bits [31:16] of the GOT offset for the symbol.  */
  HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1,	/* type */
	 16,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVK: bits [15:0] of the GOT offset, no overflow check.
     NOTE(review): bitsize is 32 although the instruction field and the
     masks are 16 bits wide — looks inconsistent with the G1 entry above;
     confirm against the relocation-processing code before changing.  */
  HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* ADRP: page of the GOT entry holding the thread-pointer offset.  */
  HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,	/* type */
	 12,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* LD64: low 12 bits of the GOT entry address, scaled by 8.  */
  HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,	/* type */
	 3,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xff8,			/* src_mask */
	 0xff8,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Load-literal: 19-bit word-aligned offset to the GOT entry.  */
  HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19,	/* type */
	 2,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1ffffc,		/* src_mask */
	 0x1ffffc,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVZ: bits [47:32] of the TP-relative offset.
     NOTE(review): bitsize 12 with 16-bit masks looks inconsistent for a
     MOVW-class relocation — confirm before changing.  */
  HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2,	/* type */
	 32,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_MOVW_TPREL_G2",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVZ: bits [31:16] of the TP-relative offset.  */
  HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1,	/* type */
	 16,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_MOVW_TPREL_G1",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVK: bits [31:16] of the TP-relative offset, no overflow check.  */
  HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC,	/* type */
	 16,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVZ: bits [15:0] of the TP-relative offset.  */
  HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_MOVW_TPREL_G0",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVK: bits [15:0] of the TP-relative offset, no overflow check.  */
  HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* ADD: bits [23:12] of the TP-relative offset.  */
  HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12,	/* type */
	 12,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_ADD_TPREL_HI12",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* ADD: bits [11:0] of the TP-relative offset.  */
  HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_ADD_TPREL_LO12",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* ADD: bits [11:0] of the TP-relative offset, no overflow check.  */
  HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1204
/* HOWTO descriptors for the TLS-descriptor relocations.  Indexed by
   (r_type - R_AARCH64_tlsdesc_min).  The LDR/ADD/CALL entries have zero
   masks: they only annotate the instruction sequence for relaxation and
   never patch bits themselves.  */
static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
{
  /* Load-literal: 19-bit word-aligned PC-relative offset.  */
  HOWTO (R_AARCH64_TLSDESC_LD64_PREL19,	/* type */
	 2,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LD64_PREL19",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1ffffc,		/* src_mask */
	 0x1ffffc,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* ADR: 21-bit PC-relative immediate.  */
  HOWTO (R_AARCH64_TLSDESC_ADR_PREL21,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADR_PREL21",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (R_AARCH64_TLSDESC_ADR_PAGE,	/* type */
	 12,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 21,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADR_PAGE",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* LD64: GOT offset G(S) & 0xfff.  */
  HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC,	/* type */
	 3,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LD64_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* ADD: GOT offset G(S) & 0xfff.  */
  HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADD_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVZ: bits [31:16] of the descriptor offset.
     NOTE(review): bitsize 12 with 16-bit masks looks inconsistent for a
     MOVW-class relocation — confirm before changing.  */
  HOWTO (R_AARCH64_TLSDESC_OFF_G1,	/* type */
	 16,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_OFF_G1",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* MOVK: bits [15:0] of the descriptor offset, no overflow check.  */
  HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_OFF_G0_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Marker relocation on the LDR of a descriptor sequence; patches
     nothing (zero masks).  */
  HOWTO (R_AARCH64_TLSDESC_LDR,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LDR",	/* name */
	 FALSE,			/* partial_inplace */
	 0x0,			/* src_mask */
	 0x0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Marker relocation on the ADD of a descriptor sequence; patches
     nothing (zero masks).  */
  HOWTO (R_AARCH64_TLSDESC_ADD,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADD",	/* name */
	 FALSE,			/* partial_inplace */
	 0x0,			/* src_mask */
	 0x0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Marker relocation on the BLR of a descriptor sequence; patches
     nothing (zero masks).  */
  HOWTO (R_AARCH64_TLSDESC_CALL,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_CALL",	/* name */
	 FALSE,			/* partial_inplace */
	 0x0,			/* src_mask */
	 0x0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1351
1352static reloc_howto_type *
1353elf64_aarch64_howto_from_type (unsigned int r_type)
1354{
1355 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1356 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1357
1358 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1359 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1360
1361 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1362 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1363
1364 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1365 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1366
1367 switch (r_type)
1368 {
1369 case R_AARCH64_NONE:
1370 return &elf64_aarch64_howto_none;
1371
1372 }
1373 bfd_set_error (bfd_error_bad_value);
1374 return NULL;
1375}
1376
1377static void
1378elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1379 Elf_Internal_Rela *elf_reloc)
1380{
1381 unsigned int r_type;
1382
1383 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1384 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1385}
1386
/* One entry of the BFD-generic to ELF relocation-code mapping table.  */
struct elf64_aarch64_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD code.  */
  unsigned int elf_reloc_val;			/* AArch64 ELF r_type.  */
};
1392
/* All entries in this list must also be present in
   elf64_aarch64_howto_table.  Searched linearly by
   elf64_aarch64_reloc_type_lookup.  */
static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
{
  {BFD_RELOC_NONE, R_AARCH64_NONE},

  /* Basic data relocations.  */
  {BFD_RELOC_CTOR, R_AARCH64_ABS64},
  {BFD_RELOC_64, R_AARCH64_ABS64},
  {BFD_RELOC_32, R_AARCH64_ABS32},
  {BFD_RELOC_16, R_AARCH64_ABS16},
  {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
  {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
  {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},

  /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
     value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
  {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
  {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     signed value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     unsigned value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
  {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},

  /* Relocations to generate 19, 21 and 33 bit PC-relative load/store.  */
  {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
  {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
  {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
  {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
  {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},

  /* Relocations for control-flow instructions.  */
  {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
  {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
  {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
  {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},

  /* Relocations for PIC.  */
  {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
  {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
  {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},

  /* Relocations for TLS.  */
  {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
  {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
  {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
   R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
   R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
  {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
  {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
  {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
  {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
  {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
  {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
  {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
};
1489
1490static reloc_howto_type *
1491elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1492 bfd_reloc_code_real_type code)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1497 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1500
1501 bfd_set_error (bfd_error_bad_value);
1502 return NULL;
1503}
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 const char *r_name)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1512 if (elf64_aarch64_howto_table[i].name != NULL
1513 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1514 return &elf64_aarch64_howto_table[i];
1515
1516 return NULL;
1517}
1518
/* Support for core dump NOTE sections.  */

/* Parse an NT_PRSTATUS note from a Linux/arm64 core file: record the
   signal and LWP id in the BFD's tdata and expose the register dump as
   a ".reg/<pid>" pseudo-section.  Notes of any other size are rejected
   (the struct layout is only known for this one kernel ABI).

   The magic numbers are byte offsets into struct elf_prstatus on
   Linux/arm64: pr_cursig at 12, pr_pid at 32, pr_reg at 112 spanning
   272 bytes.  */

static bfd_boolean
elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 408:		/* sizeof(struct elf_prstatus) on Linux/arm64.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 272;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
1552
a06ea964
NC
1553#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1554#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1555#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1556#define TARGET_BIG_NAME "elf64-bigaarch64"
1557
cd6fa7fd
YZ
1558#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1559
a06ea964
NC
/* A single 32-bit AArch64 instruction word.  */
typedef unsigned long int insn32;

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define STUB_ENTRY_NAME "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"

/* Reach of a B/BL instruction: a signed 26-bit word offset,
   i.e. +/- 128MB.  */
#define AARCH64_MAX_FWD_BRANCH_OFFSET \
  (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))

/* Range of the signed 21-bit page immediate of an ADRP.  */
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1579
1580static int
1581aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1582{
1583 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1584 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1585}
1586
1587static int
1588aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1589{
1590 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1591 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1593}
1594
/* Template for a short-range (ADRP-reachable) branch veneer; the two
   relocations noted below are applied when the stub is built.  */
static const uint32_t aarch64_adrp_branch_stub [] =
{
  0x90000010,			/*	adrp	ip0, X
				   R_AARCH64_ADR_HI21_PCREL(X)  */
  0x91000210,			/*	add	ip0, ip0, :lo12:X
				   R_AARCH64_ADD_ABS_LO12_NC(X)  */
  0xd61f0200,			/*	br	ip0  */
};
1603
/* Template for a full-range branch veneer: loads a 64-bit offset from
   the literal at the end of the stub and adds the stub's own address,
   so it is position independent.  The two zero words hold the xword
   literal patched via R_AARCH64_PREL64.  */
static const uint32_t aarch64_long_branch_stub[] =
{
  0x58000090,			/*	ldr	ip0, 1f  */
  0x10000011,			/*	adr	ip1, #0  */
  0x8b110210,			/*	add	ip0, ip0, ip1  */
  0xd61f0200,			/*	br	ip0  */
  0x00000000,			/* 1:	.xword
				   R_AARCH64_PREL64(X) + 12
				 */
  0x00000000,
};
1615
1616/* Section name for stubs is the associated section name plus this
1617 string. */
1618#define STUB_SUFFIX ".stub"
1619
/* Which veneer template a stub uses (see the stub arrays above).  */
enum elf64_aarch64_stub_type
{
  aarch64_stub_none,		/* No stub required.  */
  aarch64_stub_adrp_branch,	/* ADRP/ADD/BR short-range veneer.  */
  aarch64_stub_long_branch,	/* Literal-based full-range veneer.  */
};
1626
/* Hash-table entry describing one branch veneer (stub): where it lives,
   where it jumps, and which template it uses.  */
struct elf64_aarch64_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Veneer template selector.  */
  enum elf64_aarch64_stub_type stub_type;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf64_aarch64_link_hash_entry *h;

  /* Destination symbol type.  */
  unsigned char st_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
1660
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf64_elf_section_map
{
  bfd_vma vma;			/* Start address of the mapped range.  */
  char type;			/* Mapping symbol class for the range.  */
}
elf64_aarch64_section_map;
1670
1671
/* AArch64-specific per-section data: the generic ELF section data plus
   a growable array of mapping-symbol entries.  */
typedef struct _aarch64_elf_section_data
{
  struct bfd_elf_section_data elf;	/* Must come first.  */
  unsigned int mapcount;		/* Entries used in MAP.  */
  unsigned int mapsize;			/* Entries allocated in MAP.  */
  elf64_aarch64_section_map *map;	/* Mapping-symbol array.  */
}
_aarch64_elf_section_data;
1680
1681#define elf64_aarch64_section_data(sec) \
1682 ((_aarch64_elf_section_data *) elf_section_data (sec))
1683
1684/* The size of the thread control block. */
1685#define TCB_SIZE 16
1686
/* Per-local-symbol GOT bookkeeping (locals have no hash entry to hang
   this information from).  */
struct elf_aarch64_local_symbol
{
  unsigned int got_type;	/* GOT_* bit mask for this symbol.  */
  bfd_signed_vma got_refcount;	/* Number of GOT references seen.  */
  bfd_vma got_offset;		/* Assigned offset in the GOT.  */

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
     offset is from the end of the jump table and reserved entries
     within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not be
     allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
};
1701
/* AArch64-specific per-BFD data, extending the generic ELF tdata.  */
struct elf_aarch64_obj_tdata
{
  struct elf_obj_tdata root;	/* Must come first.  */

  /* local symbol descriptors */
  struct elf_aarch64_local_symbol *locals;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
1715
1716#define elf_aarch64_tdata(bfd) \
1717 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1718
1719#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1720
1721#define is_aarch64_elf(bfd) \
1722 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1723 && elf_tdata (bfd) != NULL \
1724 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1725
/* bfd_mkobject hook: allocate the AArch64-sized tdata for ABFD and tag
   it with AARCH64_ELF_DATA so is_aarch64_elf can recognise it.  */
static bfd_boolean
elf64_aarch64_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
				  AARCH64_ELF_DATA);
}
1732
1733/* The AArch64 linker needs to keep track of the number of relocs that it
1734 decides to copy in check_relocs for each symbol. This is so that
1735 it can discard PC relative relocs if it doesn't need them when
1736 linking with -Bsymbolic. We store the information in a field
1737 extending the regular ELF linker hash table. */
1738
1739/* This structure keeps track of the number of relocs we have copied
1740 for a given symbol. */
/* This structure keeps track of the number of relocs we have copied
   for a given symbol.  One node per dynobj section, linked per symbol.  */
struct elf64_aarch64_relocs_copied
{
  /* Next section.  */
  struct elf64_aarch64_relocs_copied *next;
  /* A section in dynobj.  */
  asection *section;
  /* Number of relocs copied in this section.  */
  bfd_size_type count;
  /* Number of PC-relative relocs copied in this section.  */
  bfd_size_type pc_count;
};
1752
1753#define elf64_aarch64_hash_entry(ent) \
1754 ((struct elf64_aarch64_link_hash_entry *)(ent))
1755
1756#define GOT_UNKNOWN 0
1757#define GOT_NORMAL 1
1758#define GOT_TLS_GD 2
1759#define GOT_TLS_IE 4
1760#define GOT_TLSDESC_GD 8
1761
1762#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1763
1764/* AArch64 ELF linker hash entry. */
/* AArch64 ELF linker hash entry: extends the generic entry with GOT,
   PLT and copied-reloc bookkeeping.  */
struct elf64_aarch64_link_hash_entry
{
  struct elf_link_hash_entry root;	/* Must come first.  */

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* Number of PC relative relocs copied for this symbol.  */
  struct elf64_aarch64_relocs_copied *relocs_copied;

  /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
  bfd_signed_vma plt_got_offset;

  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol (GOT_NORMAL, GOT_TLS_GD, ...).  */
  unsigned int got_type;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf64_aarch64_stub_hash_entry *stub_cache;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The offset
     is from the end of the jump table and reserved entries within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not
     be allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
};
1795
1796static unsigned int
1797elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1798 bfd *abfd,
1799 unsigned long r_symndx)
1800{
1801 if (h)
1802 return elf64_aarch64_hash_entry (h)->got_type;
1803
1804 if (! elf64_aarch64_locals (abfd))
1805 return GOT_UNKNOWN;
1806
1807 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1808}
1809
1810/* Traverse an AArch64 ELF linker hash table. */
1811#define elf64_aarch64_link_hash_traverse(table, func, info) \
1812 (elf_link_hash_traverse \
1813 (&(table)->root, \
1814 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1815 (info)))
1816
1817/* Get the AArch64 elf linker hash table from a link_info structure. */
1818#define elf64_aarch64_hash_table(info) \
1819 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1820
1821#define aarch64_stub_hash_lookup(table, string, create, copy) \
1822 ((struct elf64_aarch64_stub_hash_entry *) \
1823 bfd_hash_lookup ((table), (string), (create), (copy)))
1824
1825/* AArch64 ELF linker hash table. */
/* AArch64 ELF linker hash table: extends the generic table with PLT
   layout parameters, stub-generation state and TLSDESC bookkeeping.  */
struct elf64_aarch64_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd *obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub
  {
    /* This is the section to which stubs in the group will be
       attached.  */
    asection *link_sec;
    /* The stub section.  */
    asection *stub_sec;
  } *stub_group;

  /* Assorted information used by elf64_aarch64_size_stubs.  */
  unsigned int bfd_count;
  int top_index;
  asection **input_list;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma tlsdesc_plt;

  /* The GOT offset for the lazy trampoline.  Communicated to the
     loader via DT_TLSDESC_GOT.  The magic value (bfd_vma) -1
     indicates an offset is not allocated.  */
  bfd_vma dt_tlsdesc_got;
};
1891
1892
1893/* Return non-zero if the indicated VALUE has overflowed the maximum
1894 range expressible by a unsigned number with the indicated number of
1895 BITS. */
1896
1897static bfd_reloc_status_type
1898aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1899{
1900 bfd_vma lim;
1901 if (bits >= sizeof (bfd_vma) * 8)
1902 return bfd_reloc_ok;
1903 lim = (bfd_vma) 1 << bits;
1904 if (value >= lim)
1905 return bfd_reloc_overflow;
1906 return bfd_reloc_ok;
1907}
1908
1909
1910/* Return non-zero if the indicated VALUE has overflowed the maximum
1911 range expressible by an signed number with the indicated number of
1912 BITS. */
1913
1914static bfd_reloc_status_type
1915aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1916{
1917 bfd_signed_vma svalue = (bfd_signed_vma) value;
1918 bfd_signed_vma lim;
1919
1920 if (bits >= sizeof (bfd_vma) * 8)
1921 return bfd_reloc_ok;
1922 lim = (bfd_signed_vma) 1 << (bits - 1);
1923 if (svalue < -lim || svalue >= lim)
1924 return bfd_reloc_overflow;
1925 return bfd_reloc_ok;
1926}
1927
1928/* Create an entry in an AArch64 ELF linker hash table. */
1929
1930static struct bfd_hash_entry *
1931elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1932 struct bfd_hash_table *table,
1933 const char *string)
1934{
1935 struct elf64_aarch64_link_hash_entry *ret =
1936 (struct elf64_aarch64_link_hash_entry *) entry;
1937
1938 /* Allocate the structure if it has not already been allocated by a
1939 subclass. */
1940 if (ret == NULL)
1941 ret = bfd_hash_allocate (table,
1942 sizeof (struct elf64_aarch64_link_hash_entry));
1943 if (ret == NULL)
1944 return (struct bfd_hash_entry *) ret;
1945
1946 /* Call the allocation method of the superclass. */
1947 ret = ((struct elf64_aarch64_link_hash_entry *)
1948 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1949 table, string));
1950 if (ret != NULL)
1951 {
1952 ret->dyn_relocs = NULL;
1953 ret->relocs_copied = NULL;
1954 ret->got_type = GOT_UNKNOWN;
1955 ret->plt_got_offset = (bfd_vma) - 1;
1956 ret->stub_cache = NULL;
1957 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1958 }
1959
1960 return (struct bfd_hash_entry *) ret;
1961}
1962
1963/* Initialize an entry in the stub hash table. */
1964
1965static struct bfd_hash_entry *
1966stub_hash_newfunc (struct bfd_hash_entry *entry,
1967 struct bfd_hash_table *table, const char *string)
1968{
1969 /* Allocate the structure if it has not already been allocated by a
1970 subclass. */
1971 if (entry == NULL)
1972 {
1973 entry = bfd_hash_allocate (table,
1974 sizeof (struct
1975 elf64_aarch64_stub_hash_entry));
1976 if (entry == NULL)
1977 return entry;
1978 }
1979
1980 /* Call the allocation method of the superclass. */
1981 entry = bfd_hash_newfunc (entry, table, string);
1982 if (entry != NULL)
1983 {
1984 struct elf64_aarch64_stub_hash_entry *eh;
1985
1986 /* Initialize the local fields. */
1987 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1988 eh->stub_sec = NULL;
1989 eh->stub_offset = 0;
1990 eh->target_value = 0;
1991 eh->target_section = NULL;
1992 eh->stub_type = aarch64_stub_none;
1993 eh->h = NULL;
1994 eh->id_sec = NULL;
1995 }
1996
1997 return entry;
1998}
1999
2000
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called
   when IND is made indirect to (or a warning for) DIR: the dynamic
   reloc lists and GOT type recorded against IND are transferred and
   merged into DIR before the generic ELF copy runs.  */

static void
elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
				    struct elf_link_hash_entry *dir,
				    struct elf_link_hash_entry *ind)
{
  struct elf64_aarch64_link_hash_entry *edir, *eind;

  edir = (struct elf64_aarch64_link_hash_entry *) dir;
  eind = (struct elf64_aarch64_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an existing record for the same section; if
		 found, fold P's counts into it and unlink P.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      /* No match: keep P in the list and advance.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append DIR's original list after the unmerged remainder.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (eind->relocs_copied != NULL)
    {
      if (edir->relocs_copied != NULL)
	{
	  struct elf64_aarch64_relocs_copied **pp;
	  struct elf64_aarch64_relocs_copied *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.
	     (Same merge scheme as for dyn_relocs above.)  */
	  for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
	    {
	      struct elf64_aarch64_relocs_copied *q;

	      for (q = edir->relocs_copied; q != NULL; q = q->next)
		if (q->section == p->section)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  *pp = edir->relocs_copied;
	}

      edir->relocs_copied = eind->relocs_copied;
      eind->relocs_copied = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  Only take the indirect symbol's GOT type
	 if the direct symbol has no GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->got_type = eind->got_type;
	  eind->got_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
2087
2088/* Create an AArch64 elf linker hash table. */
2089
2090static struct bfd_link_hash_table *
2091elf64_aarch64_link_hash_table_create (bfd *abfd)
2092{
2093 struct elf64_aarch64_link_hash_table *ret;
2094 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2095
7bf52ea2 2096 ret = bfd_zmalloc (amt);
a06ea964
NC
2097 if (ret == NULL)
2098 return NULL;
2099
2100 if (!_bfd_elf_link_hash_table_init
2101 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2102 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2103 {
2104 free (ret);
2105 return NULL;
2106 }
2107
a06ea964
NC
2108 ret->plt_header_size = PLT_ENTRY_SIZE;
2109 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
a06ea964 2110 ret->obfd = abfd;
a06ea964
NC
2111 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2112
2113 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2114 sizeof (struct elf64_aarch64_stub_hash_entry)))
2115 {
2116 free (ret);
2117 return NULL;
2118 }
2119
2120 return &ret->root.root;
2121}
2122
/* Free the derived linker hash table.  Releases the AArch64 stub hash
   table first, then hands the rest (including the table structure
   itself) to the generic ELF free routine.  */

static void
elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct elf64_aarch64_link_hash_table *ret
    = (struct elf64_aarch64_link_hash_table *) hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (hash);
}
2134
/* Compute the value to be stored for relocation type R_TYPE applied at
   address PLACE, against symbol value VALUE with ADDEND.  WEAK_UNDEF_P
   indicates an undefined weak symbol, for which PC-relative forms are
   resolved against PLACE itself (yielding a zero-based offset).  PG
   and PG_OFFSET (defined earlier in this file) presumably split an
   address into its page and low-order page-offset parts.  */

static bfd_vma
aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
			    bfd_vma addend, bfd_boolean weak_undef_p)
{
  switch (r_type)
    {
    /* No computation required.  */
    case R_AARCH64_TLSDESC_CALL:
    case R_AARCH64_NONE:
    case R_AARCH64_NULL:
      break;

    /* PC-relative data/branch forms: symbol + addend - place.  */
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_CONDBR19:
    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_PREL16:
    case R_AARCH64_PREL32:
    case R_AARCH64_PREL64:
    case R_AARCH64_TSTBR14:
      if (weak_undef_p)
	value = place;
      value = value + addend - place;
      break;

    /* Call/jump: PC-relative, but no weak-undef redirection here.  */
    case R_AARCH64_CALL26:
    case R_AARCH64_JUMP26:
      value = value + addend - place;
      break;

    /* Absolute forms: symbol + addend.  */
    case R_AARCH64_ABS16:
    case R_AARCH64_ABS32:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      value = value + addend;
      break;

    /* ADRP: page of the target minus page of the place.  */
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      if (weak_undef_p)
	value = PG (place);
      value = PG (value + addend) - PG (place);
      break;

    case R_AARCH64_GOT_LD_PREL19:
      value = value + addend - place;
      break;

    /* Page-relative GOT/TLS forms.  */
    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
      value = PG (value + addend) - PG (place);
      break;

    /* Low 12 bits (page offset) of the target.  */
    case R_AARCH64_ADD_ABS_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_TLSDESC_ADD:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSDESC_LDR:
    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      value = PG_OFFSET (value + addend);
      break;

    /* TPREL MOVW pieces: mask out the relevant 16-bit group.  */
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      value = (value + addend) & (bfd_vma) 0xffff0000;
      break;
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
      value = (value + addend) & (bfd_vma) 0xfff000;
      break;

    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      value = (value + addend) & (bfd_vma) 0xffff;
      break;

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
      value = (value + addend) & ~(bfd_vma) 0xffffffff;
      value -= place & ~(bfd_vma) 0xffffffff;
      break;
    }
  return value;
}
2234
2235static bfd_boolean
2236aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2237 bfd_vma offset, bfd_vma value)
2238{
2239 reloc_howto_type *howto;
2240 bfd_vma place;
2241
2242 howto = elf64_aarch64_howto_from_type (r_type);
2243 place = (input_section->output_section->vma + input_section->output_offset
2244 + offset);
2245 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2246 return bfd_elf_aarch64_put_addend (input_bfd,
2247 input_section->contents + offset,
2248 howto, value);
2249}
2250
2251static enum elf64_aarch64_stub_type
2252aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2253{
2254 if (aarch64_valid_for_adrp_p (value, place))
2255 return aarch64_stub_adrp_branch;
2256 return aarch64_stub_long_branch;
2257}
2258
2259/* Determine the type of stub needed, if any, for a call. */
2260
2261static enum elf64_aarch64_stub_type
2262aarch64_type_of_stub (struct bfd_link_info *info,
2263 asection *input_sec,
2264 const Elf_Internal_Rela *rel,
2265 unsigned char st_type,
2266 struct elf64_aarch64_link_hash_entry *hash,
2267 bfd_vma destination)
2268{
2269 bfd_vma location;
2270 bfd_signed_vma branch_offset;
2271 unsigned int r_type;
2272 struct elf64_aarch64_link_hash_table *globals;
2273 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2274 bfd_boolean via_plt_p;
2275
2276 if (st_type != STT_FUNC)
2277 return stub_type;
2278
2279 globals = elf64_aarch64_hash_table (info);
2280 via_plt_p = (globals->root.splt != NULL && hash != NULL
2281 && hash->root.plt.offset != (bfd_vma) - 1);
2282
2283 if (via_plt_p)
2284 return stub_type;
2285
2286 /* Determine where the call point is. */
2287 location = (input_sec->output_offset
2288 + input_sec->output_section->vma + rel->r_offset);
2289
2290 branch_offset = (bfd_signed_vma) (destination - location);
2291
2292 r_type = ELF64_R_TYPE (rel->r_info);
2293
2294 /* We don't want to redirect any old unconditional jump in this way,
2295 only one which is being used for a sibcall, where it is
2296 acceptable for the IP0 and IP1 registers to be clobbered. */
2297 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2298 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2299 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2300 {
2301 stub_type = aarch64_stub_long_branch;
2302 }
2303
2304 return stub_type;
2305}
2306
2307/* Build a name for an entry in the stub hash table. */
2308
2309static char *
2310elf64_aarch64_stub_name (const asection *input_section,
2311 const asection *sym_sec,
2312 const struct elf64_aarch64_link_hash_entry *hash,
2313 const Elf_Internal_Rela *rel)
2314{
2315 char *stub_name;
2316 bfd_size_type len;
2317
2318 if (hash)
2319 {
2320 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2321 stub_name = bfd_malloc (len);
2322 if (stub_name != NULL)
2323 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2324 (unsigned int) input_section->id,
2325 hash->root.root.root.string,
2326 rel->r_addend);
2327 }
2328 else
2329 {
2330 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2331 stub_name = bfd_malloc (len);
2332 if (stub_name != NULL)
2333 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2334 (unsigned int) input_section->id,
2335 (unsigned int) sym_sec->id,
2336 (unsigned int) ELF64_R_SYM (rel->r_info),
2337 rel->r_addend);
2338 }
2339
2340 return stub_name;
2341}
2342
2343/* Look up an entry in the stub hash. Stub entries are cached because
2344 creating the stub name takes a bit of time. */
2345
2346static struct elf64_aarch64_stub_hash_entry *
2347elf64_aarch64_get_stub_entry (const asection *input_section,
2348 const asection *sym_sec,
2349 struct elf_link_hash_entry *hash,
2350 const Elf_Internal_Rela *rel,
2351 struct elf64_aarch64_link_hash_table *htab)
2352{
2353 struct elf64_aarch64_stub_hash_entry *stub_entry;
2354 struct elf64_aarch64_link_hash_entry *h =
2355 (struct elf64_aarch64_link_hash_entry *) hash;
2356 const asection *id_sec;
2357
2358 if ((input_section->flags & SEC_CODE) == 0)
2359 return NULL;
2360
2361 /* If this input section is part of a group of sections sharing one
2362 stub section, then use the id of the first section in the group.
2363 Stub names need to include a section id, as there may well be
2364 more than one stub used to reach say, printf, and we need to
2365 distinguish between them. */
2366 id_sec = htab->stub_group[input_section->id].link_sec;
2367
2368 if (h != NULL && h->stub_cache != NULL
2369 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2370 {
2371 stub_entry = h->stub_cache;
2372 }
2373 else
2374 {
2375 char *stub_name;
2376
2377 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2378 if (stub_name == NULL)
2379 return NULL;
2380
2381 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2382 stub_name, FALSE, FALSE);
2383 if (h != NULL)
2384 h->stub_cache = stub_entry;
2385
2386 free (stub_name);
2387 }
2388
2389 return stub_entry;
2390}
2391
2392/* Add a new stub entry to the stub hash. Not all fields of the new
2393 stub entry are initialised. */
2394
2395static struct elf64_aarch64_stub_hash_entry *
2396elf64_aarch64_add_stub (const char *stub_name,
2397 asection *section,
2398 struct elf64_aarch64_link_hash_table *htab)
2399{
2400 asection *link_sec;
2401 asection *stub_sec;
2402 struct elf64_aarch64_stub_hash_entry *stub_entry;
2403
2404 link_sec = htab->stub_group[section->id].link_sec;
2405 stub_sec = htab->stub_group[section->id].stub_sec;
2406 if (stub_sec == NULL)
2407 {
2408 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2409 if (stub_sec == NULL)
2410 {
2411 size_t namelen;
2412 bfd_size_type len;
2413 char *s_name;
2414
2415 namelen = strlen (link_sec->name);
2416 len = namelen + sizeof (STUB_SUFFIX);
2417 s_name = bfd_alloc (htab->stub_bfd, len);
2418 if (s_name == NULL)
2419 return NULL;
2420
2421 memcpy (s_name, link_sec->name, namelen);
2422 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2423 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2424 if (stub_sec == NULL)
2425 return NULL;
2426 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2427 }
2428 htab->stub_group[section->id].stub_sec = stub_sec;
2429 }
2430
2431 /* Enter this entry into the linker stub hash table. */
2432 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2433 TRUE, FALSE);
2434 if (stub_entry == NULL)
2435 {
2436 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2437 section->owner, stub_name);
2438 return NULL;
2439 }
2440
2441 stub_entry->stub_sec = stub_sec;
2442 stub_entry->stub_offset = 0;
2443 stub_entry->id_sec = link_sec;
2444
2445 return stub_entry;
2446}
2447
/* Build one stub: emit the instruction template for GEN_ENTRY's stub
   type into its stub section and resolve the stub's internal
   relocations against the target address.  Intended as a traversal
   callback over the stub hash table.  */

static bfd_boolean
aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf64_aarch64_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  unsigned int template_size;
  const uint32_t *template;
  unsigned int i;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;

  stub_sec = stub_entry->stub_sec;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  if (stub_entry->stub_type == aarch64_stub_long_branch)
    {
      bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
		       + stub_sec->output_offset);

      /* See if we can relax the stub.  */
      if (aarch64_valid_for_adrp_p (sym_value, place))
	stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
    }

  /* Pick the instruction template for the (possibly relaxed) type.  */
  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      template = aarch64_adrp_branch_stub;
      template_size = sizeof (aarch64_adrp_branch_stub);
      break;
    case aarch64_stub_long_branch:
      template = aarch64_long_branch_stub;
      template_size = sizeof (aarch64_long_branch_stub);
      break;
    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Emit the template as little-endian 32-bit words.  */
  for (i = 0; i < (template_size / sizeof template[0]); i++)
    {
      bfd_putl32 (template[i], loc);
      loc += 4;
    }

  /* Account for the stub rounded up to 8 bytes, matching
     aarch64_size_one_stub.  */
  template_size = (template_size + 7) & ~7;
  stub_sec->size += template_size;

  /* Patch the stub's own relocations against the destination.  */
  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
			    stub_entry->stub_offset, sym_value))
	/* The stub would not have been relaxed if the offset was out
	   of range.  */
	BFD_FAIL ();

      /* Fix up the add immediate (low 12 bits of the destination).  */
      _bfd_final_link_relocate
	(elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
	 stub_bfd,
	 stub_sec,
	 stub_sec->contents,
	 stub_entry->stub_offset + 4,
	 sym_value,
	 0);
      break;

    case aarch64_stub_long_branch:
      /* We want the value relative to the address 12 bytes back from the
	 value itself.  */
      _bfd_final_link_relocate (elf64_aarch64_howto_from_type
				(R_AARCH64_PREL64), stub_bfd, stub_sec,
				stub_sec->contents,
				stub_entry->stub_offset + 16,
				sym_value + 12, 0);
      break;
    default:
      break;
    }

  return TRUE;
}
2545
2546/* As above, but don't actually build the stub. Just bump offset so
2547 we know stub section sizes. */
2548
2549static bfd_boolean
2550aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2551 void *in_arg ATTRIBUTE_UNUSED)
2552{
2553 struct elf64_aarch64_stub_hash_entry *stub_entry;
2554 int size;
2555
2556 /* Massage our args to the form they really have. */
2557 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2558
2559 switch (stub_entry->stub_type)
2560 {
2561 case aarch64_stub_adrp_branch:
2562 size = sizeof (aarch64_adrp_branch_stub);
2563 break;
2564 case aarch64_stub_long_branch:
2565 size = sizeof (aarch64_long_branch_stub);
2566 break;
2567 default:
2568 BFD_FAIL ();
2569 return FALSE;
2570 break;
2571 }
2572
2573 size = (size + 7) & ~7;
2574 stub_entry->stub_sec->size += size;
2575 return TRUE;
2576}
2577
2578/* External entry points for sizing and building linker stubs. */
2579
/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf64_aarch64_setup_section_lists (bfd *output_bfd,
				   struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf64_aarch64_link_hash_table *htab =
    elf64_aarch64_hash_table (info);

  if (!is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL; input_bfd = input_bfd->link_next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL; section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id; zero-initialised so
     every link_sec/stub_sec starts out NULL.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL; section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL entry: an empty list ready to be
     populated by elf64_aarch64_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL; section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
2652
2653/* Used by elf64_aarch64_next_input_section and group_sections. */
2654#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2655
2656/* The linker repeatedly calls this function for each input section,
2657 in the order that input sections are linked into output sections.
2658 Build lists of input sections to determine groupings between which
2659 we may insert linker stubs. */
2660
2661void
2662elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2663{
2664 struct elf64_aarch64_link_hash_table *htab =
2665 elf64_aarch64_hash_table (info);
2666
2667 if (isec->output_section->index <= htab->top_index)
2668 {
2669 asection **list = htab->input_list + isec->output_section->index;
2670
2671 if (*list != bfd_abs_section_ptr)
2672 {
2673 /* Steal the link_sec pointer for our list. */
2674 /* This happens to make the list in reverse order,
2675 which is what we want. */
2676 PREV_SEC (isec) = *list;
2677 *list = isec;
2678 }
2679 }
2680}
2681
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the beginning of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf64_aarch64_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_before_branch)
{
  /* Walk every per-output-section input list built by
     elf64_aarch64_next_input_section (lists are in reverse link
     order, chained through PREV_SEC).  */
  asection **list = htab->input_list + htab->top_index;

  do
    {
      asection *tail = *list;

      /* Entries still marked with bfd_abs_section_ptr are
	 non-code output sections; skip them.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      while (tail != NULL)
	{
	  asection *curr;
	  asection *prev;
	  bfd_size_type total;

	  /* Walk backwards from TAIL while the accumulated span stays
	     under stub_group_size; CURR ends up as the head of the
	     group.  */
	  curr = tail;
	  total = tail->size;
	  while ((prev = PREV_SEC (curr)) != NULL
		 && ((total += curr->output_offset - prev->output_offset)
		     < stub_group_size))
	    curr = prev;

	  /* OK, the size from the start of CURR to the end is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the tail section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      prev = PREV_SEC (tail);
	      /* Set up this stub group.  */
	      htab->stub_group[tail->id].link_sec = curr;
	    }
	  while (tail != curr && (tail = prev) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes before the stub section can be handled by it too.  */
	  if (!stubs_always_before_branch)
	    {
	      total = 0;
	      while (prev != NULL
		     && ((total += tail->output_offset - prev->output_offset)
			 < stub_group_size))
		{
		  tail = prev;
		  prev = PREV_SEC (tail);
		  htab->stub_group[tail->id].link_sec = curr;
		}
	    }
	  tail = prev;
	}
    }
  while (list-- != htab->input_list);

  /* The lists have served their purpose; release the array (the
     PREV_SEC links live in stub_group and remain valid).  */
  free (htab->input_list);
}
2752
2753#undef PREV_SEC
2754
2755/* Determine and set the size of the stub section for a final link.
2756
2757 The basic idea here is to examine all the relocations looking for
2758 PC-relative calls to a target that is unreachable with a "bl"
2759 instruction. */
2760
2761bfd_boolean
2762elf64_aarch64_size_stubs (bfd *output_bfd,
2763 bfd *stub_bfd,
2764 struct bfd_link_info *info,
2765 bfd_signed_vma group_size,
2766 asection * (*add_stub_section) (const char *,
2767 asection *),
2768 void (*layout_sections_again) (void))
2769{
2770 bfd_size_type stub_group_size;
2771 bfd_boolean stubs_always_before_branch;
2772 bfd_boolean stub_changed = 0;
2773 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2774
2775 /* Propagate mach to stub bfd, because it may not have been
2776 finalized when we created stub_bfd. */
2777 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2778 bfd_get_mach (output_bfd));
2779
2780 /* Stash our params away. */
2781 htab->stub_bfd = stub_bfd;
2782 htab->add_stub_section = add_stub_section;
2783 htab->layout_sections_again = layout_sections_again;
2784 stubs_always_before_branch = group_size < 0;
2785 if (group_size < 0)
2786 stub_group_size = -group_size;
2787 else
2788 stub_group_size = group_size;
2789
2790 if (stub_group_size == 1)
2791 {
2792 /* Default values. */
2793 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2794 stub_group_size = 127 * 1024 * 1024;
2795 }
2796
2797 group_sections (htab, stub_group_size, stubs_always_before_branch);
2798
2799 while (1)
2800 {
2801 bfd *input_bfd;
2802 unsigned int bfd_indx;
2803 asection *stub_sec;
2804
2805 for (input_bfd = info->input_bfds, bfd_indx = 0;
2806 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2807 {
2808 Elf_Internal_Shdr *symtab_hdr;
2809 asection *section;
2810 Elf_Internal_Sym *local_syms = NULL;
2811
2812 /* We'll need the symbol table in a second. */
2813 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2814 if (symtab_hdr->sh_info == 0)
2815 continue;
2816
2817 /* Walk over each section attached to the input bfd. */
2818 for (section = input_bfd->sections;
2819 section != NULL; section = section->next)
2820 {
2821 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2822
2823 /* If there aren't any relocs, then there's nothing more
2824 to do. */
2825 if ((section->flags & SEC_RELOC) == 0
2826 || section->reloc_count == 0
2827 || (section->flags & SEC_CODE) == 0)
2828 continue;
2829
2830 /* If this section is a link-once section that will be
2831 discarded, then don't create any stubs. */
2832 if (section->output_section == NULL
2833 || section->output_section->owner != output_bfd)
2834 continue;
2835
2836 /* Get the relocs. */
2837 internal_relocs
2838 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2839 NULL, info->keep_memory);
2840 if (internal_relocs == NULL)
2841 goto error_ret_free_local;
2842
2843 /* Now examine each relocation. */
2844 irela = internal_relocs;
2845 irelaend = irela + section->reloc_count;
2846 for (; irela < irelaend; irela++)
2847 {
2848 unsigned int r_type, r_indx;
2849 enum elf64_aarch64_stub_type stub_type;
2850 struct elf64_aarch64_stub_hash_entry *stub_entry;
2851 asection *sym_sec;
2852 bfd_vma sym_value;
2853 bfd_vma destination;
2854 struct elf64_aarch64_link_hash_entry *hash;
2855 const char *sym_name;
2856 char *stub_name;
2857 const asection *id_sec;
2858 unsigned char st_type;
2859 bfd_size_type len;
2860
2861 r_type = ELF64_R_TYPE (irela->r_info);
2862 r_indx = ELF64_R_SYM (irela->r_info);
2863
2864 if (r_type >= (unsigned int) R_AARCH64_end)
2865 {
2866 bfd_set_error (bfd_error_bad_value);
2867 error_ret_free_internal:
2868 if (elf_section_data (section)->relocs == NULL)
2869 free (internal_relocs);
2870 goto error_ret_free_local;
2871 }
2872
2873 /* Only look for stubs on unconditional branch and
2874 branch and link instructions. */
2875 if (r_type != (unsigned int) R_AARCH64_CALL26
2876 && r_type != (unsigned int) R_AARCH64_JUMP26)
2877 continue;
2878
2879 /* Now determine the call target, its name, value,
2880 section. */
2881 sym_sec = NULL;
2882 sym_value = 0;
2883 destination = 0;
2884 hash = NULL;
2885 sym_name = NULL;
2886 if (r_indx < symtab_hdr->sh_info)
2887 {
2888 /* It's a local symbol. */
2889 Elf_Internal_Sym *sym;
2890 Elf_Internal_Shdr *hdr;
2891
2892 if (local_syms == NULL)
2893 {
2894 local_syms
2895 = (Elf_Internal_Sym *) symtab_hdr->contents;
2896 if (local_syms == NULL)
2897 local_syms
2898 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2899 symtab_hdr->sh_info, 0,
2900 NULL, NULL, NULL);
2901 if (local_syms == NULL)
2902 goto error_ret_free_internal;
2903 }
2904
2905 sym = local_syms + r_indx;
2906 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2907 sym_sec = hdr->bfd_section;
2908 if (!sym_sec)
2909 /* This is an undefined symbol. It can never
2910 be resolved. */
2911 continue;
2912
2913 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2914 sym_value = sym->st_value;
2915 destination = (sym_value + irela->r_addend
2916 + sym_sec->output_offset
2917 + sym_sec->output_section->vma);
2918 st_type = ELF_ST_TYPE (sym->st_info);
2919 sym_name
2920 = bfd_elf_string_from_elf_section (input_bfd,
2921 symtab_hdr->sh_link,
2922 sym->st_name);
2923 }
2924 else
2925 {
2926 int e_indx;
2927
2928 e_indx = r_indx - symtab_hdr->sh_info;
2929 hash = ((struct elf64_aarch64_link_hash_entry *)
2930 elf_sym_hashes (input_bfd)[e_indx]);
2931
2932 while (hash->root.root.type == bfd_link_hash_indirect
2933 || hash->root.root.type == bfd_link_hash_warning)
2934 hash = ((struct elf64_aarch64_link_hash_entry *)
2935 hash->root.root.u.i.link);
2936
2937 if (hash->root.root.type == bfd_link_hash_defined
2938 || hash->root.root.type == bfd_link_hash_defweak)
2939 {
2940 struct elf64_aarch64_link_hash_table *globals =
2941 elf64_aarch64_hash_table (info);
2942 sym_sec = hash->root.root.u.def.section;
2943 sym_value = hash->root.root.u.def.value;
2944 /* For a destination in a shared library,
2945 use the PLT stub as target address to
2946 decide whether a branch stub is
2947 needed. */
2948 if (globals->root.splt != NULL && hash != NULL
2949 && hash->root.plt.offset != (bfd_vma) - 1)
2950 {
2951 sym_sec = globals->root.splt;
2952 sym_value = hash->root.plt.offset;
2953 if (sym_sec->output_section != NULL)
2954 destination = (sym_value
2955 + sym_sec->output_offset
2956 +
2957 sym_sec->output_section->vma);
2958 }
2959 else if (sym_sec->output_section != NULL)
2960 destination = (sym_value + irela->r_addend
2961 + sym_sec->output_offset
2962 + sym_sec->output_section->vma);
2963 }
2964 else if (hash->root.root.type == bfd_link_hash_undefined
2965 || (hash->root.root.type
2966 == bfd_link_hash_undefweak))
2967 {
2968 /* For a shared library, use the PLT stub as
2969 target address to decide whether a long
2970 branch stub is needed.
2971 For absolute code, they cannot be handled. */
2972 struct elf64_aarch64_link_hash_table *globals =
2973 elf64_aarch64_hash_table (info);
2974
2975 if (globals->root.splt != NULL && hash != NULL
2976 && hash->root.plt.offset != (bfd_vma) - 1)
2977 {
2978 sym_sec = globals->root.splt;
2979 sym_value = hash->root.plt.offset;
2980 if (sym_sec->output_section != NULL)
2981 destination = (sym_value
2982 + sym_sec->output_offset
2983 +
2984 sym_sec->output_section->vma);
2985 }
2986 else
2987 continue;
2988 }
2989 else
2990 {
2991 bfd_set_error (bfd_error_bad_value);
2992 goto error_ret_free_internal;
2993 }
2994 st_type = ELF_ST_TYPE (hash->root.type);
2995 sym_name = hash->root.root.root.string;
2996 }
2997
2998 /* Determine what (if any) linker stub is needed. */
2999 stub_type = aarch64_type_of_stub
3000 (info, section, irela, st_type, hash, destination);
3001 if (stub_type == aarch64_stub_none)
3002 continue;
3003
3004 /* Support for grouping stub sections. */
3005 id_sec = htab->stub_group[section->id].link_sec;
3006
3007 /* Get the name of this stub. */
3008 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
3009 irela);
3010 if (!stub_name)
3011 goto error_ret_free_internal;
3012
3013 stub_entry =
3014 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3015 stub_name, FALSE, FALSE);
3016 if (stub_entry != NULL)
3017 {
3018 /* The proper stub has already been created. */
3019 free (stub_name);
3020 continue;
3021 }
3022
3023 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3024 htab);
3025 if (stub_entry == NULL)
3026 {
3027 free (stub_name);
3028 goto error_ret_free_internal;
3029 }
3030
3031 stub_entry->target_value = sym_value;
3032 stub_entry->target_section = sym_sec;
3033 stub_entry->stub_type = stub_type;
3034 stub_entry->h = hash;
3035 stub_entry->st_type = st_type;
3036
3037 if (sym_name == NULL)
3038 sym_name = "unnamed";
3039 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3040 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3041 if (stub_entry->output_name == NULL)
3042 {
3043 free (stub_name);
3044 goto error_ret_free_internal;
3045 }
3046
3047 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3048 sym_name);
3049
3050 stub_changed = TRUE;
3051 }
3052
3053 /* We're done with the internal relocs, free them. */
3054 if (elf_section_data (section)->relocs == NULL)
3055 free (internal_relocs);
3056 }
3057 }
3058
3059 if (!stub_changed)
3060 break;
3061
3062 /* OK, we've added some stubs. Find out the new size of the
3063 stub sections. */
3064 for (stub_sec = htab->stub_bfd->sections;
3065 stub_sec != NULL; stub_sec = stub_sec->next)
3066 stub_sec->size = 0;
3067
3068 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3069
3070 /* Ask the linker to do its stuff. */
3071 (*htab->layout_sections_again) ();
3072 stub_changed = FALSE;
3073 }
3074
3075 return TRUE;
3076
3077error_ret_free_local:
3078 return FALSE;
3079}
3080
3081/* Build all the stubs associated with the current output file. The
3082 stubs are kept in a hash table attached to the main linker hash
3083 table. We also set up the .plt entries for statically linked PIC
3084 functions here. This function is called via aarch64_elf_finish in the
3085 linker. */
3086
3087bfd_boolean
3088elf64_aarch64_build_stubs (struct bfd_link_info *info)
3089{
3090 asection *stub_sec;
3091 struct bfd_hash_table *table;
3092 struct elf64_aarch64_link_hash_table *htab;
3093
3094 htab = elf64_aarch64_hash_table (info);
3095
3096 for (stub_sec = htab->stub_bfd->sections;
3097 stub_sec != NULL; stub_sec = stub_sec->next)
3098 {
3099 bfd_size_type size;
3100
3101 /* Ignore non-stub sections. */
3102 if (!strstr (stub_sec->name, STUB_SUFFIX))
3103 continue;
3104
3105 /* Allocate memory to hold the linker stubs. */
3106 size = stub_sec->size;
3107 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3108 if (stub_sec->contents == NULL && size != 0)
3109 return FALSE;
3110 stub_sec->size = 0;
3111 }
3112
3113 /* Build the stubs as directed by the stub hash table. */
3114 table = &htab->stub_hash_table;
3115 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3116
3117 return TRUE;
3118}
3119
3120
/* Add an entry to the code/data map for section SEC.  TYPE is the
   classifying character taken from a mapping symbol's name by the
   caller (see bfd_elf64_aarch64_init_maps), and VMA is the address
   the region starts at.  */

static void
elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _aarch64_elf_section_data *sec_data =
    elf64_aarch64_section_data (sec);
  unsigned int newidx;

  /* First entry for this section: start with room for one element.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Grow the array geometrically when it fills up.
     bfd_realloc_or_free releases the old buffer on failure.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = bfd_realloc_or_free
	(sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
    }

  /* NOTE(review): if an allocation above failed, map is NULL and the
     entry is silently dropped even though mapcount was incremented;
     readers of the map appear to tolerate a NULL map — confirm.  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
3152
3153
3154/* Initialise maps of insn/data for input BFDs. */
3155void
3156bfd_elf64_aarch64_init_maps (bfd *abfd)
3157{
3158 Elf_Internal_Sym *isymbuf;
3159 Elf_Internal_Shdr *hdr;
3160 unsigned int i, localsyms;
3161
3162 /* Make sure that we are dealing with an AArch64 elf binary. */
3163 if (!is_aarch64_elf (abfd))
3164 return;
3165
3166 if ((abfd->flags & DYNAMIC) != 0)
3167 return;
3168
3169 hdr = &elf_symtab_hdr (abfd);
3170 localsyms = hdr->sh_info;
3171
3172 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3173 should contain the number of local symbols, which should come before any
3174 global symbols. Mapping symbols are always local. */
3175 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3176
3177 /* No internal symbols read? Skip this BFD. */
3178 if (isymbuf == NULL)
3179 return;
3180
3181 for (i = 0; i < localsyms; i++)
3182 {
3183 Elf_Internal_Sym *isym = &isymbuf[i];
3184 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3185 const char *name;
3186
3187 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3188 {
3189 name = bfd_elf_string_from_elf_section (abfd,
3190 hdr->sh_link,
3191 isym->st_name);
3192
3193 if (bfd_is_aarch64_special_symbol_name
3194 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3195 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3196 }
3197 }
3198}
3199
3200/* Set option values needed during linking. */
3201void
3202bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3203 struct bfd_link_info *link_info,
3204 int no_enum_warn,
3205 int no_wchar_warn, int pic_veneer)
3206{
3207 struct elf64_aarch64_link_hash_table *globals;
3208
3209 globals = elf64_aarch64_hash_table (link_info);
3210 globals->pic_veneer = pic_veneer;
3211
3212 BFD_ASSERT (is_aarch64_elf (output_bfd));
3213 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3214 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3215}
3216
3217#define MASK(n) ((1u << (n)) - 1)
3218
/* Extract the 26-bit immediate of an unconditional branch (B/BL)
   instruction word.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & 0x3ffffff;
}
3225
/* Extract the 19-bit immediate (bits [23:5]) of a conditional branch
   or compare & branch instruction word.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffff;
}
3232
/* Extract the 19-bit literal offset (bits [23:5]) of a load-literal
   instruction word.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffff;
}
3239
/* Extract the 14-bit immediate (bits [18:5]) of a test & branch
   instruction word.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  return (insn >> 5) & 0x3fff;
}
3246
/* Extract the 16-bit immediate (bits [20:5]) of a move-wide
   instruction word.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & 0xffff;
}
3253
/* Extract the 21-bit immediate of an ADR instruction word.  The two
   low bits (immlo) live in bits [30:29], the remaining 19 bits
   (immhi) in bits [23:5].  */
static inline uint32_t
decode_adr_imm (uint32_t insn)
{
  uint32_t immlo = (insn >> 29) & 0x3;
  uint32_t immhi = (insn >> 5) & 0x7ffff;

  return (immhi << 2) | immlo;
}
3260
/* Extract the 12-bit immediate (bits [21:10]) of an add-immediate
   instruction word.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  return (insn >> 10) & 0xfff;
}
3267
3268
/* Return INSN with its 26-bit unconditional-branch immediate
   replaced by OFS.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~0x3ffffffu) | (ofs & 0x3ffffff);
}
3275
/* Return INSN with the 19-bit conditional-branch/compare&branch
   immediate field (bits [23:5]) replaced by OFS.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(0x7ffffu << 5)) | ((ofs & 0x7ffff) << 5);
}
3282
/* Encode the 19-bit offset of load literal: return INSN with bits
   [23:5] replaced by OFS.  (The old comment said "Decode"; this is
   the encode direction.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(0x7ffffu << 5)) | ((ofs & 0x7ffff) << 5);
}
3289
/* Encode the 14-bit offset of test & branch: return INSN with bits
   [18:5] replaced by OFS.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(0x3fffu << 5)) | ((ofs & 0x3fff) << 5);
}
3296
/* Return INSN with the 16-bit move-wide immediate (bits [20:5])
   replaced by IMM.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(0xffffu << 5)) | ((imm & 0xffff) << 5);
}
3303
/* Return INSN with the 21-bit ADR immediate replaced by IMM: the low
   two bits go into immlo (bits [30:29]), the upper 19 bits into
   immhi (bits [23:5]).  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  uint32_t cleared = insn & ~((0x3u << 29) | (0x7ffffu << 5));

  return cleared | ((imm & 0x3) << 29) | (((imm >> 2) & 0x7ffff) << 5);
}
3311
/* Return INSN with the 12-bit scaled load/store offset
   (bits [21:10]) replaced by IMM.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(0xfffu << 10)) | ((imm & 0xfff) << 10);
}
3318
/* Return INSN with the 12-bit add immediate (bits [21:10]) replaced
   by IMM.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(0xfffu << 10)) | ((imm & 0xfff) << 10);
}
3325
/* Convert a MOVZ/MOVN opcode into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
3332
/* Convert a MOVZ/MOVN opcode into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
3339
/* Insert the addend/value into the instruction or data object being
   relocated.  ADDRESS points at the field to patch within the
   contents of ABFD; HOWTO describes the relocation and ADDEND is the
   fully computed value to insert.  Returns bfd_reloc_ok, or an
   overflow/alignment error status.  */
static bfd_reloc_status_type
bfd_elf_aarch64_put_addend (bfd *abfd,
			    bfd_byte *address,
			    reloc_howto_type *howto, bfd_signed_vma addend)
{
  bfd_reloc_status_type status = bfd_reloc_ok;
  /* Remember the unshifted addend for alignment checks below.  */
  bfd_signed_vma old_addend = addend;
  bfd_vma contents;
  int size;

  /* Fetch the object to be patched, honouring its size and — for
     instruction words — the fixed little-endian instruction order.  */
  size = bfd_get_reloc_size (howto);
  switch (size)
    {
    case 2:
      contents = bfd_get_16 (abfd, address);
      break;
    case 4:
      if (howto->src_mask != 0xffffffff)
	/* Must be 32-bit instruction, always little-endian.  */
	contents = bfd_getl32 (address);
      else
	/* Must be 32-bit data (endianness dependent).  */
	contents = bfd_get_32 (abfd, address);
      break;
    case 8:
      contents = bfd_get_64 (abfd, address);
      break;
    default:
      abort ();
    }

  /* Diagnose overflow against the full (pre-shift) addend.  */
  switch (howto->complain_on_overflow)
    {
    case complain_overflow_dont:
      break;
    case complain_overflow_signed:
      status = aarch64_signed_overflow (addend,
					howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_unsigned:
      status = aarch64_unsigned_overflow (addend,
					  howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_bitfield:
    default:
      abort ();
    }

  addend >>= howto->rightshift;

  /* Re-encode the immediate field appropriate to the instruction (or
     mask the value into plain data in the default case).  */
  switch (howto->type)
    {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
      contents = reencode_branch_ofs_26 (contents, addend);
      break;

    case R_AARCH64_CONDBR19:
      contents = reencode_cond_branch_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TSTBR14:
      contents = reencode_tst_branch_ofs_14 (contents, addend);
      break;

    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_GOT_LD_PREL19:
      /* The load-literal offset is scaled; the dropped low bits must
	 be zero or the address is misaligned.  */
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      contents = reencode_ld_lit_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TLSDESC_CALL:
      break;

    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      contents = reencode_adr_imm (contents, addend);
      break;

    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_ADD_ABS_LO12_NC:
      /* Corresponds to: add rd, rn, #uimm12 to provide the low order
	 12 bits of the page offset following
	 R_AARCH64_ADR_PREL_PG_HI21 which computes the
	 (pc-relative) page base.  */
      contents = reencode_add_imm (contents, addend);
      break;

    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
	 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
	 which computes the (pc-relative) page base.  */
      contents = reencode_ldst_pos_imm (contents, addend);
      break;

      /* Group relocations to create high bits of a 16, 32, 48 or 64
	 bit signed data or abs address inline.  Will change
	 instruction to MOVN or MOVZ depending on sign of calculated
	 value.  */

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
      /* NOTE: We can only come here with movz or movn.  */
      if (addend < 0)
	{
	  /* Force use of MOVN.  */
	  addend = ~addend;
	  contents = reencode_movzn_to_movn (contents);
	}
      else
	{
	  /* Force use of MOVZ.  */
	  contents = reencode_movzn_to_movz (contents);
	}
      /* fall through */

      /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
	 data or abs address inline.  */

    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      contents = reencode_movw_imm (contents, addend);
      break;

    default:
      /* Repack simple data */
      /* Only contiguous low-order masks are supported here.  */
      if (howto->dst_mask & (howto->dst_mask + 1))
	return bfd_reloc_notsupported;

      contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
      break;
    }

  /* Write the patched object back out, mirroring the fetch above.  */
  switch (size)
    {
    case 2:
      bfd_put_16 (abfd, contents, address);
      break;
    case 4:
      if (howto->dst_mask != 0xffffffff)
	/* must be 32-bit instruction, always little-endian */
	bfd_putl32 (contents, address);
      else
	/* must be 32-bit data (endianness dependent) */
	bfd_put_32 (abfd, contents, address);
      break;
    case 8:
      bfd_put_64 (abfd, contents, address);
      break;
    default:
      abort ();
    }

  return status;
}
3527
/* Compute the VMA of the GOT entry for symbol H and, when the entry
   must be initialized statically, write VALUE into it (using bit 0 of
   h->got.offset as an "already initialized" marker).  Clears
   *UNRESOLVED_RELOC_P when a dynamic relocation will fill the entry
   instead.  Returns the entry's VMA, or (bfd_vma) -1 if H is NULL.  */
static bfd_vma
aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
				 struct elf64_aarch64_link_hash_table
				 *globals, struct bfd_link_info *info,
				 bfd_vma value, bfd *output_bfd,
				 bfd_boolean *unresolved_reloc_p)
{
  bfd_vma off = (bfd_vma) - 1;
  asection *basegot = globals->root.sgot;
  bfd_boolean dyn = globals->root.dynamic_sections_created;

  if (h != NULL)
    {
      off = h->got.offset;
      BFD_ASSERT (off != (bfd_vma) - 1);
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
	  || (info->shared
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	  || (ELF_ST_VISIBILITY (h->other)
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This is actually a static link, or it is a -Bsymbolic link
	     and the symbol is defined locally.  We must initialize this
	     entry in the global offset table.  Since the offset must
	     always be a multiple of 8, we use the least significant bit
	     to record whether we have initialized it already.
	     When doing a dynamic link, we create a .rel(a).got relocation
	     entry to initialize the value.  This is done in the
	     finish_dynamic_symbol routine.  */
	  if ((off & 1) != 0)
	    off &= ~1;		/* Already initialized; strip marker.  */
	  else
	    {
	      bfd_put_64 (output_bfd, value, basegot->contents + off);
	      h->got.offset |= 1;
	    }
	}
      else
	*unresolved_reloc_p = FALSE;

      /* Convert the section-relative offset into a VMA.  */
      off = off + basegot->output_section->vma + basegot->output_offset;
    }

  return off;
}
3573
3574/* Change R_TYPE to a more efficient access model where possible,
3575 return the new reloc type. */
3576
3577static unsigned int
3578aarch64_tls_transition_without_check (unsigned int r_type,
3579 struct elf_link_hash_entry *h)
3580{
3581 bfd_boolean is_local = h == NULL;
3582 switch (r_type)
3583 {
3584 case R_AARCH64_TLSGD_ADR_PAGE21:
3585 case R_AARCH64_TLSDESC_ADR_PAGE:
3586 return is_local
3587 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3588
3589 case R_AARCH64_TLSGD_ADD_LO12_NC:
3590 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3591 return is_local
3592 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3593 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3594
3595 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3596 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3597
3598 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3599 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3600
3601 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3602 case R_AARCH64_TLSDESC_CALL:
3603 /* Instructions with these relocations will become NOPs. */
3604 return R_AARCH64_NONE;
3605 }
3606
3607 return r_type;
3608}
3609
3610static unsigned int
3611aarch64_reloc_got_type (unsigned int r_type)
3612{
3613 switch (r_type)
3614 {
3615 case R_AARCH64_LD64_GOT_LO12_NC:
3616 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3617 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
3618 return GOT_NORMAL;
3619
3620 case R_AARCH64_TLSGD_ADR_PAGE21:
3621 case R_AARCH64_TLSGD_ADD_LO12_NC:
3622 return GOT_TLS_GD;
3623
3624 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3625 case R_AARCH64_TLSDESC_ADR_PAGE:
3626 case R_AARCH64_TLSDESC_CALL:
3627 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3628 return GOT_TLSDESC_GD;
3629
3630 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3631 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3632 return GOT_TLS_IE;
3633
3634 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3635 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3636 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3637 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3638 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3639 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3640 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3641 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3642 return GOT_UNKNOWN;
3643 }
3644 return GOT_UNKNOWN;
3645}
3646
3647static bfd_boolean
3648aarch64_can_relax_tls (bfd *input_bfd,
3649 struct bfd_link_info *info,
3650 unsigned int r_type,
3651 struct elf_link_hash_entry *h,
3652 unsigned long r_symndx)
3653{
3654 unsigned int symbol_got_type;
3655 unsigned int reloc_got_type;
3656
3657 if (! IS_AARCH64_TLS_RELOC (r_type))
3658 return FALSE;
3659
3660 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3661 reloc_got_type = aarch64_reloc_got_type (r_type);
3662
3663 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3664 return TRUE;
3665
3666 if (info->shared)
3667 return FALSE;
3668
3669 if (h && h->root.type == bfd_link_hash_undefweak)
3670 return FALSE;
3671
3672 return TRUE;
3673}
3674
3675static unsigned int
3676aarch64_tls_transition (bfd *input_bfd,
3677 struct bfd_link_info *info,
3678 unsigned int r_type,
3679 struct elf_link_hash_entry *h,
3680 unsigned long r_symndx)
3681{
3682 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3683 return r_type;
3684
3685 return aarch64_tls_transition_without_check (r_type, h);
3686}
3687
3688/* Return the base VMA address which should be subtracted from real addresses
3689 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3690
3691static bfd_vma
3692dtpoff_base (struct bfd_link_info *info)
3693{
3694 /* If tls_sec is NULL, we should have signalled an error already. */
3695 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3696 return elf_hash_table (info)->tls_sec->vma;
3697}
3698
3699
3700/* Return the base VMA address which should be subtracted from real addresses
3701 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3702
3703static bfd_vma
3704tpoff_base (struct bfd_link_info *info)
3705{
3706 struct elf_link_hash_table *htab = elf_hash_table (info);
3707
3708 /* If tls_sec is NULL, we should have signalled an error already. */
3709 if (htab->tls_sec == NULL)
3710 return 0;
3711
3712 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3713 htab->tls_sec->alignment_power);
3714 return htab->tls_sec->vma - base;
3715}
3716
3717static bfd_vma *
3718symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3719 unsigned long r_symndx)
3720{
3721 /* Calculate the address of the GOT entry for symbol
3722 referred to in h. */
3723 if (h != NULL)
3724 return &h->got.offset;
3725 else
3726 {
3727 /* local symbol */
3728 struct elf_aarch64_local_symbol *l;
3729
3730 l = elf64_aarch64_locals (input_bfd);
3731 return &l[r_symndx].got_offset;
3732 }
3733}
3734
3735static void
3736symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3737 unsigned long r_symndx)
3738{
3739 bfd_vma *p;
3740 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3741 *p |= 1;
3742}
3743
3744static int
3745symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3746 unsigned long r_symndx)
3747{
3748 bfd_vma value;
3749 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3750 return value & 1;
3751}
3752
3753static bfd_vma
3754symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3755 unsigned long r_symndx)
3756{
3757 bfd_vma value;
3758 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3759 value &= ~1;
3760 return value;
3761}
3762
3763static bfd_vma *
3764symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3765 unsigned long r_symndx)
3766{
3767 /* Calculate the address of the GOT entry for symbol
3768 referred to in h. */
3769 if (h != NULL)
3770 {
3771 struct elf64_aarch64_link_hash_entry *eh;
3772 eh = (struct elf64_aarch64_link_hash_entry *) h;
3773 return &eh->tlsdesc_got_jump_table_offset;
3774 }
3775 else
3776 {
3777 /* local symbol */
3778 struct elf_aarch64_local_symbol *l;
3779
3780 l = elf64_aarch64_locals (input_bfd);
3781 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3782 }
3783}
3784
3785static void
3786symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3787 unsigned long r_symndx)
3788{
3789 bfd_vma *p;
3790 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3791 *p |= 1;
3792}
3793
3794static int
3795symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3796 struct elf_link_hash_entry *h,
3797 unsigned long r_symndx)
3798{
3799 bfd_vma value;
3800 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3801 return value & 1;
3802}
3803
3804static bfd_vma
3805symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3806 unsigned long r_symndx)
3807{
3808 bfd_vma value;
3809 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3810 value &= ~1;
3811 return value;
3812}
3813
3814/* Perform a relocation as part of a final link. */
3815static bfd_reloc_status_type
3816elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3817 bfd *input_bfd,
3818 bfd *output_bfd,
3819 asection *input_section,
3820 bfd_byte *contents,
3821 Elf_Internal_Rela *rel,
3822 bfd_vma value,
3823 struct bfd_link_info *info,
3824 asection *sym_sec,
3825 struct elf_link_hash_entry *h,
3826 bfd_boolean *unresolved_reloc_p,
3827 bfd_boolean save_addend,
3828 bfd_vma *saved_addend)
3829{
3830 unsigned int r_type = howto->type;
3831 unsigned long r_symndx;
3832 bfd_byte *hit_data = contents + rel->r_offset;
3833 bfd_vma place;
3834 bfd_signed_vma signed_addend;
3835 struct elf64_aarch64_link_hash_table *globals;
3836 bfd_boolean weak_undef_p;
3837
3838 globals = elf64_aarch64_hash_table (info);
3839
3840 BFD_ASSERT (is_aarch64_elf (input_bfd));
3841
3842 r_symndx = ELF64_R_SYM (rel->r_info);
3843
3844 /* It is possible to have linker relaxations on some TLS access
3845 models. Update our information here. */
3846 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3847
3848 if (r_type != howto->type)
3849 howto = elf64_aarch64_howto_from_type (r_type);
3850
3851 place = input_section->output_section->vma
3852 + input_section->output_offset + rel->r_offset;
3853
3854 /* Get addend, accumulating the addend for consecutive relocs
3855 which refer to the same offset. */
3856 signed_addend = saved_addend ? *saved_addend : 0;
3857 signed_addend += rel->r_addend;
3858
3859 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3860 : bfd_is_und_section (sym_sec));
3861 switch (r_type)
3862 {
3863 case R_AARCH64_NONE:
3864 case R_AARCH64_NULL:
3865 case R_AARCH64_TLSDESC_CALL:
3866 *unresolved_reloc_p = FALSE;
3867 return bfd_reloc_ok;
3868
3869 case R_AARCH64_ABS64:
3870
3871 /* When generating a shared object or relocatable executable, these
3872 relocations are copied into the output file to be resolved at
3873 run time. */
3874 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3875 && (input_section->flags & SEC_ALLOC)
3876 && (h == NULL
3877 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3878 || h->root.type != bfd_link_hash_undefweak))
3879 {
3880 Elf_Internal_Rela outrel;
3881 bfd_byte *loc;
3882 bfd_boolean skip, relocate;
3883 asection *sreloc;
3884
3885 *unresolved_reloc_p = FALSE;
3886
3887 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3888 input_section, 1);
3889 if (sreloc == NULL)
3890 return bfd_reloc_notsupported;
3891
3892 skip = FALSE;
3893 relocate = FALSE;
3894
3895 outrel.r_addend = signed_addend;
3896 outrel.r_offset =
3897 _bfd_elf_section_offset (output_bfd, info, input_section,
3898 rel->r_offset);
3899 if (outrel.r_offset == (bfd_vma) - 1)
3900 skip = TRUE;
3901 else if (outrel.r_offset == (bfd_vma) - 2)
3902 {
3903 skip = TRUE;
3904 relocate = TRUE;
3905 }
3906
3907 outrel.r_offset += (input_section->output_section->vma
3908 + input_section->output_offset);
3909
3910 if (skip)
3911 memset (&outrel, 0, sizeof outrel);
3912 else if (h != NULL
3913 && h->dynindx != -1
3914 && (!info->shared || !info->symbolic || !h->def_regular))
3915 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3916 else
3917 {
3918 int symbol;
3919
3920 /* On SVR4-ish systems, the dynamic loader cannot
3921 relocate the text and data segments independently,
3922 so the symbol does not matter. */
3923 symbol = 0;
3924 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3925 outrel.r_addend += value;
3926 }
3927
3928 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3929 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3930
3931 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3932 {
3933 /* Sanity to check that we have previously allocated
3934 sufficient space in the relocation section for the
3935 number of relocations we actually want to emit. */
3936 abort ();
3937 }
3938
3939 /* If this reloc is against an external symbol, we do not want to
3940 fiddle with the addend. Otherwise, we need to include the symbol
3941 value so that it becomes an addend for the dynamic reloc. */
3942 if (!relocate)
3943 return bfd_reloc_ok;
3944
3945 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3946 contents, rel->r_offset, value,
3947 signed_addend);
3948 }
3949 else
3950 value += signed_addend;
3951 break;
3952
3953 case R_AARCH64_JUMP26:
3954 case R_AARCH64_CALL26:
3955 {
3956 asection *splt = globals->root.splt;
3957 bfd_boolean via_plt_p =
3958 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3959
3960 /* A call to an undefined weak symbol is converted to a jump to
3961 the next instruction unless a PLT entry will be created.
3962 The jump to the next instruction is optimized as a NOP.
3963 Do the same for local undefined symbols. */
3964 if (weak_undef_p && ! via_plt_p)
3965 {
3966 bfd_putl32 (INSN_NOP, hit_data);
3967 return bfd_reloc_ok;
3968 }
3969
3970 /* If the call goes through a PLT entry, make sure to
3971 check distance to the right destination address. */
3972 if (via_plt_p)
3973 {
3974 value = (splt->output_section->vma
3975 + splt->output_offset + h->plt.offset);
3976 *unresolved_reloc_p = FALSE;
3977 }
3978
3979 /* If the target symbol is global and marked as a function the
3980 relocation applies a function call or a tail call. In this
3981 situation we can veneer out of range branches. The veneers
3982 use IP0 and IP1 hence cannot be used arbitrary out of range
3983 branches that occur within the body of a function. */
3984 if (h && h->type == STT_FUNC)
3985 {
3986 /* Check if a stub has to be inserted because the destination
3987 is too far away. */
3988 if (! aarch64_valid_branch_p (value, place))
3989 {
3990 /* The target is out of reach, so redirect the branch to
3991 the local stub for this function. */
3992 struct elf64_aarch64_stub_hash_entry *stub_entry;
3993 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3994 sym_sec, h,
3995 rel, globals);
3996 if (stub_entry != NULL)
3997 value = (stub_entry->stub_offset
3998 + stub_entry->stub_sec->output_offset
3999 + stub_entry->stub_sec->output_section->vma);
4000 }
4001 }
4002 }
4003 value = aarch64_resolve_relocation (r_type, place, value,
4004 signed_addend, weak_undef_p);
4005 break;
4006
4007 case R_AARCH64_ABS16:
4008 case R_AARCH64_ABS32:
4009 case R_AARCH64_ADD_ABS_LO12_NC:
4010 case R_AARCH64_ADR_PREL_LO21:
4011 case R_AARCH64_ADR_PREL_PG_HI21:
4012 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4013 case R_AARCH64_CONDBR19:
4014 case R_AARCH64_LD_PREL_LO19:
4015 case R_AARCH64_LDST8_ABS_LO12_NC:
4016 case R_AARCH64_LDST16_ABS_LO12_NC:
4017 case R_AARCH64_LDST32_ABS_LO12_NC:
4018 case R_AARCH64_LDST64_ABS_LO12_NC:
4019 case R_AARCH64_LDST128_ABS_LO12_NC:
4020 case R_AARCH64_MOVW_SABS_G0:
4021 case R_AARCH64_MOVW_SABS_G1:
4022 case R_AARCH64_MOVW_SABS_G2:
4023 case R_AARCH64_MOVW_UABS_G0:
4024 case R_AARCH64_MOVW_UABS_G0_NC:
4025 case R_AARCH64_MOVW_UABS_G1:
4026 case R_AARCH64_MOVW_UABS_G1_NC:
4027 case R_AARCH64_MOVW_UABS_G2:
4028 case R_AARCH64_MOVW_UABS_G2_NC:
4029 case R_AARCH64_MOVW_UABS_G3:
4030 case R_AARCH64_PREL16:
4031 case R_AARCH64_PREL32:
4032 case R_AARCH64_PREL64:
4033 case R_AARCH64_TSTBR14:
4034 value = aarch64_resolve_relocation (r_type, place, value,
4035 signed_addend, weak_undef_p);
4036 break;
4037
4038 case R_AARCH64_LD64_GOT_LO12_NC:
4039 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 4040 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
4041 if (globals->root.sgot == NULL)
4042 BFD_ASSERT (h != NULL);
4043
4044 if (h != NULL)
4045 {
4046 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4047 output_bfd,
4048 unresolved_reloc_p);
4049 value = aarch64_resolve_relocation (r_type, place, value,
4050 0, weak_undef_p);
4051 }
4052 break;
4053
4054 case R_AARCH64_TLSGD_ADR_PAGE21:
4055 case R_AARCH64_TLSGD_ADD_LO12_NC:
4056 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4057 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4058 if (globals->root.sgot == NULL)
4059 return bfd_reloc_notsupported;
4060
4061 value = (symbol_got_offset (input_bfd, h, r_symndx)
4062 + globals->root.sgot->output_section->vma
4063 + globals->root.sgot->output_section->output_offset);
4064
4065 value = aarch64_resolve_relocation (r_type, place, value,
4066 0, weak_undef_p);
4067 *unresolved_reloc_p = FALSE;
4068 break;
4069
4070 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4071 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4072 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4073 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4074 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4075 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4076 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4077 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4078 value = aarch64_resolve_relocation (r_type, place, value,
bb3f9ed8 4079 signed_addend - tpoff_base (info), weak_undef_p);
a06ea964
NC
4080 *unresolved_reloc_p = FALSE;
4081 break;
4082
4083 case R_AARCH64_TLSDESC_ADR_PAGE:
4084 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4085 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4086 case R_AARCH64_TLSDESC_ADD:
4087 case R_AARCH64_TLSDESC_LDR:
4088 if (globals->root.sgot == NULL)
4089 return bfd_reloc_notsupported;
4090
4091 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4092 + globals->root.sgotplt->output_section->vma
4093 + globals->root.sgotplt->output_section->output_offset
4094 + globals->sgotplt_jump_table_size);
4095
4096 value = aarch64_resolve_relocation (r_type, place, value,
4097 0, weak_undef_p);
4098 *unresolved_reloc_p = FALSE;
4099 break;
4100
4101 default:
4102 return bfd_reloc_notsupported;
4103 }
4104
4105 if (saved_addend)
4106 *saved_addend = value;
4107
4108 /* Only apply the final relocation in a sequence. */
4109 if (save_addend)
4110 return bfd_reloc_continue;
4111
4112 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4113}
4114
4115/* Handle TLS relaxations. Relaxing is possible for symbols that use
4116 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4117 link.
4118
4119 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4120 is to then call final_link_relocate. Return other values in the
4121 case of error. */
4122
4123static bfd_reloc_status_type
4124elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4125 bfd *input_bfd, bfd_byte *contents,
4126 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4127{
4128 bfd_boolean is_local = h == NULL;
4129 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4130 unsigned long insn;
4131
4132 BFD_ASSERT (globals && input_bfd && contents && rel);
4133
4134 switch (r_type)
4135 {
4136 case R_AARCH64_TLSGD_ADR_PAGE21:
4137 case R_AARCH64_TLSDESC_ADR_PAGE:
4138 if (is_local)
4139 {
4140 /* GD->LE relaxation:
4141 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4142 or
4143 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4144 */
4145 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4146 return bfd_reloc_continue;
4147 }
4148 else
4149 {
4150 /* GD->IE relaxation:
4151 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4152 or
4153 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4154 */
4155 insn = bfd_getl32 (contents + rel->r_offset);
4156 return bfd_reloc_continue;
4157 }
4158
4159 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4160 if (is_local)
4161 {
4162 /* GD->LE relaxation:
4163 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4164 */
4165 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4166 return bfd_reloc_continue;
4167 }
4168 else
4169 {
4170 /* GD->IE relaxation:
4171 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4172 */
4173 insn = bfd_getl32 (contents + rel->r_offset);
4174 insn &= 0xfffffff0;
4175 bfd_putl32 (insn, contents + rel->r_offset);
4176 return bfd_reloc_continue;
4177 }
4178
4179 case R_AARCH64_TLSGD_ADD_LO12_NC:
4180 if (is_local)
4181 {
4182 /* GD->LE relaxation
4183 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4184 bl __tls_get_addr => mrs x1, tpidr_el0
4185 nop => add x0, x1, x0
4186 */
4187
4188 /* First kill the tls_get_addr reloc on the bl instruction. */
4189 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4190 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4191
4192 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4193 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4194 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4195 return bfd_reloc_continue;
4196 }
4197 else
4198 {
4199 /* GD->IE relaxation
4200 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4201 BL __tls_get_addr => mrs x1, tpidr_el0
4202 R_AARCH64_CALL26
4203 NOP => add x0, x1, x0
4204 */
4205
4206 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4207
4208 /* Remove the relocation on the BL instruction. */
4209 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4210
4211 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4212
4213 /* We choose to fixup the BL and NOP instructions using the
4214 offset from the second relocation to allow flexibility in
4215 scheduling instructions between the ADD and BL. */
4216 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4217 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4218 return bfd_reloc_continue;
4219 }
4220
4221 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4222 case R_AARCH64_TLSDESC_CALL:
4223 /* GD->IE/LE relaxation:
4224 add x0, x0, #:tlsdesc_lo12:var => nop
4225 blr xd => nop
4226 */
4227 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4228 return bfd_reloc_ok;
4229
4230 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4231 /* IE->LE relaxation:
4232 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4233 */
4234 if (is_local)
4235 {
4236 insn = bfd_getl32 (contents + rel->r_offset);
4237 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4238 }
4239 return bfd_reloc_continue;
4240
4241 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4242 /* IE->LE relaxation:
4243 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4244 */
4245 if (is_local)
4246 {
4247 insn = bfd_getl32 (contents + rel->r_offset);
4248 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4249 }
4250 return bfd_reloc_continue;
4251
4252 default:
4253 return bfd_reloc_continue;
4254 }
4255
4256 return bfd_reloc_ok;
4257}
4258
4259/* Relocate an AArch64 ELF section. */
4260
4261static bfd_boolean
4262elf64_aarch64_relocate_section (bfd *output_bfd,
4263 struct bfd_link_info *info,
4264 bfd *input_bfd,
4265 asection *input_section,
4266 bfd_byte *contents,
4267 Elf_Internal_Rela *relocs,
4268 Elf_Internal_Sym *local_syms,
4269 asection **local_sections)
4270{
4271 Elf_Internal_Shdr *symtab_hdr;
4272 struct elf_link_hash_entry **sym_hashes;
4273 Elf_Internal_Rela *rel;
4274 Elf_Internal_Rela *relend;
4275 const char *name;
4276 struct elf64_aarch64_link_hash_table *globals;
4277 bfd_boolean save_addend = FALSE;
4278 bfd_vma addend = 0;
4279
4280 globals = elf64_aarch64_hash_table (info);
4281
4282 symtab_hdr = &elf_symtab_hdr (input_bfd);
4283 sym_hashes = elf_sym_hashes (input_bfd);
4284
4285 rel = relocs;
4286 relend = relocs + input_section->reloc_count;
4287 for (; rel < relend; rel++)
4288 {
4289 unsigned int r_type;
4290 unsigned int relaxed_r_type;
4291 reloc_howto_type *howto;
4292 unsigned long r_symndx;
4293 Elf_Internal_Sym *sym;
4294 asection *sec;
4295 struct elf_link_hash_entry *h;
4296 bfd_vma relocation;
4297 bfd_reloc_status_type r;
4298 arelent bfd_reloc;
4299 char sym_type;
4300 bfd_boolean unresolved_reloc = FALSE;
4301 char *error_message = NULL;
4302
4303 r_symndx = ELF64_R_SYM (rel->r_info);
4304 r_type = ELF64_R_TYPE (rel->r_info);
4305
4306 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4307 howto = bfd_reloc.howto;
4308
7fcfd62d
NC
4309 if (howto == NULL)
4310 {
4311 (*_bfd_error_handler)
4312 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4313 input_bfd, input_section, r_type);
4314 return FALSE;
4315 }
4316
a06ea964
NC
4317 h = NULL;
4318 sym = NULL;
4319 sec = NULL;
4320
4321 if (r_symndx < symtab_hdr->sh_info)
4322 {
4323 sym = local_syms + r_symndx;
4324 sym_type = ELF64_ST_TYPE (sym->st_info);
4325 sec = local_sections[r_symndx];
4326
4327 /* An object file might have a reference to a local
4328 undefined symbol. This is a daft object file, but we
4329 should at least do something about it. */
4330 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4331 && bfd_is_und_section (sec)
4332 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4333 {
4334 if (!info->callbacks->undefined_symbol
4335 (info, bfd_elf_string_from_elf_section
4336 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4337 input_bfd, input_section, rel->r_offset, TRUE))
4338 return FALSE;
4339 }
4340
a06ea964
NC
4341 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4342 }
4343 else
4344 {
4345 bfd_boolean warned;
4346
4347 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4348 r_symndx, symtab_hdr, sym_hashes,
4349 h, sec, relocation,
4350 unresolved_reloc, warned);
4351
4352 sym_type = h->type;
4353 }
4354
4355 if (sec != NULL && discarded_section (sec))
4356 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4357 rel, 1, relend, howto, 0, contents);
4358
4359 if (info->relocatable)
4360 {
4361 /* This is a relocatable link. We don't have to change
4362 anything, unless the reloc is against a section symbol,
4363 in which case we have to adjust according to where the
4364 section symbol winds up in the output section. */
4365 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4366 rel->r_addend += sec->output_offset;
4367 continue;
4368 }
4369
4370 if (h != NULL)
4371 name = h->root.root.string;
4372 else
4373 {
4374 name = (bfd_elf_string_from_elf_section
4375 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4376 if (name == NULL || *name == '\0')
4377 name = bfd_section_name (input_bfd, sec);
4378 }
4379
4380 if (r_symndx != 0
4381 && r_type != R_AARCH64_NONE
4382 && r_type != R_AARCH64_NULL
4383 && (h == NULL
4384 || h->root.type == bfd_link_hash_defined
4385 || h->root.type == bfd_link_hash_defweak)
4386 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4387 {
4388 (*_bfd_error_handler)
4389 ((sym_type == STT_TLS
4390 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4391 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4392 input_bfd,
4393 input_section, (long) rel->r_offset, howto->name, name);
4394 }
4395
4396
4397 /* We relax only if we can see that there can be a valid transition
4398 from a reloc type to another.
4399 We call elf64_aarch64_final_link_relocate unless we're completely
4400 done, i.e., the relaxation produced the final output we want. */
4401
4402 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4403 h, r_symndx);
4404 if (relaxed_r_type != r_type)
4405 {
4406 r_type = relaxed_r_type;
4407 howto = elf64_aarch64_howto_from_type (r_type);
4408
4409 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4410 unresolved_reloc = 0;
4411 }
4412 else
4413 r = bfd_reloc_continue;
4414
4415 /* There may be multiple consecutive relocations for the
4416 same offset. In that case we are supposed to treat the
4417 output of each relocation as the addend for the next. */
4418 if (rel + 1 < relend
4419 && rel->r_offset == rel[1].r_offset
4420 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4421 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4422 save_addend = TRUE;
4423 else
4424 save_addend = FALSE;
4425
4426 if (r == bfd_reloc_continue)
4427 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4428 input_section, contents, rel,
4429 relocation, info, sec,
4430 h, &unresolved_reloc,
4431 save_addend, &addend);
4432
4433 switch (r_type)
4434 {
4435 case R_AARCH64_TLSGD_ADR_PAGE21:
4436 case R_AARCH64_TLSGD_ADD_LO12_NC:
4437 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4438 {
4439 bfd_boolean need_relocs = FALSE;
4440 bfd_byte *loc;
4441 int indx;
4442 bfd_vma off;
4443
4444 off = symbol_got_offset (input_bfd, h, r_symndx);
4445 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4446
4447 need_relocs =
4448 (info->shared || indx != 0) &&
4449 (h == NULL
4450 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4451 || h->root.type != bfd_link_hash_undefweak);
4452
4453 BFD_ASSERT (globals->root.srelgot != NULL);
4454
4455 if (need_relocs)
4456 {
4457 Elf_Internal_Rela rela;
4458 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4459 rela.r_addend = 0;
4460 rela.r_offset = globals->root.sgot->output_section->vma +
4461 globals->root.sgot->output_offset + off;
4462
4463
4464 loc = globals->root.srelgot->contents;
4465 loc += globals->root.srelgot->reloc_count++
4466 * RELOC_SIZE (htab);
4467 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4468
4469 if (indx == 0)
4470 {
4471 bfd_put_64 (output_bfd,
4472 relocation - dtpoff_base (info),
4473 globals->root.sgot->contents + off
4474 + GOT_ENTRY_SIZE);
4475 }
4476 else
4477 {
4478 /* This TLS symbol is global. We emit a
4479 relocation to fixup the tls offset at load
4480 time. */
4481 rela.r_info =
4482 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4483 rela.r_addend = 0;
4484 rela.r_offset =
4485 (globals->root.sgot->output_section->vma
4486 + globals->root.sgot->output_offset + off
4487 + GOT_ENTRY_SIZE);
4488
4489 loc = globals->root.srelgot->contents;
4490 loc += globals->root.srelgot->reloc_count++
4491 * RELOC_SIZE (globals);
4492 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4493 bfd_put_64 (output_bfd, (bfd_vma) 0,
4494 globals->root.sgot->contents + off
4495 + GOT_ENTRY_SIZE);
4496 }
4497 }
4498 else
4499 {
4500 bfd_put_64 (output_bfd, (bfd_vma) 1,
4501 globals->root.sgot->contents + off);
4502 bfd_put_64 (output_bfd,
4503 relocation - dtpoff_base (info),
4504 globals->root.sgot->contents + off
4505 + GOT_ENTRY_SIZE);
4506 }
4507
4508 symbol_got_offset_mark (input_bfd, h, r_symndx);
4509 }
4510 break;
4511
4512 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4513 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4514 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4515 {
4516 bfd_boolean need_relocs = FALSE;
4517 bfd_byte *loc;
4518 int indx;
4519 bfd_vma off;
4520
4521 off = symbol_got_offset (input_bfd, h, r_symndx);
4522
4523 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4524
4525 need_relocs =
4526 (info->shared || indx != 0) &&
4527 (h == NULL
4528 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4529 || h->root.type != bfd_link_hash_undefweak);
4530
4531 BFD_ASSERT (globals->root.srelgot != NULL);
4532
4533 if (need_relocs)
4534 {
4535 Elf_Internal_Rela rela;
4536
4537 if (indx == 0)
4538 rela.r_addend = relocation - dtpoff_base (info);
4539 else
4540 rela.r_addend = 0;
4541
4542 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4543 rela.r_offset = globals->root.sgot->output_section->vma +
4544 globals->root.sgot->output_offset + off;
4545
4546 loc = globals->root.srelgot->contents;
4547 loc += globals->root.srelgot->reloc_count++
4548 * RELOC_SIZE (htab);
4549
4550 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4551
4552 bfd_put_64 (output_bfd, rela.r_addend,
4553 globals->root.sgot->contents + off);
4554 }
4555 else
4556 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4557 globals->root.sgot->contents + off);
4558
4559 symbol_got_offset_mark (input_bfd, h, r_symndx);
4560 }
4561 break;
4562
4563 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4564 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4565 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4566 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4567 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4568 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4569 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4570 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4571 break;
4572
4573 case R_AARCH64_TLSDESC_ADR_PAGE:
4574 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4575 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4576 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4577 {
4578 bfd_boolean need_relocs = FALSE;
4579 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4580 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4581
4582 need_relocs = (h == NULL
4583 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4584 || h->root.type != bfd_link_hash_undefweak);
4585
4586 BFD_ASSERT (globals->root.srelgot != NULL);
4587 BFD_ASSERT (globals->root.sgot != NULL);
4588
4589 if (need_relocs)
4590 {
4591 bfd_byte *loc;
4592 Elf_Internal_Rela rela;
4593 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4594 rela.r_addend = 0;
4595 rela.r_offset = (globals->root.sgotplt->output_section->vma
4596 + globals->root.sgotplt->output_offset
4597 + off + globals->sgotplt_jump_table_size);
4598
4599 if (indx == 0)
4600 rela.r_addend = relocation - dtpoff_base (info);
4601
4602 /* Allocate the next available slot in the PLT reloc
4603 section to hold our R_AARCH64_TLSDESC, the next
4604 available slot is determined from reloc_count,
4605 which we step. But note, reloc_count was
4606 artifically moved down while allocating slots for
4607 real PLT relocs such that all of the PLT relocs
4608 will fit above the initial reloc_count and the
4609 extra stuff will fit below. */
4610 loc = globals->root.srelplt->contents;
4611 loc += globals->root.srelplt->reloc_count++
4612 * RELOC_SIZE (globals);
4613
4614 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4615
4616 bfd_put_64 (output_bfd, (bfd_vma) 0,
4617 globals->root.sgotplt->contents + off +
4618 globals->sgotplt_jump_table_size);
4619 bfd_put_64 (output_bfd, (bfd_vma) 0,
4620 globals->root.sgotplt->contents + off +
4621 globals->sgotplt_jump_table_size +
4622 GOT_ENTRY_SIZE);
4623 }
4624
4625 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4626 }
4627 break;
4628 }
4629
4630 if (!save_addend)
4631 addend = 0;
4632
4633
4634 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4635 because such sections are not SEC_ALLOC and thus ld.so will
4636 not process them. */
4637 if (unresolved_reloc
4638 && !((input_section->flags & SEC_DEBUGGING) != 0
4639 && h->def_dynamic)
4640 && _bfd_elf_section_offset (output_bfd, info, input_section,
4641 +rel->r_offset) != (bfd_vma) - 1)
4642 {
4643 (*_bfd_error_handler)
4644 (_
4645 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4646 input_bfd, input_section, (long) rel->r_offset, howto->name,
4647 h->root.root.string);
4648 return FALSE;
4649 }
4650
4651 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4652 {
4653 switch (r)
4654 {
4655 case bfd_reloc_overflow:
4656 /* If the overflowing reloc was to an undefined symbol,
4657 we have already printed one error message and there
4658 is no point complaining again. */
4659 if ((!h ||
4660 h->root.type != bfd_link_hash_undefined)
4661 && (!((*info->callbacks->reloc_overflow)
4662 (info, (h ? &h->root : NULL), name, howto->name,
4663 (bfd_vma) 0, input_bfd, input_section,
4664 rel->r_offset))))
4665 return FALSE;
4666 break;
4667
4668 case bfd_reloc_undefined:
4669 if (!((*info->callbacks->undefined_symbol)
4670 (info, name, input_bfd, input_section,
4671 rel->r_offset, TRUE)))
4672 return FALSE;
4673 break;
4674
4675 case bfd_reloc_outofrange:
4676 error_message = _("out of range");
4677 goto common_error;
4678
4679 case bfd_reloc_notsupported:
4680 error_message = _("unsupported relocation");
4681 goto common_error;
4682
4683 case bfd_reloc_dangerous:
4684 /* error_message should already be set. */
4685 goto common_error;
4686
4687 default:
4688 error_message = _("unknown error");
4689 /* Fall through. */
4690
4691 common_error:
4692 BFD_ASSERT (error_message != NULL);
4693 if (!((*info->callbacks->reloc_dangerous)
4694 (info, error_message, input_bfd, input_section,
4695 rel->r_offset)))
4696 return FALSE;
4697 break;
4698 }
4699 }
4700 }
4701
4702 return TRUE;
4703}
4704
4705/* Set the right machine number. */
4706
4707static bfd_boolean
4708elf64_aarch64_object_p (bfd *abfd)
4709{
4710 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4711 return TRUE;
4712}
4713
4714/* Function to keep AArch64 specific flags in the ELF header. */
4715
4716static bfd_boolean
4717elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4718{
4719 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4720 {
4721 }
4722 else
4723 {
4724 elf_elfheader (abfd)->e_flags = flags;
4725 elf_flags_init (abfd) = TRUE;
4726 }
4727
4728 return TRUE;
4729}
4730
4731/* Copy backend specific data from one object module to another. */
4732
4733static bfd_boolean
4734elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4735{
4736 flagword in_flags;
4737
4738 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4739 return TRUE;
4740
4741 in_flags = elf_elfheader (ibfd)->e_flags;
4742
4743 elf_elfheader (obfd)->e_flags = in_flags;
4744 elf_flags_init (obfd) = TRUE;
4745
4746 /* Also copy the EI_OSABI field. */
4747 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4748 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4749
4750 /* Copy object attributes. */
4751 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4752
4753 return TRUE;
4754}
4755
/* Merge backend specific data from an object file to the output
   object file when linking.

   Checks endianness compatibility, then merges the e_flags word from
   IBFD into OBFD.  Since no AArch64-specific flag bits are defined
   here, mismatched flags never make the merge fail; the function
   returns FALSE only on an endianness mismatch.  */

static bfd_boolean
elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianess.  */
  if (!_bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
    return TRUE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
				  bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if ((bfd_get_section_flags (ibfd, sec)
	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	    only_data_sections = FALSE;

	  null_input_bfd = FALSE;
	  /* NOTE(review): this unconditional break means only the
	     FIRST section is examined, so only_data_sections reflects
	     just that section.  The same shape exists in the ARM
	     backend this was derived from; harmless today because
	     flags_compatible is never set FALSE below, but worth
	     confirming if flag bits are ever added.  */
	  break;
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  return flags_compatible;
}
4842
4843/* Display the flags field. */
4844
4845static bfd_boolean
4846elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4847{
4848 FILE *file = (FILE *) ptr;
4849 unsigned long flags;
4850
4851 BFD_ASSERT (abfd != NULL && ptr != NULL);
4852
4853 /* Print normal ELF private data. */
4854 _bfd_elf_print_private_bfd_data (abfd, ptr);
4855
4856 flags = elf_elfheader (abfd)->e_flags;
4857 /* Ignore init flag - it may not be set, despite the flags field
4858 containing valid data. */
4859
4860 /* xgettext:c-format */
4861 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4862
4863 if (flags)
4864 fprintf (file, _("<Unrecognised flag bits set>"));
4865
4866 fputc ('\n', file);
4867
4868 return TRUE;
4869}
4870
/* Update the got entry reference counts for the section being removed.

   Called during garbage collection when SEC is being swept: for each
   relocation in RELOCS, undo the reference-count bookkeeping that
   check_relocs performed, so GOT and PLT entries that are no longer
   referenced will not be allocated.  Returns FALSE on error.  */

static bfd_boolean
elf64_aarch64_gc_sweep_hook (bfd *abfd,
			     struct bfd_link_info *info,
			     asection *sec,
			     const Elf_Internal_Rela * relocs)
{
  struct elf64_aarch64_link_hash_table *htab;
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  struct elf_aarch64_local_symbol *locals;
  const Elf_Internal_Rela *rel, *relend;

  /* Nothing is swept in a relocatable link.  */
  if (info->relocatable)
    return TRUE;

  htab = elf64_aarch64_hash_table (info);

  if (htab == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = &elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);

  /* Per-BFD array of local-symbol GOT bookkeeping.  */
  locals = elf64_aarch64_locals (abfd);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      unsigned int r_type;
      struct elf_link_hash_entry *h = NULL;

      r_symndx = ELF64_R_SYM (rel->r_info);

      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: resolve indirect/warning links, then drop
	     any dynamic relocs recorded against SEC.  */
	  struct elf64_aarch64_link_hash_entry *eh;
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	  eh = (struct elf64_aarch64_link_hash_entry *) h;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
	    {
	      if (p->sec == sec)
		{
		  /* Everything must go for SEC.  */
		  *pp = p->next;
		  break;
		}
	    }
	}
      else
	{
	  Elf_Internal_Sym *isym;

	  /* A local symbol.  Looked up only to validate the symbol
	     index; failure to read it is a hard error.  */
	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					abfd, r_symndx);
	  if (isym == NULL)
	    return FALSE;
	}

      /* Apply the same TLS transition as check_relocs/relocate_section
	 so the refcount we decrement matches the one that was
	 incremented.  */
      r_type = ELF64_R_TYPE (rel->r_info);
      r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
      switch (r_type)
	{
	/* All GOT-using relocations: drop one GOT reference.  */
	case R_AARCH64_LD64_GOT_LO12_NC:
	case R_AARCH64_GOT_LD_PREL19:
	case R_AARCH64_ADR_GOT_PAGE:
	case R_AARCH64_TLSGD_ADR_PAGE21:
	case R_AARCH64_TLSGD_ADD_LO12_NC:
	case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
	case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
	case R_AARCH64_TLSLE_ADD_TPREL_LO12:
	case R_AARCH64_TLSLE_ADD_TPREL_HI12:
	case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G2:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
	case R_AARCH64_TLSDESC_ADR_PAGE:
	case R_AARCH64_TLSDESC_ADD_LO12_NC:
	case R_AARCH64_TLSDESC_LD64_LO12_NC:
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (locals != NULL)
	    {
	      if (locals[r_symndx].got_refcount > 0)
		locals[r_symndx].got_refcount -= 1;
	    }
	  break;

	/* Address-forming relocations only need a PLT in an
	   executable link.  */
	case R_AARCH64_ADR_PREL_PG_HI21_NC:
	case R_AARCH64_ADR_PREL_PG_HI21:
	case R_AARCH64_ADR_PREL_LO21:
	  if (h != NULL && info->executable)
	    {
	      if (h->plt.refcount > 0)
		h->plt.refcount -= 1;
	    }
	  break;

	case R_AARCH64_CALL26:
	case R_AARCH64_JUMP26:
	  /* If this is a local symbol then we resolve it
	     directly without creating a PLT entry.  */
	  if (h == NULL)
	    continue;

	  if (h->plt.refcount > 0)
	    h->plt.refcount -= 1;
	  break;

	case R_AARCH64_ABS64:
	  if (h != NULL && info->executable)
	    {
	      if (h->plt.refcount > 0)
		h->plt.refcount -= 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return TRUE;
}
5012
5013/* Adjust a symbol defined by a dynamic object and referenced by a
5014 regular object. The current definition is in some section of the
5015 dynamic object, but we're not including those sections. We have to
5016 change the definition to something the rest of the link can
5017 understand. */
5018
5019static bfd_boolean
5020elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5021 struct elf_link_hash_entry *h)
5022{
5023 struct elf64_aarch64_link_hash_table *htab;
5024 asection *s;
5025
5026 /* If this is a function, put it in the procedure linkage table. We
5027 will fill in the contents of the procedure linkage table later,
5028 when we know the address of the .got section. */
5029 if (h->type == STT_FUNC || h->needs_plt)
5030 {
5031 if (h->plt.refcount <= 0
5032 || SYMBOL_CALLS_LOCAL (info, h)
5033 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5034 && h->root.type == bfd_link_hash_undefweak))
5035 {
5036 /* This case can occur if we saw a CALL26 reloc in
5037 an input file, but the symbol wasn't referred to
5038 by a dynamic object or all references were
5039 garbage collected. In which case we can end up
5040 resolving. */
5041 h->plt.offset = (bfd_vma) - 1;
5042 h->needs_plt = 0;
5043 }
5044
5045 return TRUE;
5046 }
5047 else
5048 /* It's possible that we incorrectly decided a .plt reloc was
5049 needed for an R_X86_64_PC32 reloc to a non-function sym in
5050 check_relocs. We can't decide accurately between function and
5051 non-function syms in check-relocs; Objects loaded later in
5052 the link may change h->type. So fix it now. */
5053 h->plt.offset = (bfd_vma) - 1;
5054
5055
5056 /* If this is a weak symbol, and there is a real definition, the
5057 processor independent code will have arranged for us to see the
5058 real definition first, and we can just use the same value. */
5059 if (h->u.weakdef != NULL)
5060 {
5061 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5062 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5063 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5064 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5065 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5066 h->non_got_ref = h->u.weakdef->non_got_ref;
5067 return TRUE;
5068 }
5069
5070 /* If we are creating a shared library, we must presume that the
5071 only references to the symbol are via the global offset table.
5072 For such cases we need not do anything here; the relocations will
5073 be handled correctly by relocate_section. */
5074 if (info->shared)
5075 return TRUE;
5076
5077 /* If there are no references to this symbol that do not use the
5078 GOT, we don't need to generate a copy reloc. */
5079 if (!h->non_got_ref)
5080 return TRUE;
5081
5082 /* If -z nocopyreloc was given, we won't generate them either. */
5083 if (info->nocopyreloc)
5084 {
5085 h->non_got_ref = 0;
5086 return TRUE;
5087 }
5088
5089 /* We must allocate the symbol in our .dynbss section, which will
5090 become part of the .bss section of the executable. There will be
5091 an entry for this symbol in the .dynsym section. The dynamic
5092 object will contain position independent code, so all references
5093 from the dynamic object to this symbol will go through the global
5094 offset table. The dynamic linker will use the .dynsym entry to
5095 determine the address it must put in the global offset table, so
5096 both the dynamic object and the regular object will refer to the
5097 same memory location for the variable. */
5098
5099 htab = elf64_aarch64_hash_table (info);
5100
5101 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5102 to copy the initial value out of the dynamic object and into the
5103 runtime process image. */
5104 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5105 {
5106 htab->srelbss->size += RELOC_SIZE (htab);
5107 h->needs_copy = 1;
5108 }
5109
5110 s = htab->sdynbss;
5111
5112 return _bfd_elf_adjust_dynamic_copy (h, s);
5113
5114}
5115
5116static bfd_boolean
5117elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5118{
5119 struct elf_aarch64_local_symbol *locals;
5120 locals = elf64_aarch64_locals (abfd);
5121 if (locals == NULL)
5122 {
5123 locals = (struct elf_aarch64_local_symbol *)
5124 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5125 if (locals == NULL)
5126 return FALSE;
5127 elf64_aarch64_locals (abfd) = locals;
5128 }
5129 return TRUE;
5130}
5131
5132/* Look through the relocs for a section during the first phase. */
5133
5134static bfd_boolean
5135elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5136 asection *sec, const Elf_Internal_Rela *relocs)
5137{
5138 Elf_Internal_Shdr *symtab_hdr;
5139 struct elf_link_hash_entry **sym_hashes;
5140 const Elf_Internal_Rela *rel;
5141 const Elf_Internal_Rela *rel_end;
5142 asection *sreloc;
5143
5144 struct elf64_aarch64_link_hash_table *htab;
5145
a06ea964
NC
5146 if (info->relocatable)
5147 return TRUE;
5148
5149 BFD_ASSERT (is_aarch64_elf (abfd));
5150
5151 htab = elf64_aarch64_hash_table (info);
5152 sreloc = NULL;
5153
5154 symtab_hdr = &elf_symtab_hdr (abfd);
5155 sym_hashes = elf_sym_hashes (abfd);
a06ea964
NC
5156
5157 rel_end = relocs + sec->reloc_count;
5158 for (rel = relocs; rel < rel_end; rel++)
5159 {
5160 struct elf_link_hash_entry *h;
5161 unsigned long r_symndx;
5162 unsigned int r_type;
5163
5164 r_symndx = ELF64_R_SYM (rel->r_info);
5165 r_type = ELF64_R_TYPE (rel->r_info);
5166
5167 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5168 {
5169 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5170 r_symndx);
5171 return FALSE;
5172 }
5173
ed5acf27 5174 if (r_symndx < symtab_hdr->sh_info)
a06ea964
NC
5175 h = NULL;
5176 else
5177 {
5178 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5179 while (h->root.type == bfd_link_hash_indirect
5180 || h->root.type == bfd_link_hash_warning)
5181 h = (struct elf_link_hash_entry *) h->root.u.i.link;
81fbe831
AM
5182
5183 /* PR15323, ref flags aren't set for references in the same
5184 object. */
5185 h->root.non_ir_ref = 1;
a06ea964
NC
5186 }
5187
5188 /* Could be done earlier, if h were already available. */
5189 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5190
5191 switch (r_type)
5192 {
5193 case R_AARCH64_ABS64:
5194
5195 /* We don't need to handle relocs into sections not going into
5196 the "real" output. */
5197 if ((sec->flags & SEC_ALLOC) == 0)
5198 break;
5199
5200 if (h != NULL)
5201 {
5202 if (!info->shared)
5203 h->non_got_ref = 1;
5204
5205 h->plt.refcount += 1;
5206 h->pointer_equality_needed = 1;
5207 }
5208
5209 /* No need to do anything if we're not creating a shared
5210 object. */
5211 if (! info->shared)
5212 break;
5213
5214 {
5215 struct elf_dyn_relocs *p;
5216 struct elf_dyn_relocs **head;
5217
5218 /* We must copy these reloc types into the output file.
5219 Create a reloc section in dynobj and make room for
5220 this reloc. */
5221 if (sreloc == NULL)
5222 {
5223 if (htab->root.dynobj == NULL)
5224 htab->root.dynobj = abfd;
5225
5226 sreloc = _bfd_elf_make_dynamic_reloc_section
5227 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5228
5229 if (sreloc == NULL)
5230 return FALSE;
5231 }
5232
5233 /* If this is a global symbol, we count the number of
5234 relocations we need for this symbol. */
5235 if (h != NULL)
5236 {
5237 struct elf64_aarch64_link_hash_entry *eh;
5238 eh = (struct elf64_aarch64_link_hash_entry *) h;
5239 head = &eh->dyn_relocs;
5240 }
5241 else
5242 {
5243 /* Track dynamic relocs needed for local syms too.
5244 We really need local syms available to do this
5245 easily. Oh well. */
5246
5247 asection *s;
5248 void **vpp;
5249 Elf_Internal_Sym *isym;
5250
5251 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5252 abfd, r_symndx);
5253 if (isym == NULL)
5254 return FALSE;
5255
5256 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5257 if (s == NULL)
5258 s = sec;
5259
5260 /* Beware of type punned pointers vs strict aliasing
5261 rules. */
5262 vpp = &(elf_section_data (s)->local_dynrel);
5263 head = (struct elf_dyn_relocs **) vpp;
5264 }
5265
5266 p = *head;
5267 if (p == NULL || p->sec != sec)
5268 {
5269 bfd_size_type amt = sizeof *p;
5270 p = ((struct elf_dyn_relocs *)
5271 bfd_zalloc (htab->root.dynobj, amt));
5272 if (p == NULL)
5273 return FALSE;
5274 p->next = *head;
5275 *head = p;
5276 p->sec = sec;
5277 }
5278
5279 p->count += 1;
5280
5281 }
5282 break;
5283
5284 /* RR: We probably want to keep a consistency check that
5285 there are no dangling GOT_PAGE relocs. */
5286 case R_AARCH64_LD64_GOT_LO12_NC:
f41aef5f 5287 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
5288 case R_AARCH64_ADR_GOT_PAGE:
5289 case R_AARCH64_TLSGD_ADR_PAGE21:
5290 case R_AARCH64_TLSGD_ADD_LO12_NC:
5291 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5292 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5293 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5294 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5295 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5296 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5297 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5298 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5299 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5300 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5301 case R_AARCH64_TLSDESC_ADR_PAGE:
5302 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5303 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5304 {
5305 unsigned got_type;
5306 unsigned old_got_type;
5307
5308 got_type = aarch64_reloc_got_type (r_type);
5309
5310 if (h)
5311 {
5312 h->got.refcount += 1;
5313 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5314 }
5315 else
5316 {
5317 struct elf_aarch64_local_symbol *locals;
5318
5319 if (!elf64_aarch64_allocate_local_symbols
5320 (abfd, symtab_hdr->sh_info))
5321 return FALSE;
5322
5323 locals = elf64_aarch64_locals (abfd);
5324 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5325 locals[r_symndx].got_refcount += 1;
5326 old_got_type = locals[r_symndx].got_type;
5327 }
5328
5329 /* If a variable is accessed with both general dynamic TLS
5330 methods, two slots may be created. */
5331 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5332 got_type |= old_got_type;
5333
5334 /* We will already have issued an error message if there
5335 is a TLS/non-TLS mismatch, based on the symbol type.
5336 So just combine any TLS types needed. */
5337 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5338 && got_type != GOT_NORMAL)
5339 got_type |= old_got_type;
5340
5341 /* If the symbol is accessed by both IE and GD methods, we
5342 are able to relax. Turn off the GD flag, without
5343 messing up with any other kind of TLS types that may be
5344 involved. */
5345 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5346 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5347
5348 if (old_got_type != got_type)
5349 {
5350 if (h != NULL)
5351 elf64_aarch64_hash_entry (h)->got_type = got_type;
5352 else
5353 {
5354 struct elf_aarch64_local_symbol *locals;
5355 locals = elf64_aarch64_locals (abfd);
5356 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5357 locals[r_symndx].got_type = got_type;
5358 }
5359 }
5360
5361 if (htab->root.sgot == NULL)
5362 {
5363 if (htab->root.dynobj == NULL)
5364 htab->root.dynobj = abfd;
5365 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5366 return FALSE;
5367 }
5368 break;
5369 }
5370
5371 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5372 case R_AARCH64_ADR_PREL_PG_HI21:
f41aef5f 5373 case R_AARCH64_ADR_PREL_LO21:
a06ea964
NC
5374 if (h != NULL && info->executable)
5375 {
5376 /* If this reloc is in a read-only section, we might
5377 need a copy reloc. We can't check reliably at this
5378 stage whether the section is read-only, as input
5379 sections have not yet been mapped to output sections.
5380 Tentatively set the flag for now, and correct in
5381 adjust_dynamic_symbol. */
5382 h->non_got_ref = 1;
5383 h->plt.refcount += 1;
5384 h->pointer_equality_needed = 1;
5385 }
5386 /* FIXME:: RR need to handle these in shared libraries
5387 and essentially bomb out as these being non-PIC
5388 relocations in shared libraries. */
5389 break;
5390
5391 case R_AARCH64_CALL26:
5392 case R_AARCH64_JUMP26:
5393 /* If this is a local symbol then we resolve it
5394 directly without creating a PLT entry. */
5395 if (h == NULL)
5396 continue;
5397
5398 h->needs_plt = 1;
5399 h->plt.refcount += 1;
5400 break;
5401 }
5402 }
5403 return TRUE;
5404}
5405
5406/* Treat mapping symbols as special target symbols. */
5407
5408static bfd_boolean
5409elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5410 asymbol *sym)
5411{
5412 return bfd_is_aarch64_special_symbol_name (sym->name,
5413 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5414}
5415
5416/* This is a copy of elf_find_function () from elf.c except that
5417 AArch64 mapping symbols are ignored when looking for function names. */
5418
5419static bfd_boolean
5420aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5421 asection *section,
5422 asymbol **symbols,
5423 bfd_vma offset,
5424 const char **filename_ptr,
5425 const char **functionname_ptr)
5426{
5427 const char *filename = NULL;
5428 asymbol *func = NULL;
5429 bfd_vma low_func = 0;
5430 asymbol **p;
5431
5432 for (p = symbols; *p != NULL; p++)
5433 {
5434 elf_symbol_type *q;
5435
5436 q = (elf_symbol_type *) * p;
5437
5438 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5439 {
5440 default:
5441 break;
5442 case STT_FILE:
5443 filename = bfd_asymbol_name (&q->symbol);
5444 break;
5445 case STT_FUNC:
5446 case STT_NOTYPE:
5447 /* Skip mapping symbols. */
5448 if ((q->symbol.flags & BSF_LOCAL)
5449 && (bfd_is_aarch64_special_symbol_name
5450 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5451 continue;
5452 /* Fall through. */
5453 if (bfd_get_section (&q->symbol) == section
5454 && q->symbol.value >= low_func && q->symbol.value <= offset)
5455 {
5456 func = (asymbol *) q;
5457 low_func = q->symbol.value;
5458 }
5459 break;
5460 }
5461 }
5462
5463 if (func == NULL)
5464 return FALSE;
5465
5466 if (filename_ptr)
5467 *filename_ptr = filename;
5468 if (functionname_ptr)
5469 *functionname_ptr = bfd_asymbol_name (func);
5470
5471 return TRUE;
5472}
5473
5474
5475/* Find the nearest line to a particular section and offset, for error
5476 reporting. This code is a duplicate of the code in elf.c, except
5477 that it uses aarch64_elf_find_function. */
5478
5479static bfd_boolean
5480elf64_aarch64_find_nearest_line (bfd *abfd,
5481 asection *section,
5482 asymbol **symbols,
5483 bfd_vma offset,
5484 const char **filename_ptr,
5485 const char **functionname_ptr,
5486 unsigned int *line_ptr)
5487{
5488 bfd_boolean found = FALSE;
5489
5490 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5491 toolchain uses it. */
5492
5493 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5494 section, symbols, offset,
5495 filename_ptr, functionname_ptr,
5496 line_ptr, NULL, 0,
5497 &elf_tdata (abfd)->dwarf2_find_line_info))
5498 {
5499 if (!*functionname_ptr)
5500 aarch64_elf_find_function (abfd, section, symbols, offset,
5501 *filename_ptr ? NULL : filename_ptr,
5502 functionname_ptr);
5503
5504 return TRUE;
5505 }
5506
5507 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5508 &found, filename_ptr,
5509 functionname_ptr, line_ptr,
5510 &elf_tdata (abfd)->line_info))
5511 return FALSE;
5512
5513 if (found && (*functionname_ptr || *line_ptr))
5514 return TRUE;
5515
5516 if (symbols == NULL)
5517 return FALSE;
5518
5519 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5520 filename_ptr, functionname_ptr))
5521 return FALSE;
5522
5523 *line_ptr = 0;
5524 return TRUE;
5525}
5526
5527static bfd_boolean
5528elf64_aarch64_find_inliner_info (bfd *abfd,
5529 const char **filename_ptr,
5530 const char **functionname_ptr,
5531 unsigned int *line_ptr)
5532{
5533 bfd_boolean found;
5534 found = _bfd_dwarf2_find_inliner_info
5535 (abfd, filename_ptr,
5536 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5537 return found;
5538}
5539
5540
5541static void
5542elf64_aarch64_post_process_headers (bfd *abfd,
5543 struct bfd_link_info *link_info
5544 ATTRIBUTE_UNUSED)
5545{
5546 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5547
5548 i_ehdrp = elf_elfheader (abfd);
5549 i_ehdrp->e_ident[EI_OSABI] = 0;
5550 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5551}
5552
5553static enum elf_reloc_type_class
7e612e98
AM
5554elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5555 const asection *rel_sec ATTRIBUTE_UNUSED,
5556 const Elf_Internal_Rela *rela)
a06ea964
NC
5557{
5558 switch ((int) ELF64_R_TYPE (rela->r_info))
5559 {
5560 case R_AARCH64_RELATIVE:
5561 return reloc_class_relative;
5562 case R_AARCH64_JUMP_SLOT:
5563 return reloc_class_plt;
5564 case R_AARCH64_COPY:
5565 return reloc_class_copy;
5566 default:
5567 return reloc_class_normal;
5568 }
5569}
5570
5571/* Set the right machine number for an AArch64 ELF file. */
5572
5573static bfd_boolean
5574elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5575{
5576 if (hdr->sh_type == SHT_NOTE)
5577 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5578
5579 return TRUE;
5580}
5581
5582/* Handle an AArch64 specific section when reading an object file. This is
5583 called when bfd_section_from_shdr finds a section with an unknown
5584 type. */
5585
5586static bfd_boolean
5587elf64_aarch64_section_from_shdr (bfd *abfd,
5588 Elf_Internal_Shdr *hdr,
5589 const char *name, int shindex)
5590{
5591 /* There ought to be a place to keep ELF backend specific flags, but
5592 at the moment there isn't one. We just keep track of the
5593 sections by their name, instead. Fortunately, the ABI gives
5594 names for all the AArch64 specific sections, so we will probably get
5595 away with this. */
5596 switch (hdr->sh_type)
5597 {
5598 case SHT_AARCH64_ATTRIBUTES:
5599 break;
5600
5601 default:
5602 return FALSE;
5603 }
5604
5605 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5606 return FALSE;
5607
5608 return TRUE;
5609}
5610
5611/* A structure used to record a list of sections, independently
5612 of the next and prev fields in the asection structure. */
5613typedef struct section_list
5614{
5615 asection *sec;
5616 struct section_list *next;
5617 struct section_list *prev;
5618}
5619section_list;
5620
5621/* Unfortunately we need to keep a list of sections for which
5622 an _aarch64_elf_section_data structure has been allocated. This
5623 is because it is possible for functions like elf64_aarch64_write_section
5624 to be called on a section which has had an elf_data_structure
5625 allocated for it (and so the used_by_bfd field is valid) but
5626 for which the AArch64 extended version of this structure - the
5627 _aarch64_elf_section_data structure - has not been allocated. */
5628static section_list *sections_with_aarch64_elf_section_data = NULL;
5629
5630static void
5631record_section_with_aarch64_elf_section_data (asection *sec)
5632{
5633 struct section_list *entry;
5634
5635 entry = bfd_malloc (sizeof (*entry));
5636 if (entry == NULL)
5637 return;
5638 entry->sec = sec;
5639 entry->next = sections_with_aarch64_elf_section_data;
5640 entry->prev = NULL;
5641 if (entry->next != NULL)
5642 entry->next->prev = entry;
5643 sections_with_aarch64_elf_section_data = entry;
5644}
5645
5646static struct section_list *
5647find_aarch64_elf_section_entry (asection *sec)
5648{
5649 struct section_list *entry;
5650 static struct section_list *last_entry = NULL;
5651
5652 /* This is a short cut for the typical case where the sections are added
5653 to the sections_with_aarch64_elf_section_data list in forward order and
5654 then looked up here in backwards order. This makes a real difference
5655 to the ld-srec/sec64k.exp linker test. */
5656 entry = sections_with_aarch64_elf_section_data;
5657 if (last_entry != NULL)
5658 {
5659 if (last_entry->sec == sec)
5660 entry = last_entry;
5661 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5662 entry = last_entry->next;
5663 }
5664
5665 for (; entry; entry = entry->next)
5666 if (entry->sec == sec)
5667 break;
5668
5669 if (entry)
5670 /* Record the entry prior to this one - it is the entry we are
5671 most likely to want to locate next time. Also this way if we
5672 have been called from
5673 unrecord_section_with_aarch64_elf_section_data () we will not
5674 be caching a pointer that is about to be freed. */
5675 last_entry = entry->prev;
5676
5677 return entry;
5678}
5679
5680static void
5681unrecord_section_with_aarch64_elf_section_data (asection *sec)
5682{
5683 struct section_list *entry;
5684
5685 entry = find_aarch64_elf_section_entry (sec);
5686
5687 if (entry)
5688 {
5689 if (entry->prev != NULL)
5690 entry->prev->next = entry->next;
5691 if (entry->next != NULL)
5692 entry->next->prev = entry->prev;
5693 if (entry == sections_with_aarch64_elf_section_data)
5694 sections_with_aarch64_elf_section_data = entry->next;
5695 free (entry);
5696 }
5697}
5698
5699
5700typedef struct
5701{
5702 void *finfo;
5703 struct bfd_link_info *info;
5704 asection *sec;
5705 int sec_shndx;
5706 int (*func) (void *, const char *, Elf_Internal_Sym *,
5707 asection *, struct elf_link_hash_entry *);
5708} output_arch_syminfo;
5709
5710enum map_symbol_type
5711{
5712 AARCH64_MAP_INSN,
5713 AARCH64_MAP_DATA
5714};
5715
5716
5717/* Output a single mapping symbol. */
5718
5719static bfd_boolean
5720elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5721 enum map_symbol_type type, bfd_vma offset)
5722{
5723 static const char *names[2] = { "$x", "$d" };
5724 Elf_Internal_Sym sym;
5725
5726 sym.st_value = (osi->sec->output_section->vma
5727 + osi->sec->output_offset + offset);
5728 sym.st_size = 0;
5729 sym.st_other = 0;
5730 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5731 sym.st_shndx = osi->sec_shndx;
5732 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5733}
5734
5735
5736
5737/* Output mapping symbols for PLT entries associated with H. */
5738
5739static bfd_boolean
5740elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5741{
5742 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5743 bfd_vma addr;
5744
5745 if (h->root.type == bfd_link_hash_indirect)
5746 return TRUE;
5747
5748 if (h->root.type == bfd_link_hash_warning)
5749 /* When warning symbols are created, they **replace** the "real"
5750 entry in the hash table, thus we never get to see the real
5751 symbol in a hash traversal. So look at it now. */
5752 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5753
5754 if (h->plt.offset == (bfd_vma) - 1)
5755 return TRUE;
5756
5757 addr = h->plt.offset;
5758 if (addr == 32)
5759 {
5760 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5761 return FALSE;
5762 }
5763 return TRUE;
5764}
5765
5766
5767/* Output a single local symbol for a generated stub. */
5768
5769static bfd_boolean
5770elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5771 bfd_vma offset, bfd_vma size)
5772{
5773 Elf_Internal_Sym sym;
5774
5775 sym.st_value = (osi->sec->output_section->vma
5776 + osi->sec->output_offset + offset);
5777 sym.st_size = size;
5778 sym.st_other = 0;
5779 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5780 sym.st_shndx = osi->sec_shndx;
5781 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5782}
5783
5784static bfd_boolean
5785aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5786{
5787 struct elf64_aarch64_stub_hash_entry *stub_entry;
5788 asection *stub_sec;
5789 bfd_vma addr;
5790 char *stub_name;
5791 output_arch_syminfo *osi;
5792
5793 /* Massage our args to the form they really have. */
5794 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5795 osi = (output_arch_syminfo *) in_arg;
5796
5797 stub_sec = stub_entry->stub_sec;
5798
5799 /* Ensure this stub is attached to the current section being
5800 processed. */
5801 if (stub_sec != osi->sec)
5802 return TRUE;
5803
5804 addr = (bfd_vma) stub_entry->stub_offset;
5805
5806 stub_name = stub_entry->output_name;
5807
5808 switch (stub_entry->stub_type)
5809 {
5810 case aarch64_stub_adrp_branch:
5811 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5812 sizeof (aarch64_adrp_branch_stub)))
5813 return FALSE;
5814 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5815 return FALSE;
5816 break;
5817 case aarch64_stub_long_branch:
5818 if (!elf64_aarch64_output_stub_sym
5819 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5820 return FALSE;
5821 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5822 return FALSE;
5823 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5824 return FALSE;
5825 break;
5826 default:
5827 BFD_FAIL ();
5828 }
5829
5830 return TRUE;
5831}
5832
5833/* Output mapping symbols for linker generated sections. */
5834
5835static bfd_boolean
5836elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5837 struct bfd_link_info *info,
5838 void *finfo,
5839 int (*func) (void *, const char *,
5840 Elf_Internal_Sym *,
5841 asection *,
5842 struct elf_link_hash_entry
5843 *))
5844{
5845 output_arch_syminfo osi;
5846 struct elf64_aarch64_link_hash_table *htab;
5847
5848 htab = elf64_aarch64_hash_table (info);
5849
5850 osi.finfo = finfo;
5851 osi.info = info;
5852 osi.func = func;
5853
5854 /* Long calls stubs. */
5855 if (htab->stub_bfd && htab->stub_bfd->sections)
5856 {
5857 asection *stub_sec;
5858
5859 for (stub_sec = htab->stub_bfd->sections;
5860 stub_sec != NULL; stub_sec = stub_sec->next)
5861 {
5862 /* Ignore non-stub sections. */
5863 if (!strstr (stub_sec->name, STUB_SUFFIX))
5864 continue;
5865
5866 osi.sec = stub_sec;
5867
5868 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5869 (output_bfd, osi.sec->output_section);
5870
5871 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5872 &osi);
5873 }
5874 }
5875
5876 /* Finally, output mapping symbols for the PLT. */
5877 if (!htab->root.splt || htab->root.splt->size == 0)
5878 return TRUE;
5879
5880 /* For now live without mapping symbols for the plt. */
5881 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5882 (output_bfd, htab->root.splt->output_section);
5883 osi.sec = htab->root.splt;
5884
5885 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5886 (void *) &osi);
5887
5888 return TRUE;
5889
5890}
5891
5892/* Allocate target specific section data. */
5893
5894static bfd_boolean
5895elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5896{
5897 if (!sec->used_by_bfd)
5898 {
5899 _aarch64_elf_section_data *sdata;
5900 bfd_size_type amt = sizeof (*sdata);
5901
5902 sdata = bfd_zalloc (abfd, amt);
5903 if (sdata == NULL)
5904 return FALSE;
5905 sec->used_by_bfd = sdata;
5906 }
5907
5908 record_section_with_aarch64_elf_section_data (sec);
5909
5910 return _bfd_elf_new_section_hook (abfd, sec);
5911}
5912
5913
5914static void
5915unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5916 asection *sec,
5917 void *ignore ATTRIBUTE_UNUSED)
5918{
5919 unrecord_section_with_aarch64_elf_section_data (sec);
5920}
5921
5922static bfd_boolean
5923elf64_aarch64_close_and_cleanup (bfd *abfd)
5924{
5925 if (abfd->sections)
5926 bfd_map_over_sections (abfd,
5927 unrecord_section_via_map_over_sections, NULL);
5928
5929 return _bfd_elf_close_and_cleanup (abfd);
5930}
5931
5932static bfd_boolean
5933elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5934{
5935 if (abfd->sections)
5936 bfd_map_over_sections (abfd,
5937 unrecord_section_via_map_over_sections, NULL);
5938
5939 return _bfd_free_cached_info (abfd);
5940}
5941
5942static bfd_boolean
5943elf64_aarch64_is_function_type (unsigned int type)
5944{
5945 return type == STT_FUNC;
5946}
5947
5948/* Create dynamic sections. This is different from the ARM backend in that
5949 the got, plt, gotplt and their relocation sections are all created in the
5950 standard part of the bfd elf backend. */
5951
5952static bfd_boolean
5953elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5954 struct bfd_link_info *info)
5955{
5956 struct elf64_aarch64_link_hash_table *htab;
5957 struct elf_link_hash_entry *h;
5958
5959 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5960 return FALSE;
5961
5962 htab = elf64_aarch64_hash_table (info);
5963 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5964 if (!info->shared)
5965 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5966
5967 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5968 abort ();
5969
5970 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5971 dynobj's .got section. We don't do this in the linker script
5972 because we don't want to define the symbol if we are not creating
5973 a global offset table. */
5974 h = _bfd_elf_define_linkage_sym (dynobj, info,
5975 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5976 elf_hash_table (info)->hgot = h;
5977 if (h == NULL)
5978 return FALSE;
5979
5980 return TRUE;
5981}
5982
5983
5984/* Allocate space in .plt, .got and associated reloc sections for
5985 dynamic relocs. */
5986
5987static bfd_boolean
5988elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5989{
5990 struct bfd_link_info *info;
5991 struct elf64_aarch64_link_hash_table *htab;
5992 struct elf64_aarch64_link_hash_entry *eh;
5993 struct elf_dyn_relocs *p;
5994
5995 /* An example of a bfd_link_hash_indirect symbol is versioned
5996 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5997 -> __gxx_personality_v0(bfd_link_hash_defined)
5998
5999 There is no need to process bfd_link_hash_indirect symbols here
6000 because we will also be presented with the concrete instance of
6001 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
6002 called to copy all relevant data from the generic to the concrete
6003 symbol instance.
6004 */
6005 if (h->root.type == bfd_link_hash_indirect)
6006 return TRUE;
6007
6008 if (h->root.type == bfd_link_hash_warning)
6009 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6010
6011 info = (struct bfd_link_info *) inf;
6012 htab = elf64_aarch64_hash_table (info);
6013
6014 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6015 {
6016 /* Make sure this symbol is output as a dynamic symbol.
6017 Undefined weak syms won't yet be marked as dynamic. */
6018 if (h->dynindx == -1 && !h->forced_local)
6019 {
6020 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6021 return FALSE;
6022 }
6023
6024 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6025 {
6026 asection *s = htab->root.splt;
6027
6028 /* If this is the first .plt entry, make room for the special
6029 first entry. */
6030 if (s->size == 0)
6031 s->size += htab->plt_header_size;
6032
6033 h->plt.offset = s->size;
6034
6035 /* If this symbol is not defined in a regular file, and we are
6036 not generating a shared library, then set the symbol to this
6037 location in the .plt. This is required to make function
6038 pointers compare as equal between the normal executable and
6039 the shared library. */
6040 if (!info->shared && !h->def_regular)
6041 {
6042 h->root.u.def.section = s;
6043 h->root.u.def.value = h->plt.offset;
6044 }
6045
6046 /* Make room for this entry. For now we only create the
6047 small model PLT entries. We later need to find a way
6048 of relaxing into these from the large model PLT entries. */
6049 s->size += PLT_SMALL_ENTRY_SIZE;
6050
6051 /* We also need to make an entry in the .got.plt section, which
6052 will be placed in the .got section by the linker script. */
6053 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6054
6055 /* We also need to make an entry in the .rela.plt section. */
6056 htab->root.srelplt->size += RELOC_SIZE (htab);
6057
6058 /* We need to ensure that all GOT entries that serve the PLT
6059 are consecutive with the special GOT slots [0] [1] and
6060 [2]. Any addtional relocations, such as
6061 R_AARCH64_TLSDESC, must be placed after the PLT related
6062 entries. We abuse the reloc_count such that during
6063 sizing we adjust reloc_count to indicate the number of
6064 PLT related reserved entries. In subsequent phases when
6065 filling in the contents of the reloc entries, PLT related
6066 entries are placed by computing their PLT index (0
6067 .. reloc_count). While other none PLT relocs are placed
6068 at the slot indicated by reloc_count and reloc_count is
6069 updated. */
6070
6071 htab->root.srelplt->reloc_count++;
6072 }
6073 else
6074 {
6075 h->plt.offset = (bfd_vma) - 1;
6076 h->needs_plt = 0;
6077 }
6078 }
6079 else
6080 {
6081 h->plt.offset = (bfd_vma) - 1;
6082 h->needs_plt = 0;
6083 }
6084
6085 eh = (struct elf64_aarch64_link_hash_entry *) h;
6086 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6087
6088 if (h->got.refcount > 0)
6089 {
6090 bfd_boolean dyn;
6091 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
6092
6093 h->got.offset = (bfd_vma) - 1;
6094
6095 dyn = htab->root.dynamic_sections_created;
6096
6097 /* Make sure this symbol is output as a dynamic symbol.
6098 Undefined weak syms won't yet be marked as dynamic. */
6099 if (dyn && h->dynindx == -1 && !h->forced_local)
6100 {
6101 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6102 return FALSE;
6103 }
6104
6105 if (got_type == GOT_UNKNOWN)
6106 {
6107 }
6108 else if (got_type == GOT_NORMAL)
6109 {
6110 h->got.offset = htab->root.sgot->size;
6111 htab->root.sgot->size += GOT_ENTRY_SIZE;
6112 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6113 || h->root.type != bfd_link_hash_undefweak)
6114 && (info->shared
6115 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6116 {
6117 htab->root.srelgot->size += RELOC_SIZE (htab);
6118 }
6119 }
6120 else
6121 {
6122 int indx;
6123 if (got_type & GOT_TLSDESC_GD)
6124 {
6125 eh->tlsdesc_got_jump_table_offset =
6126 (htab->root.sgotplt->size
6127 - aarch64_compute_jump_table_size (htab));
6128 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6129 h->got.offset = (bfd_vma) - 2;
6130 }
6131
6132 if (got_type & GOT_TLS_GD)
6133 {
6134 h->got.offset = htab->root.sgot->size;
6135 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6136 }
6137
6138 if (got_type & GOT_TLS_IE)
6139 {
6140 h->got.offset = htab->root.sgot->size;
6141 htab->root.sgot->size += GOT_ENTRY_SIZE;
6142 }
6143
6144 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6145 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6146 || h->root.type != bfd_link_hash_undefweak)
6147 && (info->shared
6148 || indx != 0
6149 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6150 {
6151 if (got_type & GOT_TLSDESC_GD)
6152 {
6153 htab->root.srelplt->size += RELOC_SIZE (htab);
6154 /* Note reloc_count not incremented here! We have
6155 already adjusted reloc_count for this relocation
6156 type. */
6157
6158 /* TLSDESC PLT is now needed, but not yet determined. */
6159 htab->tlsdesc_plt = (bfd_vma) - 1;
6160 }
6161
6162 if (got_type & GOT_TLS_GD)
6163 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6164
6165 if (got_type & GOT_TLS_IE)
6166 htab->root.srelgot->size += RELOC_SIZE (htab);
6167 }
6168 }
6169 }
6170 else
6171 {
6172 h->got.offset = (bfd_vma) - 1;
6173 }
6174
6175 if (eh->dyn_relocs == NULL)
6176 return TRUE;
6177
6178 /* In the shared -Bsymbolic case, discard space allocated for
6179 dynamic pc-relative relocs against symbols which turn out to be
6180 defined in regular objects. For the normal shared case, discard
6181 space for pc-relative relocs that have become local due to symbol
6182 visibility changes. */
6183
6184 if (info->shared)
6185 {
6186 /* Relocs that use pc_count are those that appear on a call
6187 insn, or certain REL relocs that can generated via assembly.
6188 We want calls to protected symbols to resolve directly to the
6189 function rather than going via the plt. If people want
6190 function pointer comparisons to work as expected then they
6191 should avoid writing weird assembly. */
6192 if (SYMBOL_CALLS_LOCAL (info, h))
6193 {
6194 struct elf_dyn_relocs **pp;
6195
6196 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6197 {
6198 p->count -= p->pc_count;
6199 p->pc_count = 0;
6200 if (p->count == 0)
6201 *pp = p->next;
6202 else
6203 pp = &p->next;
6204 }
6205 }
6206
6207 /* Also discard relocs on undefined weak syms with non-default
6208 visibility. */
6209 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6210 {
6211 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6212 eh->dyn_relocs = NULL;
6213
6214 /* Make sure undefined weak symbols are output as a dynamic
6215 symbol in PIEs. */
6216 else if (h->dynindx == -1
6217 && !h->forced_local
6218 && !bfd_elf_link_record_dynamic_symbol (info, h))
6219 return FALSE;
6220 }
6221
6222 }
6223 else if (ELIMINATE_COPY_RELOCS)
6224 {
6225 /* For the non-shared case, discard space for relocs against
6226 symbols which turn out to need copy relocs or are not
6227 dynamic. */
6228
6229 if (!h->non_got_ref
6230 && ((h->def_dynamic
6231 && !h->def_regular)
6232 || (htab->root.dynamic_sections_created
6233 && (h->root.type == bfd_link_hash_undefweak
6234 || h->root.type == bfd_link_hash_undefined))))
6235 {
6236 /* Make sure this symbol is output as a dynamic symbol.
6237 Undefined weak syms won't yet be marked as dynamic. */
6238 if (h->dynindx == -1
6239 && !h->forced_local
6240 && !bfd_elf_link_record_dynamic_symbol (info, h))
6241 return FALSE;
6242
6243 /* If that succeeded, we know we'll be keeping all the
6244 relocs. */
6245 if (h->dynindx != -1)
6246 goto keep;
6247 }
6248
6249 eh->dyn_relocs = NULL;
6250
6251 keep:;
6252 }
6253
6254 /* Finally, allocate space. */
6255 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6256 {
6257 asection *sreloc;
6258
6259 sreloc = elf_section_data (p->sec)->sreloc;
6260
6261 BFD_ASSERT (sreloc != NULL);
6262
6263 sreloc->size += p->count * RELOC_SIZE (htab);
6264 }
6265
6266 return TRUE;
6267}
6268
6269
6270
6271
6272/* This is the most important function of all . Innocuosly named
6273 though ! */
6274static bfd_boolean
6275elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6276 struct bfd_link_info *info)
6277{
6278 struct elf64_aarch64_link_hash_table *htab;
6279 bfd *dynobj;
6280 asection *s;
6281 bfd_boolean relocs;
6282 bfd *ibfd;
6283
6284 htab = elf64_aarch64_hash_table ((info));
6285 dynobj = htab->root.dynobj;
6286
6287 BFD_ASSERT (dynobj != NULL);
6288
6289 if (htab->root.dynamic_sections_created)
6290 {
6291 if (info->executable)
6292 {
6293 s = bfd_get_linker_section (dynobj, ".interp");
6294 if (s == NULL)
6295 abort ();
6296 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6297 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6298 }
6299 }
6300
6301 /* Set up .got offsets for local syms, and space for local dynamic
6302 relocs. */
6303 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6304 {
6305 struct elf_aarch64_local_symbol *locals = NULL;
6306 Elf_Internal_Shdr *symtab_hdr;
6307 asection *srel;
6308 unsigned int i;
6309
6310 if (!is_aarch64_elf (ibfd))
6311 continue;
6312
6313 for (s = ibfd->sections; s != NULL; s = s->next)
6314 {
6315 struct elf_dyn_relocs *p;
6316
6317 for (p = (struct elf_dyn_relocs *)
6318 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6319 {
6320 if (!bfd_is_abs_section (p->sec)
6321 && bfd_is_abs_section (p->sec->output_section))
6322 {
6323 /* Input section has been discarded, either because
6324 it is a copy of a linkonce section or due to
6325 linker script /DISCARD/, so we'll be discarding
6326 the relocs too. */
6327 }
6328 else if (p->count != 0)
6329 {
6330 srel = elf_section_data (p->sec)->sreloc;
6331 srel->size += p->count * RELOC_SIZE (htab);
6332 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6333 info->flags |= DF_TEXTREL;
6334 }
6335 }
6336 }
6337
6338 locals = elf64_aarch64_locals (ibfd);
6339 if (!locals)
6340 continue;
6341
6342 symtab_hdr = &elf_symtab_hdr (ibfd);
6343 srel = htab->root.srelgot;
6344 for (i = 0; i < symtab_hdr->sh_info; i++)
6345 {
6346 locals[i].got_offset = (bfd_vma) - 1;
6347 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6348 if (locals[i].got_refcount > 0)
6349 {
6350 unsigned got_type = locals[i].got_type;
6351 if (got_type & GOT_TLSDESC_GD)
6352 {
6353 locals[i].tlsdesc_got_jump_table_offset =
6354 (htab->root.sgotplt->size
6355 - aarch64_compute_jump_table_size (htab));
6356 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6357 locals[i].got_offset = (bfd_vma) - 2;
6358 }
6359
6360 if (got_type & GOT_TLS_GD)
6361 {
6362 locals[i].got_offset = htab->root.sgot->size;
6363 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6364 }
6365
6366 if (got_type & GOT_TLS_IE)
6367 {
6368 locals[i].got_offset = htab->root.sgot->size;
6369 htab->root.sgot->size += GOT_ENTRY_SIZE;
6370 }
6371
6372 if (got_type == GOT_UNKNOWN)
6373 {
6374 }
6375
6376 if (got_type == GOT_NORMAL)
6377 {
6378 }
6379
6380 if (info->shared)
6381 {
6382 if (got_type & GOT_TLSDESC_GD)
6383 {
6384 htab->root.srelplt->size += RELOC_SIZE (htab);
6385 /* Note RELOC_COUNT not incremented here! */
6386 htab->tlsdesc_plt = (bfd_vma) - 1;
6387 }
6388
6389 if (got_type & GOT_TLS_GD)
6390 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6391
6392 if (got_type & GOT_TLS_IE)
6393 htab->root.srelgot->size += RELOC_SIZE (htab);
6394 }
6395 }
6396 else
6397 {
6398 locals[i].got_refcount = (bfd_vma) - 1;
6399 }
6400 }
6401 }
6402
6403
6404 /* Allocate global sym .plt and .got entries, and space for global
6405 sym dynamic relocs. */
6406 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6407 info);
6408
6409
6410 /* For every jump slot reserved in the sgotplt, reloc_count is
6411 incremented. However, when we reserve space for TLS descriptors,
6412 it's not incremented, so in order to compute the space reserved
6413 for them, it suffices to multiply the reloc count by the jump
6414 slot size. */
6415
6416 if (htab->root.srelplt)
6417 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6418
6419 if (htab->tlsdesc_plt)
6420 {
6421 if (htab->root.splt->size == 0)
6422 htab->root.splt->size += PLT_ENTRY_SIZE;
6423
6424 htab->tlsdesc_plt = htab->root.splt->size;
6425 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6426
6427 /* If we're not using lazy TLS relocations, don't generate the
6428 GOT entry required. */
6429 if (!(info->flags & DF_BIND_NOW))
6430 {
6431 htab->dt_tlsdesc_got = htab->root.sgot->size;
6432 htab->root.sgot->size += GOT_ENTRY_SIZE;
6433 }
6434 }
6435
6436 /* We now have determined the sizes of the various dynamic sections.
6437 Allocate memory for them. */
6438 relocs = FALSE;
6439 for (s = dynobj->sections; s != NULL; s = s->next)
6440 {
6441 if ((s->flags & SEC_LINKER_CREATED) == 0)
6442 continue;
6443
6444 if (s == htab->root.splt
6445 || s == htab->root.sgot
6446 || s == htab->root.sgotplt
6447 || s == htab->root.iplt
6448 || s == htab->root.igotplt || s == htab->sdynbss)
6449 {
6450 /* Strip this section if we don't need it; see the
6451 comment below. */
6452 }
6453 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6454 {
6455 if (s->size != 0 && s != htab->root.srelplt)
6456 relocs = TRUE;
6457
6458 /* We use the reloc_count field as a counter if we need
6459 to copy relocs into the output file. */
6460 if (s != htab->root.srelplt)
6461 s->reloc_count = 0;
6462 }
6463 else
6464 {
6465 /* It's not one of our sections, so don't allocate space. */
6466 continue;
6467 }
6468
6469 if (s->size == 0)
6470 {
6471 /* If we don't need this section, strip it from the
6472 output file. This is mostly to handle .rela.bss and
6473 .rela.plt. We must create both sections in
6474 create_dynamic_sections, because they must be created
6475 before the linker maps input sections to output
6476 sections. The linker does that before
6477 adjust_dynamic_symbol is called, and it is that
6478 function which decides whether anything needs to go
6479 into these sections. */
6480
6481 s->flags |= SEC_EXCLUDE;
6482 continue;
6483 }
6484
6485 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6486 continue;
6487
6488 /* Allocate memory for the section contents. We use bfd_zalloc
6489 here in case unused entries are not reclaimed before the
6490 section's contents are written out. This should not happen,
6491 but this way if it does, we get a R_AARCH64_NONE reloc instead
6492 of garbage. */
6493 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6494 if (s->contents == NULL)
6495 return FALSE;
6496 }
6497
6498 if (htab->root.dynamic_sections_created)
6499 {
6500 /* Add some entries to the .dynamic section. We fill in the
6501 values later, in elf64_aarch64_finish_dynamic_sections, but we
6502 must add the entries now so that we get the correct size for
6503 the .dynamic section. The DT_DEBUG entry is filled in by the
6504 dynamic linker and used by the debugger. */
6505#define add_dynamic_entry(TAG, VAL) \
6506 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6507
6508 if (info->executable)
6509 {
6510 if (!add_dynamic_entry (DT_DEBUG, 0))
6511 return FALSE;
6512 }
6513
6514 if (htab->root.splt->size != 0)
6515 {
6516 if (!add_dynamic_entry (DT_PLTGOT, 0)
6517 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6518 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6519 || !add_dynamic_entry (DT_JMPREL, 0))
6520 return FALSE;
6521
6522 if (htab->tlsdesc_plt
6523 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6524 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6525 return FALSE;
6526 }
6527
6528 if (relocs)
6529 {
6530 if (!add_dynamic_entry (DT_RELA, 0)
6531 || !add_dynamic_entry (DT_RELASZ, 0)
6532 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6533 return FALSE;
6534
6535 /* If any dynamic relocs apply to a read-only section,
6536 then we need a DT_TEXTREL entry. */
6537 if ((info->flags & DF_TEXTREL) != 0)
6538 {
6539 if (!add_dynamic_entry (DT_TEXTREL, 0))
6540 return FALSE;
6541 }
6542 }
6543 }
6544#undef add_dynamic_entry
6545
6546 return TRUE;
6547
6548
6549}
6550
6551static inline void
6552elf64_aarch64_update_plt_entry (bfd *output_bfd,
6553 unsigned int r_type,
6554 bfd_byte *plt_entry, bfd_vma value)
6555{
6556 reloc_howto_type *howto;
6557 howto = elf64_aarch64_howto_from_type (r_type);
6558 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6559}
6560
6561static void
6562elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6563 struct elf64_aarch64_link_hash_table
6564 *htab, bfd *output_bfd)
6565{
6566 bfd_byte *plt_entry;
6567 bfd_vma plt_index;
6568 bfd_vma got_offset;
6569 bfd_vma gotplt_entry_address;
6570 bfd_vma plt_entry_address;
6571 Elf_Internal_Rela rela;
6572 bfd_byte *loc;
6573
6574 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6575
6576 /* Offset in the GOT is PLT index plus got GOT headers(3)
6577 times 8. */
6578 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6579 plt_entry = htab->root.splt->contents + h->plt.offset;
6580 plt_entry_address = htab->root.splt->output_section->vma
6581 + htab->root.splt->output_section->output_offset + h->plt.offset;
6582 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6583 htab->root.sgotplt->output_offset + got_offset;
6584
6585 /* Copy in the boiler-plate for the PLTn entry. */
6586 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6587
6588 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6589 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6590 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6591 plt_entry,
6592 PG (gotplt_entry_address) -
6593 PG (plt_entry_address));
6594
6595 /* Fill in the lo12 bits for the load from the pltgot. */
6596 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6597 plt_entry + 4,
6598 PG_OFFSET (gotplt_entry_address));
6599
6600 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6601 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6602 plt_entry + 8,
6603 PG_OFFSET (gotplt_entry_address));
6604
6605 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6606 bfd_put_64 (output_bfd,
6607 (htab->root.splt->output_section->vma
6608 + htab->root.splt->output_offset),
6609 htab->root.sgotplt->contents + got_offset);
6610
6611 /* Fill in the entry in the .rela.plt section. */
6612 rela.r_offset = gotplt_entry_address;
6613 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6614 rela.r_addend = 0;
6615
6616 /* Compute the relocation entry to used based on PLT index and do
6617 not adjust reloc_count. The reloc_count has already been adjusted
6618 to account for this entry. */
6619 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6620 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6621}
6622
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bfd_boolean
elf64_aarch64_always_size_sections (bfd *output_bfd,
				    struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Nothing to synthesize for a relocatable (ld -r) link.  */
  if (info->relocatable)
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* CREATE = TRUE: make the hash entry if no input referenced
	 _TLS_MODULE_BASE_ yet.  */
      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
				      "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *h = NULL;
	  const struct elf_backend_data *bed =
	    get_elf_backend_data (output_bfd);

	  /* Define the symbol at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
	    return FALSE;

	  /* Mark the looked-up entry STT_TLS, then rebind TLSBASE to
	     the entry returned by add_one_symbol before setting the
	     remaining attributes and hiding it.  NOTE(review): the
	     two hash entries are presumably the same object here;
	     order preserved deliberately.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *) h;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  return TRUE;
}
6665
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  */
static bfd_boolean
elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
				     struct bfd_link_info *info,
				     struct elf_link_hash_entry *h,
				     Elf_Internal_Sym *sym)
{
  struct elf64_aarch64_link_hash_table *htab;
  htab = elf64_aarch64_hash_table (info);

  if (h->plt.offset != (bfd_vma) - 1)
    {
      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */

      if (h->dynindx == -1
	  || htab->root.splt == NULL
	  || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
	abort ();

      elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  Leave the value alone.  This is a clue
	     for the dynamic linker, to make function pointer
	     comparisons work between an application and shared
	     library.  */
	  sym->st_shndx = SHN_UNDEF;
	}
    }

  if (h->got.offset != (bfd_vma) - 1
      && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  Bit 0 of got.offset is used as an "initialised" flag, so
	 mask it off when forming the address.  */
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
	abort ();

      rela.r_offset = (htab->root.sgot->output_section->vma
		       + htab->root.sgot->output_offset
		       + (h->got.offset & ~(bfd_vma) 1));

      if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (!h->def_regular)
	    return FALSE;

	  /* Locally-resolved symbol in a shared object: emit a
	     RELATIVE reloc with the link-time address as addend.  */
	  BFD_ASSERT ((h->got.offset & 1) != 0);
	  rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	}
      else
	{
	  /* Otherwise zero the slot and let the dynamic linker fill
	     it via GLOB_DAT.  */
	  BFD_ASSERT ((h->got.offset & 1) == 0);
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + h->got.offset);
	  rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
	  rela.r_addend = 0;
	}

      loc = htab->root.srelgot->contents;
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol needs a copy reloc.  Set it up.  */

      if (h->dynindx == -1
	  || (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	  || htab->srelbss == NULL)
	abort ();

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
      rela.r_addend = 0;
      loc = htab->srelbss->contents;
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
     be NULL for local symbols.  */
  if (sym != NULL
      && (h == elf_hash_table (info)->hdynamic
	  || h == elf_hash_table (info)->hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
6771
6772static void
6773elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6774 struct elf64_aarch64_link_hash_table
6775 *htab)
6776{
6777 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6778 small and large plts and at the minute just generates
6779 the small PLT. */
6780
6781 /* PLT0 of the small PLT looks like this -
6782 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6783 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6784 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6785 // symbol resolver
6786 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6787 // GOTPLT entry for this.
6788 br x17
6789 */
6790 bfd_vma plt_got_base;
6791 bfd_vma plt_base;
6792
6793
6794 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6795 PLT_ENTRY_SIZE);
6796 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6797 PLT_ENTRY_SIZE;
6798
6799 plt_got_base = (htab->root.sgotplt->output_section->vma
6800 + htab->root.sgotplt->output_offset);
6801
6802 plt_base = htab->root.splt->output_section->vma +
6803 htab->root.splt->output_section->output_offset;
6804
6805 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6806 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6807 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6808 htab->root.splt->contents + 4,
6809 PG (plt_got_base + 16) - PG (plt_base + 4));
6810
6811 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6812 htab->root.splt->contents + 8,
6813 PG_OFFSET (plt_got_base + 16));
6814
6815 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6816 htab->root.splt->contents + 12,
6817 PG_OFFSET (plt_got_base + 16));
6818}
6819
/* The finish_dynamic_sections backend hook: patch the final values
   into the .dynamic tags, write PLT0 and the TLSDESC trampoline, and
   fill in the reserved .got.plt header slots.  */

static bfd_boolean
elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
				       struct bfd_link_info *info)
{
  struct elf64_aarch64_link_hash_table *htab;
  bfd *dynobj;
  asection *sdyn;

  htab = elf64_aarch64_hash_table (info);
  dynobj = htab->root.dynobj;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (htab->root.dynamic_sections_created)
    {
      Elf64_External_Dyn *dyncon, *dynconend;

      if (sdyn == NULL || htab->root.sgot == NULL)
	abort ();

      /* Walk every .dynamic entry and fill in the values that were
	 left as placeholders by size_dynamic_sections.  */
      dyncon = (Elf64_External_Dyn *) sdyn->contents;
      dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  asection *s;

	  bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      /* Leave entries we don't manage untouched.  */
	      continue;

	    case DT_PLTGOT:
	      s = htab->root.sgotplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_JMPREL:
	      /* NOTE(review): uses only the output section's vma,
		 without srelplt->output_offset — correct only if
		 .rela.plt starts its output section; verify.  */
	      dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
	      break;

	    case DT_PLTRELSZ:
	      /* NOTE(review): reports the whole output section's size
		 rather than srelplt->size — equivalent only if no
		 other input lands in that output section; verify.  */
	      s = htab->root.srelplt->output_section;
	      dyn.d_un.d_val = s->size;
	      break;

	    case DT_RELASZ:
	      /* The procedure linkage table relocs (DT_JMPREL) should
		 not be included in the overall relocs (DT_RELA).
		 Therefore, we override the DT_RELASZ entry here to
		 make it not include the JMPREL relocs.  Since the
		 linker script arranges for .rela.plt to follow all
		 other relocation sections, we don't have to worry
		 about changing the DT_RELA entry.  */
	      if (htab->root.srelplt != NULL)
		{
		  s = htab->root.srelplt->output_section;
		  dyn.d_un.d_val -= s->size;
		}
	      break;

	    case DT_TLSDESC_PLT:
	      /* Address of the lazy TLSDESC resolver trampoline.  */
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
		+ htab->tlsdesc_plt;
	      break;

	    case DT_TLSDESC_GOT:
	      /* Address of the GOT slot the trampoline loads.  */
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
		+ htab->dt_tlsdesc_got;
	      break;
	    }

	  bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
	}

    }

  /* Fill in the special first entry in the procedure linkage table.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      elf64_aarch64_init_small_plt0_entry (output_bfd, htab);

      elf_section_data (htab->root.splt->output_section)->
	this_hdr.sh_entsize = htab->plt_entry_size;

      if (htab->tlsdesc_plt)
	{
	  /* Zero the GOT slot the trampoline will lazily fill.  */
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + htab->dt_tlsdesc_got);

	  memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
		  elf64_aarch64_tlsdesc_small_plt_entry,
		  sizeof (elf64_aarch64_tlsdesc_small_plt_entry));

	  {
	    /* Addresses of the two ADRP instructions inside the
	       trampoline (at offsets +4 and +8).  */
	    bfd_vma adrp1_addr =
	      htab->root.splt->output_section->vma
	      + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;

	    bfd_vma adrp2_addr =
	      htab->root.splt->output_section->vma
	      + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;

	    bfd_vma got_addr =
	      htab->root.sgot->output_section->vma
	      + htab->root.sgot->output_offset;

	    bfd_vma pltgot_addr =
	      htab->root.sgotplt->output_section->vma
	      + htab->root.sgotplt->output_offset;

	    bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
	    bfd_vma opcode;

	    /* adrp x2, DT_TLSDESC_GOT — patch the 21-bit page delta
	       into the template instruction.  */
	    opcode = bfd_get_32 (output_bfd,
				 htab->root.splt->contents
				 + htab->tlsdesc_plt + 4);
	    opcode = reencode_adr_imm
	      (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
	    bfd_put_32 (output_bfd, opcode,
			htab->root.splt->contents + htab->tlsdesc_plt + 4);

	    /* adrp x3, 0 — page of the .got.plt base.  */
	    opcode = bfd_get_32 (output_bfd,
				 htab->root.splt->contents
				 + htab->tlsdesc_plt + 8);
	    opcode = reencode_adr_imm
	      (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
	    bfd_put_32 (output_bfd, opcode,
			htab->root.splt->contents + htab->tlsdesc_plt + 8);

	    /* ldr x2, [x2, #0] — lo12 offset scaled by 8 for the
	       64-bit load immediate field.  */
	    opcode = bfd_get_32 (output_bfd,
				 htab->root.splt->contents
				 + htab->tlsdesc_plt + 12);
	    opcode = reencode_ldst_pos_imm (opcode,
					    PG_OFFSET (dt_tlsdesc_got) >> 3);
	    bfd_put_32 (output_bfd, opcode,
			htab->root.splt->contents + htab->tlsdesc_plt + 12);

	    /* add x3, x3, 0 — lo12 offset of the .got.plt base.  */
	    opcode = bfd_get_32 (output_bfd,
				 htab->root.splt->contents
				 + htab->tlsdesc_plt + 16);
	    opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
	    bfd_put_32 (output_bfd, opcode,
			htab->root.splt->contents + htab->tlsdesc_plt + 16);
	  }
	}
    }

  if (htab->root.sgotplt)
    {
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
	{
	  (*_bfd_error_handler)
	    (_("discarded output section: `%A'"), htab->root.sgotplt);
	  return FALSE;
	}

      /* Fill in the first three entries in the global offset table.  */
      if (htab->root.sgotplt->size > 0)
	{
	  /* Set the first entry in the global offset table to the address of
	     the dynamic section.  */
	  if (sdyn == NULL)
	    bfd_put_64 (output_bfd, (bfd_vma) 0,
			htab->root.sgotplt->contents);
	  else
	    bfd_put_64 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			htab->root.sgotplt->contents);
	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
	  bfd_put_64 (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
	  bfd_put_64 (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
	}

      elf_section_data (htab->root.sgotplt->output_section)->
	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  return TRUE;
}
7016
7017/* Return address for Ith PLT stub in section PLT, for relocation REL
7018 or (bfd_vma) -1 if it should not be included. */
7019
7020static bfd_vma
7021elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7022 const arelent *rel ATTRIBUTE_UNUSED)
7023{
7024 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7025}
7026
7027
/* We use this so we can override certain functions
   (though currently we don't).  NOTE: the initializer order below is
   the field order of struct elf_size_info and must not change.  */

const struct elf_size_info elf64_aarch64_size_info =
{
  sizeof (Elf64_External_Ehdr),
  sizeof (Elf64_External_Phdr),
  sizeof (Elf64_External_Shdr),
  sizeof (Elf64_External_Rel),
  sizeof (Elf64_External_Rela),
  sizeof (Elf64_External_Sym),
  sizeof (Elf64_External_Dyn),
  sizeof (Elf_External_Note),
  4,				/* Hash table entry size.  */
  1,				/* Internal relocs per external relocs.  */
  64,				/* Arch size.  */
  3,				/* Log_file_align.  */
  ELFCLASS64, EV_CURRENT,
  bfd_elf64_write_out_phdrs,
  bfd_elf64_write_shdrs_and_ehdr,
  bfd_elf64_checksum_contents,
  bfd_elf64_write_relocs,
  bfd_elf64_swap_symbol_in,
  bfd_elf64_swap_symbol_out,
  bfd_elf64_slurp_reloc_table,
  bfd_elf64_slurp_symbol_table,
  bfd_elf64_swap_dyn_in,
  bfd_elf64_swap_dyn_out,
  bfd_elf64_swap_reloc_in,
  bfd_elf64_swap_reloc_out,
  bfd_elf64_swap_reloca_in,
  bfd_elf64_swap_reloca_out
};
7061
7062#define ELF_ARCH bfd_arch_aarch64
7063#define ELF_MACHINE_CODE EM_AARCH64
7064#define ELF_MAXPAGESIZE 0x10000
7065#define ELF_MINPAGESIZE 0x1000
7066#define ELF_COMMONPAGESIZE 0x1000
7067
7068#define bfd_elf64_close_and_cleanup \
7069 elf64_aarch64_close_and_cleanup
7070
7071#define bfd_elf64_bfd_copy_private_bfd_data \
7072 elf64_aarch64_copy_private_bfd_data
7073
7074#define bfd_elf64_bfd_free_cached_info \
7075 elf64_aarch64_bfd_free_cached_info
7076
7077#define bfd_elf64_bfd_is_target_special_symbol \
7078 elf64_aarch64_is_target_special_symbol
7079
7080#define bfd_elf64_bfd_link_hash_table_create \
7081 elf64_aarch64_link_hash_table_create
7082
7083#define bfd_elf64_bfd_link_hash_table_free \
7084 elf64_aarch64_hash_table_free
7085
7086#define bfd_elf64_bfd_merge_private_bfd_data \
7087 elf64_aarch64_merge_private_bfd_data
7088
7089#define bfd_elf64_bfd_print_private_bfd_data \
7090 elf64_aarch64_print_private_bfd_data
7091
7092#define bfd_elf64_bfd_reloc_type_lookup \
7093 elf64_aarch64_reloc_type_lookup
7094
7095#define bfd_elf64_bfd_reloc_name_lookup \
7096 elf64_aarch64_reloc_name_lookup
7097
7098#define bfd_elf64_bfd_set_private_flags \
7099 elf64_aarch64_set_private_flags
7100
7101#define bfd_elf64_find_inliner_info \
7102 elf64_aarch64_find_inliner_info
7103
7104#define bfd_elf64_find_nearest_line \
7105 elf64_aarch64_find_nearest_line
7106
7107#define bfd_elf64_mkobject \
7108 elf64_aarch64_mkobject
7109
7110#define bfd_elf64_new_section_hook \
7111 elf64_aarch64_new_section_hook
7112
7113#define elf_backend_adjust_dynamic_symbol \
7114 elf64_aarch64_adjust_dynamic_symbol
7115
7116#define elf_backend_always_size_sections \
7117 elf64_aarch64_always_size_sections
7118
7119#define elf_backend_check_relocs \
7120 elf64_aarch64_check_relocs
7121
7122#define elf_backend_copy_indirect_symbol \
7123 elf64_aarch64_copy_indirect_symbol
7124
7125/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7126 to them in our hash. */
7127#define elf_backend_create_dynamic_sections \
7128 elf64_aarch64_create_dynamic_sections
7129
7130#define elf_backend_init_index_section \
7131 _bfd_elf_init_2_index_sections
7132
7133#define elf_backend_is_function_type \
7134 elf64_aarch64_is_function_type
7135
7136#define elf_backend_finish_dynamic_sections \
7137 elf64_aarch64_finish_dynamic_sections
7138
7139#define elf_backend_finish_dynamic_symbol \
7140 elf64_aarch64_finish_dynamic_symbol
7141
7142#define elf_backend_gc_sweep_hook \
7143 elf64_aarch64_gc_sweep_hook
7144
7145#define elf_backend_object_p \
7146 elf64_aarch64_object_p
7147
7148#define elf_backend_output_arch_local_syms \
7149 elf64_aarch64_output_arch_local_syms
7150
7151#define elf_backend_plt_sym_val \
7152 elf64_aarch64_plt_sym_val
7153
7154#define elf_backend_post_process_headers \
7155 elf64_aarch64_post_process_headers
7156
7157#define elf_backend_relocate_section \
7158 elf64_aarch64_relocate_section
7159
7160#define elf_backend_reloc_type_class \
7161 elf64_aarch64_reloc_type_class
7162
7163#define elf_backend_section_flags \
7164 elf64_aarch64_section_flags
7165
7166#define elf_backend_section_from_shdr \
7167 elf64_aarch64_section_from_shdr
7168
7169#define elf_backend_size_dynamic_sections \
7170 elf64_aarch64_size_dynamic_sections
7171
7172#define elf_backend_size_info \
7173 elf64_aarch64_size_info
7174
7175#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
7177#define elf_backend_plt_readonly 1
7178#define elf_backend_want_got_plt 1
7179#define elf_backend_want_plt_sym 0
7180#define elf_backend_may_use_rel_p 0
7181#define elf_backend_may_use_rela_p 1
7182#define elf_backend_default_use_rela_p 1
7183#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7184
7185#undef elf_backend_obj_attrs_section
7186#define elf_backend_obj_attrs_section ".ARM.attributes"
7187
7188#include "elf64-target.h"
This page took 0.354298 seconds and 4 git commands to generate.