1/* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
 49 The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62 global and local TLS symbols. Note that this is different from
 63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
 74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
 99 The TLS relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
 101 spotted. The local symbol data structures are created once, when
 102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
 111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
 117 Iterate over all input BFDs, looking in the local symbol data structures
 118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elf64_aarch64_final_link_relocate ()
135
 136 Fixup the R_AARCH64_TLSGD_{ADR_PAGE21, ADD_LO12_NC} relocations. */
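/* As a minimal illustration of the GOT offset flag convention described
   above (the helper names here are hypothetical; the real code open-codes
   the masking):

     #define GOT_RELOCS_EMITTED_P(off)  (((off) & (bfd_vma) 1) != 0)
     #define GOT_OFFSET_VALUE(off)      ((off) & ~(bfd_vma) 1)

   Once the GOT relocations for a symbol have been emitted the recorded
   offset is updated with "off |= 1"; every later use of the offset must
   first strip the flag with GOT_OFFSET_VALUE.  */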
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return the relocation section associated with NAME. HTAB is the
 189 bfd's elf64_aarch64_link_hash_table. */
190#define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
192
193/* Return size of a relocation entry. HTAB is the bfd's
 194 elf64_aarch64_link_hash_table. */
195#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
196
197/* Return function to swap relocations in. HTAB is the bfd's
 198 elf64_aarch64_link_hash_table. */
199#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
200
201/* Return function to swap relocations out. HTAB is the bfd's
 202 elf64_aarch64_link_hash_table. */
203#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
204
205/* GOT Entry size - 8 bytes. */
206#define GOT_ENTRY_SIZE (8)
207#define PLT_ENTRY_SIZE (32)
208#define PLT_SMALL_ENTRY_SIZE (16)
209#define PLT_TLSDESC_ENTRY_SIZE (32)
210
211/* Take the PAGE component of an address or offset. */
212#define PG(x) ((x) & ~ 0xfff)
213#define PG_OFFSET(x) ((x) & 0xfff)
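/* For example, PG (0x12f46a) is 0x12f000 and PG_OFFSET (0x12f46a) is
   0x46a, so PG (x) + PG_OFFSET (x) == x for any address or offset x.  */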
214
 215/* Encoding of the nop instruction. */
216#define INSN_NOP 0xd503201f
217
218#define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
221
222/* The first entry in a procedure linkage table looks like this
 223 if the distance between the PLTGOT and the PLT is < 4GB; use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
226 [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
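/* A worked example of the displacements above: with GOT_ENTRY_SIZE of 8,
   &PLTGOT[2] is PLTGOT + 0x10, which is why both the ldr and the add in
   PLT0 apply a 0x10 offset to the PLTGOT page established by the adrp.  */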
238
 239/* A per-function entry in a procedure linkage table looks like this
 240 if the distance between the PLTGOT and the PLT is < 4GB; use
241 these PLT entries. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
263#define elf_info_to_howto elf64_aarch64_info_to_howto
264#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
265
266#define AARCH64_ELF_ABI_VERSION 0
267#define AARCH64_ELF_OS_ABI_VERSION 0
268
269/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270#define ALL_ONES (~ (bfd_vma) 0)
271
272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
 357 FALSE), /* pcrel_offset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
 403/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
911
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
915 2, /* rightshift */
916 2, /* size (0 = byte,1 = short,2 = long) */
917 19, /* bitsize */
918 TRUE, /* pc_relative */
919 0, /* bitpos */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
927
928 EMPTY_HOWTO (310),
929
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
933 12, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
948 3, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
960};
961
962static reloc_howto_type elf64_aarch64_tls_howto_table[] =
963{
964 EMPTY_HOWTO (512),
965
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
969 12, /* rightshift */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
971 21, /* bitsize */
972 TRUE, /* pc_relative */
973 0, /* bitpos */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
981
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
984 0, /* rightshift */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
986 12, /* bitsize */
987 FALSE, /* pc_relative */
988 0, /* bitpos */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
996
997 EMPTY_HOWTO (515),
998 EMPTY_HOWTO (516),
999 EMPTY_HOWTO (517),
1000 EMPTY_HOWTO (518),
1001 EMPTY_HOWTO (519),
1002 EMPTY_HOWTO (520),
1003 EMPTY_HOWTO (521),
1004 EMPTY_HOWTO (522),
1005 EMPTY_HOWTO (523),
1006 EMPTY_HOWTO (524),
1007 EMPTY_HOWTO (525),
1008 EMPTY_HOWTO (526),
1009 EMPTY_HOWTO (527),
1010 EMPTY_HOWTO (528),
1011 EMPTY_HOWTO (529),
1012 EMPTY_HOWTO (530),
1013 EMPTY_HOWTO (531),
1014 EMPTY_HOWTO (532),
1015 EMPTY_HOWTO (533),
1016 EMPTY_HOWTO (534),
1017 EMPTY_HOWTO (535),
1018 EMPTY_HOWTO (536),
1019 EMPTY_HOWTO (537),
1020 EMPTY_HOWTO (538),
1021
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 21, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1065 3, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
 1079 2, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 21, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
 1093 32, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 12, /* bitsize */
1096 FALSE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1105
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
 1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 12, /* bitsize */
1110 FALSE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1119
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
 1121 16, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 12, /* bitsize */
1124 FALSE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1133
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 12, /* bitsize */
1138 FALSE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1147
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
 1163 12, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 12, /* bitsize */
1166 FALSE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1175
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 12, /* bitsize */
1194 FALSE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
1203};
1204
1205static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1206{
1207 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */
 1208 2, /* rightshift */
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1210 21, /* bitsize */
1211 TRUE, /* pc_relative */
1212 0, /* bitpos */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1220
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1222 0, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 21, /* bitsize */
1225 TRUE, /* pc_relative */
1226 0, /* bitpos */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1234
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 21, /* bitsize */
1241 TRUE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1246 FALSE, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE), /* pcrel_offset */
1250
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1253 3, /* rightshift */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 12, /* bitsize */
1256 FALSE, /* pc_relative */
1257 0, /* bitpos */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE), /* pcrel_offset */
1265
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1268 0, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
 1282 16, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 12, /* bitsize */
1285 FALSE, /* pc_relative */
1286 0, /* bitpos */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1296 0, /* rightshift */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1298 12, /* bitsize */
1299 FALSE, /* pc_relative */
1300 0, /* bitpos */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1308
1309 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1310 0, /* rightshift */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1312 12, /* bitsize */
1313 FALSE, /* pc_relative */
1314 0, /* bitpos */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE, /* partial_inplace */
1319 0x0, /* src_mask */
1320 0x0, /* dst_mask */
1321 FALSE), /* pcrel_offset */
1322
1323 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1324 0, /* rightshift */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1326 12, /* bitsize */
1327 FALSE, /* pc_relative */
1328 0, /* bitpos */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE, /* partial_inplace */
1333 0x0, /* src_mask */
1334 0x0, /* dst_mask */
1335 FALSE), /* pcrel_offset */
1336
1337 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1338 0, /* rightshift */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1340 12, /* bitsize */
1341 FALSE, /* pc_relative */
1342 0, /* bitpos */
1343 complain_overflow_dont, /* complain_on_overflow */
1344 bfd_elf_generic_reloc, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE, /* partial_inplace */
1347 0x0, /* src_mask */
1348 0x0, /* dst_mask */
1349 FALSE), /* pcrel_offset */
1350};
1351
1352static reloc_howto_type *
1353elf64_aarch64_howto_from_type (unsigned int r_type)
1354{
1355 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1356 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1357
1358 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1359 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1360
1361 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1362 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1363
1364 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1365 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1366
1367 switch (r_type)
1368 {
1369 case R_AARCH64_NONE:
1370 return &elf64_aarch64_howto_none;
1371
1372 }
1373 bfd_set_error (bfd_error_bad_value);
1374 return NULL;
1375}
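/* For example, R_AARCH64_ABS64 falls in the static range, so the lookup
   above reduces to
   &elf64_aarch64_howto_table[R_AARCH64_ABS64 - R_AARCH64_static_min],
   which, given the table layout noted earlier, is the R_AARCH64_ABS64
   howto.  */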
1376
1377static void
1378elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1379 Elf_Internal_Rela *elf_reloc)
1380{
1381 unsigned int r_type;
1382
1383 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1384 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1385}
1386
1387struct elf64_aarch64_reloc_map
1388{
1389 bfd_reloc_code_real_type bfd_reloc_val;
1390 unsigned int elf_reloc_val;
1391};
1392
1393/* All entries in this list must also be present in
1394 elf64_aarch64_howto_table. */
1395static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1396{
1397 {BFD_RELOC_NONE, R_AARCH64_NONE},
1398
1399 /* Basic data relocations. */
1400 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1401 {BFD_RELOC_64, R_AARCH64_ABS64},
1402 {BFD_RELOC_32, R_AARCH64_ABS32},
1403 {BFD_RELOC_16, R_AARCH64_ABS16},
1404 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1405 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1406 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1407
1408 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1409 value inline. */
1410 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1411 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1412 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1413
1414 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1415 signed value inline. */
1416 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1417 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1418 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1419
1420 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1421 unsigned value inline. */
1422 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1423 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1424 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1425 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1426
1427 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1428 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1429 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1430 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1431 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1432 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1433 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1434 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1435 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1436 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1437 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1438
1439 /* Relocations for control-flow instructions. */
1440 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1441 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1442 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1443 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1444
1445 /* Relocations for PIC. */
 1446 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
1447 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1448 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1449
1450 /* Relocations for TLS. */
1451 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1452 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1453 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1454 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1455 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1456 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1457 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1458 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1459 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1460 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1461 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1462 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1463 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1464 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1465 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1466 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1467 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1468 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1469 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1470 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1471 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1472 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1473 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1474 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
1475 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1476 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
1477 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1478 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1479 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1480 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1481 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1482 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1483 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1484 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1485 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1486 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1487 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1488};
1489
1490static reloc_howto_type *
1491elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1492 bfd_reloc_code_real_type code)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1497 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1500
1501 bfd_set_error (bfd_error_bad_value);
1502 return NULL;
1503}
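/* For instance, a lookup of BFD_RELOC_64 matches the {BFD_RELOC_64,
   R_AARCH64_ABS64} entry in the map above and so returns the
   R_AARCH64_ABS64 howto via elf64_aarch64_howto_from_type.  */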
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 const char *r_name)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1512 if (elf64_aarch64_howto_table[i].name != NULL
1513 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1514 return &elf64_aarch64_howto_table[i];
1515
1516 return NULL;
1517}
1518
1519/* Support for core dump NOTE sections. */
1520
1521static bfd_boolean
1522elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1523{
1524 int offset;
1525 size_t size;
1526
1527 switch (note->descsz)
1528 {
1529 default:
1530 return FALSE;
1531
1532 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1533 /* pr_cursig */
1534 elf_tdata (abfd)->core_signal
1535 = bfd_get_16 (abfd, note->descdata + 12);
1536
1537 /* pr_pid */
1538 elf_tdata (abfd)->core_lwpid
1539 = bfd_get_32 (abfd, note->descdata + 32);
1540
1541 /* pr_reg */
1542 offset = 112;
 1543 size = 272;
1544
1545 break;
1546 }
1547
1548 /* Make a ".reg/999" section. */
1549 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1550 size, note->descpos + offset);
1551}
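/* Decoding the numbers above (assuming the Linux/arm64 elf_prstatus
   layout): pr_reg starts 112 bytes into the note descriptor, and the
   272-byte register block holds 34 64-bit values, i.e. x0-x30, sp, pc
   and pstate.  */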
1552
1553#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1554#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1555#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1556#define TARGET_BIG_NAME "elf64-bigaarch64"
1557
1558#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1559
1560typedef unsigned long int insn32;
1561
1562/* The linker script knows the section names for placement.
1563 The entry_names are used to do simple name mangling on the stubs.
1564 Given a function name, and its type, the stub can be found. The
 1565 name can be changed. The only requirement is that the %s be present. */
1566#define STUB_ENTRY_NAME "__%s_veneer"
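/* As an illustration, expanding STUB_ENTRY_NAME for a function "foo"
   yields "__foo_veneer", e.g.:

     char buf[32];
     snprintf (buf, sizeof buf, STUB_ENTRY_NAME, "foo");  */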
1567
1568/* The name of the dynamic interpreter. This is put in the .interp
1569 section. */
1570#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1571
1572#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1573 (((1 << 25) - 1) << 2)
1574#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1575 (-((1 << 25) << 2))
1576
1577#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1578#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1579
1580static int
1581aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1582{
1583 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1584 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1585}
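/* In other words, an ADRP page immediate in [-(1 << 20), (1 << 20) - 1]
   reaches any 4KiB page within roughly +/-4GiB of PG (place).  */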
1586
1587static int
1588aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1589{
1590 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1591 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1593}
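/* With a 26-bit word offset, the window reachable by B/BL is therefore
   [place - 0x8000000, place + 0x7fffffc], i.e. roughly +/-128MiB; calls
   outside that window need one of the stub sequences below.  */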
1594
1595static const uint32_t aarch64_adrp_branch_stub [] =
1596{
1597 0x90000010, /* adrp ip0, X */
1598 /* R_AARCH64_ADR_HI21_PCREL(X) */
1599 0x91000210, /* add ip0, ip0, :lo12:X */
1600 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1601 0xd61f0200, /* br ip0 */
1602};
1603
1604static const uint32_t aarch64_long_branch_stub[] =
1605{
1606 0x58000090, /* ldr ip0, 1f */
1607 0x10000011, /* adr ip1, #0 */
1608 0x8b110210, /* add ip0, ip0, ip1 */
1609 0xd61f0200, /* br ip0 */
1610 0x00000000, /* 1: .xword
1611 R_AARCH64_PREL64(X) + 12
1612 */
1613 0x00000000,
1614};
1615
1616/* Section name for stubs is the associated section name plus this
1617 string. */
1618#define STUB_SUFFIX ".stub"
1619
1620enum elf64_aarch64_stub_type
1621{
1622 aarch64_stub_none,
1623 aarch64_stub_adrp_branch,
1624 aarch64_stub_long_branch,
1625};
1626
1627struct elf64_aarch64_stub_hash_entry
1628{
1629 /* Base hash table entry structure. */
1630 struct bfd_hash_entry root;
1631
1632 /* The stub section. */
1633 asection *stub_sec;
1634
1635 /* Offset within stub_sec of the beginning of this stub. */
1636 bfd_vma stub_offset;
1637
1638 /* Given the symbol's value and its section we can determine its final
1639 value when building the stubs (so the stub knows where to jump). */
1640 bfd_vma target_value;
1641 asection *target_section;
1642
1643 enum elf64_aarch64_stub_type stub_type;
1644
1645 /* The symbol table entry, if any, that this was derived from. */
1646 struct elf64_aarch64_link_hash_entry *h;
1647
1648 /* Destination symbol type */
1649 unsigned char st_type;
1650
1651 /* Where this stub is being called from, or, in the case of combined
1652 stub sections, the first input section in the group. */
1653 asection *id_sec;
1654
1655 /* The name for the local symbol at the start of this stub. The
1656 stub name in the hash table has to be unique; this does not, so
1657 it can be friendlier. */
1658 char *output_name;
1659};
1660
1661/* Used to build a map of a section. This is required for mixed-endian
1662 code/data. */
1663
1664typedef struct elf64_elf_section_map
1665{
1666 bfd_vma vma;
1667 char type;
1668}
1669elf64_aarch64_section_map;
1670
1671
1672typedef struct _aarch64_elf_section_data
1673{
1674 struct bfd_elf_section_data elf;
1675 unsigned int mapcount;
1676 unsigned int mapsize;
1677 elf64_aarch64_section_map *map;
1678}
1679_aarch64_elf_section_data;
1680
1681#define elf64_aarch64_section_data(sec) \
1682 ((_aarch64_elf_section_data *) elf_section_data (sec))
1683
1684/* The size of the thread control block. */
1685#define TCB_SIZE 16
1686
1687struct elf_aarch64_local_symbol
1688{
1689 unsigned int got_type;
1690 bfd_signed_vma got_refcount;
1691 bfd_vma got_offset;
1692
1693 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1694 offset is from the end of the jump table and reserved entries
1695 within the PLTGOT.
1696
1697 The magic value (bfd_vma) -1 indicates that an offset has not been
1698 allocated. */
1699 bfd_vma tlsdesc_got_jump_table_offset;
1700};
1701
1702struct elf_aarch64_obj_tdata
1703{
1704 struct elf_obj_tdata root;
1705
1706 /* local symbol descriptors */
1707 struct elf_aarch64_local_symbol *locals;
1708
1709 /* Zero to warn when linking objects with incompatible enum sizes. */
1710 int no_enum_size_warning;
1711
1712 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1713 int no_wchar_size_warning;
1714};
1715
1716#define elf_aarch64_tdata(bfd) \
1717 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1718
1719#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1720
1721#define is_aarch64_elf(bfd) \
1722 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1723 && elf_tdata (bfd) != NULL \
1724 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1725
1726static bfd_boolean
1727elf64_aarch64_mkobject (bfd *abfd)
1728{
1729 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1730 AARCH64_ELF_DATA);
1731}
1732
1733/* The AArch64 linker needs to keep track of the number of relocs that it
1734 decides to copy in check_relocs for each symbol. This is so that
1735 it can discard PC relative relocs if it doesn't need them when
1736 linking with -Bsymbolic. We store the information in a field
1737 extending the regular ELF linker hash table. */
1738
1739/* This structure keeps track of the number of relocs we have copied
1740 for a given symbol. */
1741struct elf64_aarch64_relocs_copied
1742{
1743 /* Next section. */
1744 struct elf64_aarch64_relocs_copied *next;
1745 /* A section in dynobj. */
1746 asection *section;
1747 /* Number of relocs copied in this section. */
1748 bfd_size_type count;
1749 /* Number of PC-relative relocs copied in this section. */
1750 bfd_size_type pc_count;
1751};
1752
1753#define elf64_aarch64_hash_entry(ent) \
1754 ((struct elf64_aarch64_link_hash_entry *)(ent))
1755
1756#define GOT_UNKNOWN 0
1757#define GOT_NORMAL 1
1758#define GOT_TLS_GD 2
1759#define GOT_TLS_IE 4
1760#define GOT_TLSDESC_GD 8
1761
1762#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1763
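/* Illustrative sketch, not part of the original source: got_type is a
   bit mask, so a symbol reached through more than one kind of GOT
   access simply accumulates bits. For a hypothetical symbol seen via
   both a TLS descriptor sequence and an initial-exec sequence:

     unsigned int got_type = GOT_UNKNOWN;
     got_type |= GOT_TLSDESC_GD;      (a TLSDESC reloc was seen)
     got_type |= GOT_TLS_IE;          (an IE reloc was seen)

   GOT_TLS_GD_ANY_P (got_type) is then non-zero, while
   (got_type & GOT_NORMAL) is zero, i.e. the symbol needs TLS-style GOT
   entries but no ordinary address slot. */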
1764/* AArch64 ELF linker hash entry. */
1765struct elf64_aarch64_link_hash_entry
1766{
1767 struct elf_link_hash_entry root;
1768
1769 /* Track dynamic relocs copied for this symbol. */
1770 struct elf_dyn_relocs *dyn_relocs;
1771
1772 /* Number of PC relative relocs copied for this symbol. */
1773 struct elf64_aarch64_relocs_copied *relocs_copied;
1774
1775 /* Since PLT entries have variable size, we need to record the
1776 index into .got.plt instead of recomputing it from the PLT
1777 offset. */
1778 bfd_signed_vma plt_got_offset;
1779
1780 /* Bit mask representing the type of GOT entry(s), if any, required
1781 by this symbol. */
1782 unsigned int got_type;
1783
1784 /* A pointer to the most recently used stub hash entry against this
1785 symbol. */
1786 struct elf64_aarch64_stub_hash_entry *stub_cache;
1787
1788 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1789 is from the end of the jump table and reserved entries within the PLTGOT.
1790
1791 The magic value (bfd_vma) -1 indicates that an offset has not
1792 been allocated. */
1793 bfd_vma tlsdesc_got_jump_table_offset;
1794};
1795
1796static unsigned int
1797elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1798 bfd *abfd,
1799 unsigned long r_symndx)
1800{
1801 if (h)
1802 return elf64_aarch64_hash_entry (h)->got_type;
1803
1804 if (! elf64_aarch64_locals (abfd))
1805 return GOT_UNKNOWN;
1806
1807 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1808}
1809
1810/* Traverse an AArch64 ELF linker hash table. */
1811#define elf64_aarch64_link_hash_traverse(table, func, info) \
1812 (elf_link_hash_traverse \
1813 (&(table)->root, \
1814 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1815 (info)))
1816
1817/* Get the AArch64 elf linker hash table from a link_info structure. */
1818#define elf64_aarch64_hash_table(info) \
1819 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1820
1821#define aarch64_stub_hash_lookup(table, string, create, copy) \
1822 ((struct elf64_aarch64_stub_hash_entry *) \
1823 bfd_hash_lookup ((table), (string), (create), (copy)))
1824
1825/* AArch64 ELF linker hash table. */
1826struct elf64_aarch64_link_hash_table
1827{
1828 /* The main hash table. */
1829 struct elf_link_hash_table root;
1830
1831 /* Nonzero to force PIC branch veneers. */
1832 int pic_veneer;
1833
1834 /* The number of bytes in the initial entry in the PLT. */
1835 bfd_size_type plt_header_size;
1836
1837 /* The number of bytes in the subsequent PLT entries. */
1838 bfd_size_type plt_entry_size;
1839
1840 /* Short-cuts to get to dynamic linker sections. */
1841 asection *sdynbss;
1842 asection *srelbss;
1843
1844 /* Small local sym cache. */
1845 struct sym_cache sym_cache;
1846
1847 /* For convenience in allocate_dynrelocs. */
1848 bfd *obfd;
1849
1850 /* The amount of space used by the reserved portion of the sgotplt
1851 section, plus whatever space is used by the jump slots. */
1852 bfd_vma sgotplt_jump_table_size;
1853
1854 /* The stub hash table. */
1855 struct bfd_hash_table stub_hash_table;
1856
1857 /* Linker stub bfd. */
1858 bfd *stub_bfd;
1859
1860 /* Linker call-backs. */
1861 asection *(*add_stub_section) (const char *, asection *);
1862 void (*layout_sections_again) (void);
1863
1864 /* Array to keep track of which stub sections have been created, and
1865 information on stub grouping. */
1866 struct map_stub
1867 {
1868 /* This is the section to which stubs in the group will be
1869 attached. */
1870 asection *link_sec;
1871 /* The stub section. */
1872 asection *stub_sec;
1873 } *stub_group;
1874
1875 /* Assorted information used by elf64_aarch64_size_stubs. */
1876 unsigned int bfd_count;
1877 int top_index;
1878 asection **input_list;
1879
1880 /* The offset into splt of the PLT entry for the TLS descriptor
1881 resolver. Special values are 0, if not necessary (or not found
1882 to be necessary yet), and -1 if needed but not determined
1883 yet. */
1884 bfd_vma tlsdesc_plt;
1885
1886 /* The GOT offset for the lazy trampoline. Communicated to the
1887 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1888 indicates an offset is not allocated. */
1889 bfd_vma dt_tlsdesc_got;
1890};
1891
1892
1893/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
1894 maximum range expressible by an unsigned number with the indicated
1895 number of BITS, and bfd_reloc_ok otherwise. */
1896
1897static bfd_reloc_status_type
1898aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1899{
1900 bfd_vma lim;
1901 if (bits >= sizeof (bfd_vma) * 8)
1902 return bfd_reloc_ok;
1903 lim = (bfd_vma) 1 << bits;
1904 if (value >= lim)
1905 return bfd_reloc_overflow;
1906 return bfd_reloc_ok;
1907}
1908
1909
1910/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
1911 maximum range expressible by a signed number with the indicated
1912 number of BITS, and bfd_reloc_ok otherwise. */
1913
1914static bfd_reloc_status_type
1915aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1916{
1917 bfd_signed_vma svalue = (bfd_signed_vma) value;
1918 bfd_signed_vma lim;
1919
1920 if (bits >= sizeof (bfd_vma) * 8)
1921 return bfd_reloc_ok;
1922 lim = (bfd_signed_vma) 1 << (bits - 1);
1923 if (svalue < -lim || svalue >= lim)
1924 return bfd_reloc_overflow;
1925 return bfd_reloc_ok;
1926}
1927
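/* Illustrative sketch, not part of the original source: a few worked
   cases of the two overflow checks above, assuming a 64-bit bfd_vma:

     aarch64_unsigned_overflow (0xffff, 16)            -> bfd_reloc_ok
     aarch64_unsigned_overflow (0x10000, 16)           -> bfd_reloc_overflow
     aarch64_signed_overflow (0x7fff, 16)              -> bfd_reloc_ok
     aarch64_signed_overflow (0x8000, 16)              -> bfd_reloc_overflow
     aarch64_signed_overflow ((bfd_vma) -0x8000, 16)   -> bfd_reloc_ok

   i.e. an N-bit unsigned field holds [0, 2^N) and an N-bit signed
   field holds [-2^(N-1), 2^(N-1)). */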
1928/* Create an entry in an AArch64 ELF linker hash table. */
1929
1930static struct bfd_hash_entry *
1931elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1932 struct bfd_hash_table *table,
1933 const char *string)
1934{
1935 struct elf64_aarch64_link_hash_entry *ret =
1936 (struct elf64_aarch64_link_hash_entry *) entry;
1937
1938 /* Allocate the structure if it has not already been allocated by a
1939 subclass. */
1940 if (ret == NULL)
1941 ret = bfd_hash_allocate (table,
1942 sizeof (struct elf64_aarch64_link_hash_entry));
1943 if (ret == NULL)
1944 return (struct bfd_hash_entry *) ret;
1945
1946 /* Call the allocation method of the superclass. */
1947 ret = ((struct elf64_aarch64_link_hash_entry *)
1948 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1949 table, string));
1950 if (ret != NULL)
1951 {
1952 ret->dyn_relocs = NULL;
1953 ret->relocs_copied = NULL;
1954 ret->got_type = GOT_UNKNOWN;
1955 ret->plt_got_offset = (bfd_vma) - 1;
1956 ret->stub_cache = NULL;
1957 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1958 }
1959
1960 return (struct bfd_hash_entry *) ret;
1961}
1962
1963/* Initialize an entry in the stub hash table. */
1964
1965static struct bfd_hash_entry *
1966stub_hash_newfunc (struct bfd_hash_entry *entry,
1967 struct bfd_hash_table *table, const char *string)
1968{
1969 /* Allocate the structure if it has not already been allocated by a
1970 subclass. */
1971 if (entry == NULL)
1972 {
1973 entry = bfd_hash_allocate (table,
1974 sizeof (struct
1975 elf64_aarch64_stub_hash_entry));
1976 if (entry == NULL)
1977 return entry;
1978 }
1979
1980 /* Call the allocation method of the superclass. */
1981 entry = bfd_hash_newfunc (entry, table, string);
1982 if (entry != NULL)
1983 {
1984 struct elf64_aarch64_stub_hash_entry *eh;
1985
1986 /* Initialize the local fields. */
1987 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1988 eh->stub_sec = NULL;
1989 eh->stub_offset = 0;
1990 eh->target_value = 0;
1991 eh->target_section = NULL;
1992 eh->stub_type = aarch64_stub_none;
1993 eh->h = NULL;
1994 eh->id_sec = NULL;
1995 }
1996
1997 return entry;
1998}
1999
2000
2001/* Copy the extra info we tack onto an elf_link_hash_entry. */
2002
2003static void
2004elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2005 struct elf_link_hash_entry *dir,
2006 struct elf_link_hash_entry *ind)
2007{
2008 struct elf64_aarch64_link_hash_entry *edir, *eind;
2009
2010 edir = (struct elf64_aarch64_link_hash_entry *) dir;
2011 eind = (struct elf64_aarch64_link_hash_entry *) ind;
2012
2013 if (eind->dyn_relocs != NULL)
2014 {
2015 if (edir->dyn_relocs != NULL)
2016 {
2017 struct elf_dyn_relocs **pp;
2018 struct elf_dyn_relocs *p;
2019
2020 /* Add reloc counts against the indirect sym to the direct sym
2021 list. Merge any entries against the same section. */
2022 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2023 {
2024 struct elf_dyn_relocs *q;
2025
2026 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2027 if (q->sec == p->sec)
2028 {
2029 q->pc_count += p->pc_count;
2030 q->count += p->count;
2031 *pp = p->next;
2032 break;
2033 }
2034 if (q == NULL)
2035 pp = &p->next;
2036 }
2037 *pp = edir->dyn_relocs;
2038 }
2039
2040 edir->dyn_relocs = eind->dyn_relocs;
2041 eind->dyn_relocs = NULL;
2042 }
2043
2044 if (eind->relocs_copied != NULL)
2045 {
2046 if (edir->relocs_copied != NULL)
2047 {
2048 struct elf64_aarch64_relocs_copied **pp;
2049 struct elf64_aarch64_relocs_copied *p;
2050
2051 /* Add reloc counts against the indirect sym to the direct sym
2052 list. Merge any entries against the same section. */
2053 for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
2054 {
2055 struct elf64_aarch64_relocs_copied *q;
2056
2057 for (q = edir->relocs_copied; q != NULL; q = q->next)
2058 if (q->section == p->section)
2059 {
2060 q->pc_count += p->pc_count;
2061 q->count += p->count;
2062 *pp = p->next;
2063 break;
2064 }
2065 if (q == NULL)
2066 pp = &p->next;
2067 }
2068 *pp = edir->relocs_copied;
2069 }
2070
2071 edir->relocs_copied = eind->relocs_copied;
2072 eind->relocs_copied = NULL;
2073 }
2074
2075 if (ind->root.type == bfd_link_hash_indirect)
2076 {
2077 /* Copy over PLT info. */
2078 if (dir->got.refcount <= 0)
2079 {
2080 edir->got_type = eind->got_type;
2081 eind->got_type = GOT_UNKNOWN;
2082 }
2083 }
2084
2085 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2086}
2087
2088/* Create an AArch64 elf linker hash table. */
2089
2090static struct bfd_link_hash_table *
2091elf64_aarch64_link_hash_table_create (bfd *abfd)
2092{
2093 struct elf64_aarch64_link_hash_table *ret;
2094 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2095
2096 ret = bfd_malloc (amt);
2097 if (ret == NULL)
2098 return NULL;
2099
2100 if (!_bfd_elf_link_hash_table_init
2101 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2102 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2103 {
2104 free (ret);
2105 return NULL;
2106 }
2107
2108 ret->sdynbss = NULL;
2109 ret->srelbss = NULL;
2110
2111 ret->plt_header_size = PLT_ENTRY_SIZE;
2112 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2113
2114 ret->sym_cache.abfd = NULL;
2115 ret->obfd = abfd;
2116
2117 ret->stub_bfd = NULL;
2118 ret->add_stub_section = NULL;
2119 ret->layout_sections_again = NULL;
2120 ret->stub_group = NULL;
2121 ret->bfd_count = 0;
2122 ret->top_index = 0;
2123 ret->input_list = NULL;
2124 ret->tlsdesc_plt = 0;
2125 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2126
2127 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2128 sizeof (struct elf64_aarch64_stub_hash_entry)))
2129 {
2130 free (ret);
2131 return NULL;
2132 }
2133
2134 return &ret->root.root;
2135}
2136
2137/* Free the derived linker hash table. */
2138
2139static void
2140elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2141{
2142 struct elf64_aarch64_link_hash_table *ret
2143 = (struct elf64_aarch64_link_hash_table *) hash;
2144
2145 bfd_hash_table_free (&ret->stub_hash_table);
2146 _bfd_generic_link_hash_table_free (hash);
2147}
2148
2149static bfd_vma
2150aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2151 bfd_vma addend, bfd_boolean weak_undef_p)
2152{
2153 switch (r_type)
2154 {
2155 case R_AARCH64_TLSDESC_CALL:
2156 case R_AARCH64_NONE:
2157 case R_AARCH64_NULL:
2158 break;
2159
2160 case R_AARCH64_ADR_PREL_LO21:
2161 case R_AARCH64_CONDBR19:
2162 case R_AARCH64_LD_PREL_LO19:
2163 case R_AARCH64_PREL16:
2164 case R_AARCH64_PREL32:
2165 case R_AARCH64_PREL64:
2166 case R_AARCH64_TSTBR14:
2167 if (weak_undef_p)
2168 value = place;
2169 value = value + addend - place;
2170 break;
2171
2172 case R_AARCH64_CALL26:
2173 case R_AARCH64_JUMP26:
2174 value = value + addend - place;
2175 break;
2176
2177 case R_AARCH64_ABS16:
2178 case R_AARCH64_ABS32:
2179 case R_AARCH64_MOVW_SABS_G0:
2180 case R_AARCH64_MOVW_SABS_G1:
2181 case R_AARCH64_MOVW_SABS_G2:
2182 case R_AARCH64_MOVW_UABS_G0:
2183 case R_AARCH64_MOVW_UABS_G0_NC:
2184 case R_AARCH64_MOVW_UABS_G1:
2185 case R_AARCH64_MOVW_UABS_G1_NC:
2186 case R_AARCH64_MOVW_UABS_G2:
2187 case R_AARCH64_MOVW_UABS_G2_NC:
2188 case R_AARCH64_MOVW_UABS_G3:
2189 value = value + addend;
2190 break;
2191
2192 case R_AARCH64_ADR_PREL_PG_HI21:
2193 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2194 if (weak_undef_p)
2195 value = PG (place);
2196 value = PG (value + addend) - PG (place);
2197 break;
2198
2199 case R_AARCH64_GOT_LD_PREL19:
2200 value = value + addend - place;
2201 break;
2202
2203 case R_AARCH64_ADR_GOT_PAGE:
2204 case R_AARCH64_TLSDESC_ADR_PAGE:
2205 case R_AARCH64_TLSGD_ADR_PAGE21:
2206 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2207 value = PG (value + addend) - PG (place);
2208 break;
2209
2210 case R_AARCH64_ADD_ABS_LO12_NC:
2211 case R_AARCH64_LD64_GOT_LO12_NC:
2212 case R_AARCH64_LDST8_ABS_LO12_NC:
2213 case R_AARCH64_LDST16_ABS_LO12_NC:
2214 case R_AARCH64_LDST32_ABS_LO12_NC:
2215 case R_AARCH64_LDST64_ABS_LO12_NC:
2216 case R_AARCH64_LDST128_ABS_LO12_NC:
2217 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2218 case R_AARCH64_TLSDESC_ADD:
2219 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2220 case R_AARCH64_TLSDESC_LDR:
2221 case R_AARCH64_TLSGD_ADD_LO12_NC:
2222 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2223 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2224 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2225 value = PG_OFFSET (value + addend);
2226 break;
2227
2228 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2229 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2230 value = (value + addend) & (bfd_vma) 0xffff0000;
2231 break;
2232 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2233 value = (value + addend) & (bfd_vma) 0xfff000;
2234 break;
2235
2236 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2237 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2238 value = (value + addend) & (bfd_vma) 0xffff;
2239 break;
2240
2241 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2242 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2243 value -= place & ~(bfd_vma) 0xffffffff;
2244 break;
2245 }
2246 return value;
2247}
2248
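/* Illustrative sketch, not part of the original source: a worked
   example of the page arithmetic above, assuming PG masks an address
   down to its 4KB page base and PG_OFFSET keeps the low 12 bits. For
   an adrp/add pair referencing symbol S from place P:

     S = 0x412345   (value + addend)
     P = 0x400010   (address of the adrp)

     R_AARCH64_ADR_PREL_PG_HI21:  PG (S) - PG (P) = 0x412000 - 0x400000
                                                  = 0x12000
     R_AARCH64_ADD_ABS_LO12_NC:   PG_OFFSET (S)   = 0x345

   The adrp materialises the page base of S relative to the page of P
   and the add supplies the remaining low 12 bits, so together the pair
   reconstructs the full address of S. */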
2249static bfd_boolean
2250aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2251 bfd_vma offset, bfd_vma value)
2252{
2253 reloc_howto_type *howto;
2254 bfd_vma place;
2255
2256 howto = elf64_aarch64_howto_from_type (r_type);
2257 place = (input_section->output_section->vma + input_section->output_offset
2258 + offset);
2259 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2260 return bfd_elf_aarch64_put_addend (input_bfd,
2261 input_section->contents + offset,
2262 howto, value);
2263}
2264
2265static enum elf64_aarch64_stub_type
2266aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2267{
2268 if (aarch64_valid_for_adrp_p (value, place))
2269 return aarch64_stub_adrp_branch;
2270 return aarch64_stub_long_branch;
2271}
2272
2273/* Determine the type of stub needed, if any, for a call. */
2274
2275static enum elf64_aarch64_stub_type
2276aarch64_type_of_stub (struct bfd_link_info *info,
2277 asection *input_sec,
2278 const Elf_Internal_Rela *rel,
2279 unsigned char st_type,
2280 struct elf64_aarch64_link_hash_entry *hash,
2281 bfd_vma destination)
2282{
2283 bfd_vma location;
2284 bfd_signed_vma branch_offset;
2285 unsigned int r_type;
2286 struct elf64_aarch64_link_hash_table *globals;
2287 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2288 bfd_boolean via_plt_p;
2289
2290 if (st_type != STT_FUNC)
2291 return stub_type;
2292
2293 globals = elf64_aarch64_hash_table (info);
2294 via_plt_p = (globals->root.splt != NULL && hash != NULL
2295 && hash->root.plt.offset != (bfd_vma) - 1);
2296
2297 if (via_plt_p)
2298 return stub_type;
2299
2300 /* Determine where the call point is. */
2301 location = (input_sec->output_offset
2302 + input_sec->output_section->vma + rel->r_offset);
2303
2304 branch_offset = (bfd_signed_vma) (destination - location);
2305
2306 r_type = ELF64_R_TYPE (rel->r_info);
2307
2308 /* We don't want to redirect any old unconditional jump in this way,
2309 only one which is being used for a sibcall, where it is
2310 acceptable for the IP0 and IP1 registers to be clobbered. */
2311 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2312 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2313 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2314 {
2315 stub_type = aarch64_stub_long_branch;
2316 }
2317
2318 return stub_type;
2319}
2320
2321/* Build a name for an entry in the stub hash table. */
2322
2323static char *
2324elf64_aarch64_stub_name (const asection *input_section,
2325 const asection *sym_sec,
2326 const struct elf64_aarch64_link_hash_entry *hash,
2327 const Elf_Internal_Rela *rel)
2328{
2329 char *stub_name;
2330 bfd_size_type len;
2331
2332 if (hash)
2333 {
2334 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2335 stub_name = bfd_malloc (len);
2336 if (stub_name != NULL)
2337 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2338 (unsigned int) input_section->id,
2339 hash->root.root.root.string,
2340 rel->r_addend);
2341 }
2342 else
2343 {
2344 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2345 stub_name = bfd_malloc (len);
2346 if (stub_name != NULL)
2347 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2348 (unsigned int) input_section->id,
2349 (unsigned int) sym_sec->id,
2350 (unsigned int) ELF64_R_SYM (rel->r_info),
2351 rel->r_addend);
2352 }
2353
2354 return stub_name;
2355}
2356
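/* Illustrative sketch, not part of the original source: with the
   format strings above, and purely hypothetical section ids, a stub
   for a call to the global symbol "printf" from the section group with
   id 0x2a and addend 0 is entered in the hash table as

     "0000002a_printf+0"

   while a call to local symbol index 5 defined in the section with id
   0x1f uses the anonymous form

     "0000002a_1f:5+0"

   The leading section id is what keeps stubs that reach the same
   target from different stub groups distinct. */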
2357/* Look up an entry in the stub hash. Stub entries are cached because
2358 creating the stub name takes a bit of time. */
2359
2360static struct elf64_aarch64_stub_hash_entry *
2361elf64_aarch64_get_stub_entry (const asection *input_section,
2362 const asection *sym_sec,
2363 struct elf_link_hash_entry *hash,
2364 const Elf_Internal_Rela *rel,
2365 struct elf64_aarch64_link_hash_table *htab)
2366{
2367 struct elf64_aarch64_stub_hash_entry *stub_entry;
2368 struct elf64_aarch64_link_hash_entry *h =
2369 (struct elf64_aarch64_link_hash_entry *) hash;
2370 const asection *id_sec;
2371
2372 if ((input_section->flags & SEC_CODE) == 0)
2373 return NULL;
2374
2375 /* If this input section is part of a group of sections sharing one
2376 stub section, then use the id of the first section in the group.
2377 Stub names need to include a section id, as there may well be
2378 more than one stub used to reach, say, printf, and we need to
2379 distinguish between them. */
2380 id_sec = htab->stub_group[input_section->id].link_sec;
2381
2382 if (h != NULL && h->stub_cache != NULL
2383 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2384 {
2385 stub_entry = h->stub_cache;
2386 }
2387 else
2388 {
2389 char *stub_name;
2390
2391 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2392 if (stub_name == NULL)
2393 return NULL;
2394
2395 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2396 stub_name, FALSE, FALSE);
2397 if (h != NULL)
2398 h->stub_cache = stub_entry;
2399
2400 free (stub_name);
2401 }
2402
2403 return stub_entry;
2404}
2405
2406/* Add a new stub entry to the stub hash. Not all fields of the new
2407 stub entry are initialised. */
2408
2409static struct elf64_aarch64_stub_hash_entry *
2410elf64_aarch64_add_stub (const char *stub_name,
2411 asection *section,
2412 struct elf64_aarch64_link_hash_table *htab)
2413{
2414 asection *link_sec;
2415 asection *stub_sec;
2416 struct elf64_aarch64_stub_hash_entry *stub_entry;
2417
2418 link_sec = htab->stub_group[section->id].link_sec;
2419 stub_sec = htab->stub_group[section->id].stub_sec;
2420 if (stub_sec == NULL)
2421 {
2422 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2423 if (stub_sec == NULL)
2424 {
2425 size_t namelen;
2426 bfd_size_type len;
2427 char *s_name;
2428
2429 namelen = strlen (link_sec->name);
2430 len = namelen + sizeof (STUB_SUFFIX);
2431 s_name = bfd_alloc (htab->stub_bfd, len);
2432 if (s_name == NULL)
2433 return NULL;
2434
2435 memcpy (s_name, link_sec->name, namelen);
2436 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2437 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2438 if (stub_sec == NULL)
2439 return NULL;
2440 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2441 }
2442 htab->stub_group[section->id].stub_sec = stub_sec;
2443 }
2444
2445 /* Enter this entry into the linker stub hash table. */
2446 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2447 TRUE, FALSE);
2448 if (stub_entry == NULL)
2449 {
2450 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2451 section->owner, stub_name);
2452 return NULL;
2453 }
2454
2455 stub_entry->stub_sec = stub_sec;
2456 stub_entry->stub_offset = 0;
2457 stub_entry->id_sec = link_sec;
2458
2459 return stub_entry;
2460}
2461
2462static bfd_boolean
2463aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2464 void *in_arg ATTRIBUTE_UNUSED)
2465{
2466 struct elf64_aarch64_stub_hash_entry *stub_entry;
2467 asection *stub_sec;
2468 bfd *stub_bfd;
2469 bfd_byte *loc;
2470 bfd_vma sym_value;
2471 unsigned int template_size;
2472 const uint32_t *template;
2473 unsigned int i;
2474
2475 /* Massage our args to the form they really have. */
2476 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2477
2478 stub_sec = stub_entry->stub_sec;
2479
2480 /* Make a note of the offset within the stubs for this entry. */
2481 stub_entry->stub_offset = stub_sec->size;
2482 loc = stub_sec->contents + stub_entry->stub_offset;
2483
2484 stub_bfd = stub_sec->owner;
2485
2486 /* This is the address of the stub destination. */
2487 sym_value = (stub_entry->target_value
2488 + stub_entry->target_section->output_offset
2489 + stub_entry->target_section->output_section->vma);
2490
2491 if (stub_entry->stub_type == aarch64_stub_long_branch)
2492 {
2493 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2494 + stub_sec->output_offset);
2495
2496 /* See if we can relax the stub. */
2497 if (aarch64_valid_for_adrp_p (sym_value, place))
2498 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2499 }
2500
2501 switch (stub_entry->stub_type)
2502 {
2503 case aarch64_stub_adrp_branch:
2504 template = aarch64_adrp_branch_stub;
2505 template_size = sizeof (aarch64_adrp_branch_stub);
2506 break;
2507 case aarch64_stub_long_branch:
2508 template = aarch64_long_branch_stub;
2509 template_size = sizeof (aarch64_long_branch_stub);
2510 break;
2511 default:
2512 BFD_FAIL ();
2513 return FALSE;
2514 }
2515
2516 for (i = 0; i < (template_size / sizeof template[0]); i++)
2517 {
2518 bfd_putl32 (template[i], loc);
2519 loc += 4;
2520 }
2521
2522 template_size = (template_size + 7) & ~7;
2523 stub_sec->size += template_size;
2524
2525 switch (stub_entry->stub_type)
2526 {
2527 case aarch64_stub_adrp_branch:
2528 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2529 stub_entry->stub_offset, sym_value))
2530 /* The stub would not have been relaxed if the offset was out
2531 of range. */
2532 BFD_FAIL ();
2533
2534 _bfd_final_link_relocate
2535 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2536 stub_bfd,
2537 stub_sec,
2538 stub_sec->contents,
2539 stub_entry->stub_offset + 4,
2540 sym_value,
2541 0);
2542 break;
2543
2544 case aarch64_stub_long_branch:
2545 /* We want the value relative to the address 12 bytes back from the
2546 value itself. */
2547 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2548 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2549 stub_sec->contents,
2550 stub_entry->stub_offset + 16,
2551 sym_value + 12, 0);
2552 break;
2553 default:
2554 break;
2555 }
2556
2557 return TRUE;
2558}
2559
2560/* As above, but don't actually build the stub. Just bump offset so
2561 we know stub section sizes. */
2562
2563static bfd_boolean
2564aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2565 void *in_arg ATTRIBUTE_UNUSED)
2566{
2567 struct elf64_aarch64_stub_hash_entry *stub_entry;
2568 int size;
2569
2570 /* Massage our args to the form they really have. */
2571 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2572
2573 switch (stub_entry->stub_type)
2574 {
2575 case aarch64_stub_adrp_branch:
2576 size = sizeof (aarch64_adrp_branch_stub);
2577 break;
2578 case aarch64_stub_long_branch:
2579 size = sizeof (aarch64_long_branch_stub);
2580 break;
2581 default:
2582 BFD_FAIL ();
2583 return FALSE;
2584 break;
2585 }
2586
2587 size = (size + 7) & ~7;
2588 stub_entry->stub_sec->size += size;
2589 return TRUE;
2590}
2591
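/* Illustrative sketch, not part of the original source: the rounding
   above keeps each stub 8-byte aligned within its stub section:

     sizeof (aarch64_adrp_branch_stub) == 12  ->  (12 + 7) & ~7 == 16
     sizeof (aarch64_long_branch_stub) == 24  ->  (24 + 7) & ~7 == 24

   so an adrp branch stub reserves 16 bytes and a long branch stub 24,
   matching the rounding applied in aarch64_build_one_stub above. */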
2592/* External entry points for sizing and building linker stubs. */
2593
2594/* Set up various things so that we can make a list of input sections
2595 for each output section included in the link. Returns -1 on error,
2596 0 when no stubs will be needed, and 1 on success. */
2597
2598int
2599elf64_aarch64_setup_section_lists (bfd *output_bfd,
2600 struct bfd_link_info *info)
2601{
2602 bfd *input_bfd;
2603 unsigned int bfd_count;
2604 int top_id, top_index;
2605 asection *section;
2606 asection **input_list, **list;
2607 bfd_size_type amt;
2608 struct elf64_aarch64_link_hash_table *htab =
2609 elf64_aarch64_hash_table (info);
2610
2611 if (!is_elf_hash_table (htab))
2612 return 0;
2613
2614 /* Count the number of input BFDs and find the top input section id. */
2615 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2616 input_bfd != NULL; input_bfd = input_bfd->link_next)
2617 {
2618 bfd_count += 1;
2619 for (section = input_bfd->sections;
2620 section != NULL; section = section->next)
2621 {
2622 if (top_id < section->id)
2623 top_id = section->id;
2624 }
2625 }
2626 htab->bfd_count = bfd_count;
2627
2628 amt = sizeof (struct map_stub) * (top_id + 1);
2629 htab->stub_group = bfd_zmalloc (amt);
2630 if (htab->stub_group == NULL)
2631 return -1;
2632
2633 /* We can't use output_bfd->section_count here to find the top output
2634 section index as some sections may have been removed, and
2635 _bfd_strip_section_from_output doesn't renumber the indices. */
2636 for (section = output_bfd->sections, top_index = 0;
2637 section != NULL; section = section->next)
2638 {
2639 if (top_index < section->index)
2640 top_index = section->index;
2641 }
2642
2643 htab->top_index = top_index;
2644 amt = sizeof (asection *) * (top_index + 1);
2645 input_list = bfd_malloc (amt);
2646 htab->input_list = input_list;
2647 if (input_list == NULL)
2648 return -1;
2649
2650 /* For sections we aren't interested in, mark their entries with a
2651 value we can check later. */
2652 list = input_list + top_index;
2653 do
2654 *list = bfd_abs_section_ptr;
2655 while (list-- != input_list);
2656
2657 for (section = output_bfd->sections;
2658 section != NULL; section = section->next)
2659 {
2660 if ((section->flags & SEC_CODE) != 0)
2661 input_list[section->index] = NULL;
2662 }
2663
2664 return 1;
2665}
2666
2667/* Used by elf64_aarch64_next_input_section and group_sections. */
2668#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2669
2670/* The linker repeatedly calls this function for each input section,
2671 in the order that input sections are linked into output sections.
2672 Build lists of input sections to determine groupings between which
2673 we may insert linker stubs. */
2674
2675void
2676elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2677{
2678 struct elf64_aarch64_link_hash_table *htab =
2679 elf64_aarch64_hash_table (info);
2680
2681 if (isec->output_section->index <= htab->top_index)
2682 {
2683 asection **list = htab->input_list + isec->output_section->index;
2684
2685 if (*list != bfd_abs_section_ptr)
2686 {
2687 /* Steal the link_sec pointer for our list. */
2688 /* This happens to make the list in reverse order,
2689 which is what we want. */
2690 PREV_SEC (isec) = *list;
2691 *list = isec;
2692 }
2693 }
2694}
2695
2696/* See whether we can group stub sections together. Grouping stub
2697 sections may result in fewer stubs. More importantly, we need to
2698 put all .init* and .fini* stubs at the beginning of the .init or
2699 .fini output sections respectively, because glibc splits the
2700 _init and _fini functions into multiple parts. Putting a stub in
2701 the middle of a function is not a good idea. */
2702
2703static void
2704group_sections (struct elf64_aarch64_link_hash_table *htab,
2705 bfd_size_type stub_group_size,
2706 bfd_boolean stubs_always_before_branch)
2707{
2708 asection **list = htab->input_list + htab->top_index;
2709
2710 do
2711 {
2712 asection *tail = *list;
2713
2714 if (tail == bfd_abs_section_ptr)
2715 continue;
2716
2717 while (tail != NULL)
2718 {
2719 asection *curr;
2720 asection *prev;
2721 bfd_size_type total;
2722
2723 curr = tail;
2724 total = tail->size;
2725 while ((prev = PREV_SEC (curr)) != NULL
2726 && ((total += curr->output_offset - prev->output_offset)
2727 < stub_group_size))
2728 curr = prev;
2729
2730 /* OK, the size from the start of CURR to the end is less
2731 than stub_group_size and thus can be handled by one stub
2732 section. (Or the tail section is itself larger than
2733 stub_group_size, in which case we may be toast.)
2734 We should really be keeping track of the total size of
2735 stubs added here, as stubs contribute to the final output
2736 section size. */
2737 do
2738 {
2739 prev = PREV_SEC (tail);
2740 /* Set up this stub group. */
2741 htab->stub_group[tail->id].link_sec = curr;
2742 }
2743 while (tail != curr && (tail = prev) != NULL);
2744
2745 /* But wait, there's more! Input sections up to stub_group_size
2746 bytes before the stub section can be handled by it too. */
2747 if (!stubs_always_before_branch)
2748 {
2749 total = 0;
2750 while (prev != NULL
2751 && ((total += tail->output_offset - prev->output_offset)
2752 < stub_group_size))
2753 {
2754 tail = prev;
2755 prev = PREV_SEC (tail);
2756 htab->stub_group[tail->id].link_sec = curr;
2757 }
2758 }
2759 tail = prev;
2760 }
2761 }
2762 while (list-- != htab->input_list);
2763
2764 free (htab->input_list);
2765}
2766
2767#undef PREV_SEC
2768
2769/* Determine and set the size of the stub section for a final link.
2770
2771 The basic idea here is to examine all the relocations looking for
2772 PC-relative calls to a target that is unreachable with a "bl"
2773 instruction. */
2774
2775bfd_boolean
2776elf64_aarch64_size_stubs (bfd *output_bfd,
2777 bfd *stub_bfd,
2778 struct bfd_link_info *info,
2779 bfd_signed_vma group_size,
2780 asection * (*add_stub_section) (const char *,
2781 asection *),
2782 void (*layout_sections_again) (void))
2783{
2784 bfd_size_type stub_group_size;
2785 bfd_boolean stubs_always_before_branch;
2786 bfd_boolean stub_changed = 0;
2787 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2788
2789 /* Propagate mach to stub bfd, because it may not have been
2790 finalized when we created stub_bfd. */
2791 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2792 bfd_get_mach (output_bfd));
2793
2794 /* Stash our params away. */
2795 htab->stub_bfd = stub_bfd;
2796 htab->add_stub_section = add_stub_section;
2797 htab->layout_sections_again = layout_sections_again;
2798 stubs_always_before_branch = group_size < 0;
2799 if (group_size < 0)
2800 stub_group_size = -group_size;
2801 else
2802 stub_group_size = group_size;
2803
2804 if (stub_group_size == 1)
2805 {
2806 /* Default values. */
2807 /* AArch64 branch range is +/-128MB. The value used is 1MB less. */
2808 stub_group_size = 127 * 1024 * 1024;
2809 }
2810
2811 group_sections (htab, stub_group_size, stubs_always_before_branch);
2812
2813 while (1)
2814 {
2815 bfd *input_bfd;
2816 unsigned int bfd_indx;
2817 asection *stub_sec;
2818
2819 for (input_bfd = info->input_bfds, bfd_indx = 0;
2820 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2821 {
2822 Elf_Internal_Shdr *symtab_hdr;
2823 asection *section;
2824 Elf_Internal_Sym *local_syms = NULL;
2825
2826 /* We'll need the symbol table in a second. */
2827 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2828 if (symtab_hdr->sh_info == 0)
2829 continue;
2830
2831 /* Walk over each section attached to the input bfd. */
2832 for (section = input_bfd->sections;
2833 section != NULL; section = section->next)
2834 {
2835 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2836
2837 /* If there aren't any relocs, then there's nothing more
2838 to do. */
2839 if ((section->flags & SEC_RELOC) == 0
2840 || section->reloc_count == 0
2841 || (section->flags & SEC_CODE) == 0)
2842 continue;
2843
2844 /* If this section is a link-once section that will be
2845 discarded, then don't create any stubs. */
2846 if (section->output_section == NULL
2847 || section->output_section->owner != output_bfd)
2848 continue;
2849
2850 /* Get the relocs. */
2851 internal_relocs
2852 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2853 NULL, info->keep_memory);
2854 if (internal_relocs == NULL)
2855 goto error_ret_free_local;
2856
2857 /* Now examine each relocation. */
2858 irela = internal_relocs;
2859 irelaend = irela + section->reloc_count;
2860 for (; irela < irelaend; irela++)
2861 {
2862 unsigned int r_type, r_indx;
2863 enum elf64_aarch64_stub_type stub_type;
2864 struct elf64_aarch64_stub_hash_entry *stub_entry;
2865 asection *sym_sec;
2866 bfd_vma sym_value;
2867 bfd_vma destination;
2868 struct elf64_aarch64_link_hash_entry *hash;
2869 const char *sym_name;
2870 char *stub_name;
2871 const asection *id_sec;
2872 unsigned char st_type;
2873 bfd_size_type len;
2874
2875 r_type = ELF64_R_TYPE (irela->r_info);
2876 r_indx = ELF64_R_SYM (irela->r_info);
2877
2878 if (r_type >= (unsigned int) R_AARCH64_end)
2879 {
2880 bfd_set_error (bfd_error_bad_value);
2881 error_ret_free_internal:
2882 if (elf_section_data (section)->relocs == NULL)
2883 free (internal_relocs);
2884 goto error_ret_free_local;
2885 }
2886
2887 /* Only look for stubs on unconditional branch and
2888 branch and link instructions. */
2889 if (r_type != (unsigned int) R_AARCH64_CALL26
2890 && r_type != (unsigned int) R_AARCH64_JUMP26)
2891 continue;
2892
2893 /* Now determine the call target, its name, value,
2894 section. */
2895 sym_sec = NULL;
2896 sym_value = 0;
2897 destination = 0;
2898 hash = NULL;
2899 sym_name = NULL;
2900 if (r_indx < symtab_hdr->sh_info)
2901 {
2902 /* It's a local symbol. */
2903 Elf_Internal_Sym *sym;
2904 Elf_Internal_Shdr *hdr;
2905
2906 if (local_syms == NULL)
2907 {
2908 local_syms
2909 = (Elf_Internal_Sym *) symtab_hdr->contents;
2910 if (local_syms == NULL)
2911 local_syms
2912 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2913 symtab_hdr->sh_info, 0,
2914 NULL, NULL, NULL);
2915 if (local_syms == NULL)
2916 goto error_ret_free_internal;
2917 }
2918
2919 sym = local_syms + r_indx;
2920 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2921 sym_sec = hdr->bfd_section;
2922 if (!sym_sec)
2923 /* This is an undefined symbol. It can never
2924 be resolved. */
2925 continue;
2926
2927 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2928 sym_value = sym->st_value;
2929 destination = (sym_value + irela->r_addend
2930 + sym_sec->output_offset
2931 + sym_sec->output_section->vma);
2932 st_type = ELF_ST_TYPE (sym->st_info);
2933 sym_name
2934 = bfd_elf_string_from_elf_section (input_bfd,
2935 symtab_hdr->sh_link,
2936 sym->st_name);
2937 }
2938 else
2939 {
2940 int e_indx;
2941
2942 e_indx = r_indx - symtab_hdr->sh_info;
2943 hash = ((struct elf64_aarch64_link_hash_entry *)
2944 elf_sym_hashes (input_bfd)[e_indx]);
2945
2946 while (hash->root.root.type == bfd_link_hash_indirect
2947 || hash->root.root.type == bfd_link_hash_warning)
2948 hash = ((struct elf64_aarch64_link_hash_entry *)
2949 hash->root.root.u.i.link);
2950
2951 if (hash->root.root.type == bfd_link_hash_defined
2952 || hash->root.root.type == bfd_link_hash_defweak)
2953 {
2954 struct elf64_aarch64_link_hash_table *globals =
2955 elf64_aarch64_hash_table (info);
2956 sym_sec = hash->root.root.u.def.section;
2957 sym_value = hash->root.root.u.def.value;
2958 /* For a destination in a shared library,
2959 use the PLT stub as target address to
2960 decide whether a branch stub is
2961 needed. */
2962 if (globals->root.splt != NULL && hash != NULL
2963 && hash->root.plt.offset != (bfd_vma) - 1)
2964 {
2965 sym_sec = globals->root.splt;
2966 sym_value = hash->root.plt.offset;
2967 if (sym_sec->output_section != NULL)
2968 destination = (sym_value
2969 + sym_sec->output_offset
2970 +
2971 sym_sec->output_section->vma);
2972 }
2973 else if (sym_sec->output_section != NULL)
2974 destination = (sym_value + irela->r_addend
2975 + sym_sec->output_offset
2976 + sym_sec->output_section->vma);
2977 }
2978 else if (hash->root.root.type == bfd_link_hash_undefined
2979 || (hash->root.root.type
2980 == bfd_link_hash_undefweak))
2981 {
2982 /* For a shared library, use the PLT stub as
2983 target address to decide whether a long
2984 branch stub is needed. Undefined symbols
2985 without a PLT entry cannot be handled. */
2986 struct elf64_aarch64_link_hash_table *globals =
2987 elf64_aarch64_hash_table (info);
2988
2989 if (globals->root.splt != NULL && hash != NULL
2990 && hash->root.plt.offset != (bfd_vma) - 1)
2991 {
2992 sym_sec = globals->root.splt;
2993 sym_value = hash->root.plt.offset;
2994 if (sym_sec->output_section != NULL)
2995 destination = (sym_value
2996 + sym_sec->output_offset
2997 +
2998 sym_sec->output_section->vma);
2999 }
3000 else
3001 continue;
3002 }
3003 else
3004 {
3005 bfd_set_error (bfd_error_bad_value);
3006 goto error_ret_free_internal;
3007 }
3008 st_type = ELF_ST_TYPE (hash->root.type);
3009 sym_name = hash->root.root.root.string;
3010 }
3011
3012 /* Determine what (if any) linker stub is needed. */
3013 stub_type = aarch64_type_of_stub
3014 (info, section, irela, st_type, hash, destination);
3015 if (stub_type == aarch64_stub_none)
3016 continue;
3017
3018 /* Support for grouping stub sections. */
3019 id_sec = htab->stub_group[section->id].link_sec;
3020
3021 /* Get the name of this stub. */
3022 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
3023 irela);
3024 if (!stub_name)
3025 goto error_ret_free_internal;
3026
3027 stub_entry =
3028 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3029 stub_name, FALSE, FALSE);
3030 if (stub_entry != NULL)
3031 {
3032 /* The proper stub has already been created. */
3033 free (stub_name);
3034 continue;
3035 }
3036
3037 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3038 htab);
3039 if (stub_entry == NULL)
3040 {
3041 free (stub_name);
3042 goto error_ret_free_internal;
3043 }
3044
3045 stub_entry->target_value = sym_value;
3046 stub_entry->target_section = sym_sec;
3047 stub_entry->stub_type = stub_type;
3048 stub_entry->h = hash;
3049 stub_entry->st_type = st_type;
3050
3051 if (sym_name == NULL)
3052 sym_name = "unnamed";
3053 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3054 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3055 if (stub_entry->output_name == NULL)
3056 {
3057 free (stub_name);
3058 goto error_ret_free_internal;
3059 }
3060
3061 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3062 sym_name);
3063
3064 stub_changed = TRUE;
3065 }
3066
3067 /* We're done with the internal relocs, free them. */
3068 if (elf_section_data (section)->relocs == NULL)
3069 free (internal_relocs);
3070 }
3071 }
3072
3073 if (!stub_changed)
3074 break;
3075
3076 /* OK, we've added some stubs. Find out the new size of the
3077 stub sections. */
3078 for (stub_sec = htab->stub_bfd->sections;
3079 stub_sec != NULL; stub_sec = stub_sec->next)
3080 stub_sec->size = 0;
3081
3082 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3083
3084 /* Ask the linker to do its stuff. */
3085 (*htab->layout_sections_again) ();
3086 stub_changed = FALSE;
3087 }
3088
3089 return TRUE;
3090
3091error_ret_free_local:
3092 return FALSE;
3093}
3094
3095/* Build all the stubs associated with the current output file. The
3096 stubs are kept in a hash table attached to the main linker hash
3097 table. We also set up the .plt entries for statically linked PIC
3098 functions here. This function is called via aarch64_elf_finish in the
3099 linker. */
3100
3101bfd_boolean
3102elf64_aarch64_build_stubs (struct bfd_link_info *info)
3103{
3104 asection *stub_sec;
3105 struct bfd_hash_table *table;
3106 struct elf64_aarch64_link_hash_table *htab;
3107
3108 htab = elf64_aarch64_hash_table (info);
3109
3110 for (stub_sec = htab->stub_bfd->sections;
3111 stub_sec != NULL; stub_sec = stub_sec->next)
3112 {
3113 bfd_size_type size;
3114
3115 /* Ignore non-stub sections. */
3116 if (!strstr (stub_sec->name, STUB_SUFFIX))
3117 continue;
3118
3119 /* Allocate memory to hold the linker stubs. */
3120 size = stub_sec->size;
3121 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3122 if (stub_sec->contents == NULL && size != 0)
3123 return FALSE;
3124 stub_sec->size = 0;
3125 }
3126
3127 /* Build the stubs as directed by the stub hash table. */
3128 table = &htab->stub_hash_table;
3129 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3130
3131 return TRUE;
3132}
3133
3134
3135/* Add an entry to the code/data map for section SEC. */
3136
3137static void
3138elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3139{
3140 struct _aarch64_elf_section_data *sec_data =
3141 elf64_aarch64_section_data (sec);
3142 unsigned int newidx;
3143
3144 if (sec_data->map == NULL)
3145 {
3146 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3147 sec_data->mapcount = 0;
3148 sec_data->mapsize = 1;
3149 }
3150
3151 newidx = sec_data->mapcount++;
3152
3153 if (sec_data->mapcount > sec_data->mapsize)
3154 {
3155 sec_data->mapsize *= 2;
3156 sec_data->map = bfd_realloc_or_free
3157 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3158 }
3159
3160 if (sec_data->map)
3161 {
3162 sec_data->map[newidx].vma = vma;
3163 sec_data->map[newidx].type = type;
3164 }
3165}
3166
3167
3168/* Initialise maps of insn/data for input BFDs. */
3169void
3170bfd_elf64_aarch64_init_maps (bfd *abfd)
3171{
3172 Elf_Internal_Sym *isymbuf;
3173 Elf_Internal_Shdr *hdr;
3174 unsigned int i, localsyms;
3175
3176 /* Make sure that we are dealing with an AArch64 elf binary. */
3177 if (!is_aarch64_elf (abfd))
3178 return;
3179
3180 if ((abfd->flags & DYNAMIC) != 0)
3181 return;
3182
3183 hdr = &elf_symtab_hdr (abfd);
3184 localsyms = hdr->sh_info;
3185
3186 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3187 should contain the number of local symbols, which should come before any
3188 global symbols. Mapping symbols are always local. */
3189 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3190
3191 /* No internal symbols read? Skip this BFD. */
3192 if (isymbuf == NULL)
3193 return;
3194
3195 for (i = 0; i < localsyms; i++)
3196 {
3197 Elf_Internal_Sym *isym = &isymbuf[i];
3198 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3199 const char *name;
3200
3201 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3202 {
3203 name = bfd_elf_string_from_elf_section (abfd,
3204 hdr->sh_link,
3205 isym->st_name);
3206
3207 if (bfd_is_aarch64_special_symbol_name
3208 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3209 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3210 }
3211 }
3212}
3213
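/* Illustrative sketch, not part of the original source: the map built
   above records the AArch64 mapping symbols that the assembler emits,
   conventionally "$x" at the start of a run of A64 instructions and
   "$d" at the start of literal data. A hypothetical section laid out
   as

     0x0000  $x  code ...
     0x0040  $d  literal pool ...
     0x0060  $x  more code ...

   yields three map entries of type 'x', 'd' and 'x' with the
   corresponding vmas (the type character is name[1] of the mapping
   symbol), letting later passes tell instructions from data. */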
3214/* Set option values needed during linking. */
3215void
3216bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3217 struct bfd_link_info *link_info,
3218 int no_enum_warn,
3219 int no_wchar_warn, int pic_veneer)
3220{
3221 struct elf64_aarch64_link_hash_table *globals;
3222
3223 globals = elf64_aarch64_hash_table (link_info);
3224 globals->pic_veneer = pic_veneer;
3225
3226 BFD_ASSERT (is_aarch64_elf (output_bfd));
3227 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3228 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3229}
3230
3231#define MASK(n) ((1u << (n)) - 1)
3232
3233/* Decode the 26-bit offset of unconditional branch. */
3234static inline uint32_t
3235decode_branch_ofs_26 (uint32_t insn)
3236{
3237 return insn & MASK (26);
3238}
3239
3240/* Decode the 19-bit offset of conditional branch and compare & branch. */
3241static inline uint32_t
3242decode_cond_branch_ofs_19 (uint32_t insn)
3243{
3244 return (insn >> 5) & MASK (19);
3245}
3246
3247/* Decode the 19-bit offset of load literal. */
3248static inline uint32_t
3249decode_ld_lit_ofs_19 (uint32_t insn)
3250{
3251 return (insn >> 5) & MASK (19);
3252}
3253
3254/* Decode the 14-bit offset of test & branch. */
3255static inline uint32_t
3256decode_tst_branch_ofs_14 (uint32_t insn)
3257{
3258 return (insn >> 5) & MASK (14);
3259}
3260
3261/* Decode the 16-bit imm of move wide. */
3262static inline uint32_t
3263decode_movw_imm (uint32_t insn)
3264{
3265 return (insn >> 5) & MASK (16);
3266}
3267
3268/* Decode the 21-bit imm of adr. */
3269static inline uint32_t
3270decode_adr_imm (uint32_t insn)
3271{
3272 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3273}
3274
3275/* Decode the 12-bit imm of add immediate. */
3276static inline uint32_t
3277decode_add_imm (uint32_t insn)
3278{
3279 return (insn >> 10) & MASK (12);
3280}
3281
3282
3283/* Encode the 26-bit offset of unconditional branch. */
3284static inline uint32_t
3285reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3286{
3287 return (insn & ~MASK (26)) | (ofs & MASK (26));
3288}
3289
3290/* Encode the 19-bit offset of conditional branch and compare & branch. */
3291static inline uint32_t
3292reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3293{
3294 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3295}
3296
3297/* Decode the 19-bit offset of load literal. */
3298static inline uint32_t
3299reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3300{
3301 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3302}
3303
3304/* Encode the 14-bit offset of test & branch. */
3305static inline uint32_t
3306reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3307{
3308 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3309}
3310
3311/* Reencode the imm field of move wide. */
3312static inline uint32_t
3313reencode_movw_imm (uint32_t insn, uint32_t imm)
3314{
3315 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3316}
3317
3318/* Reencode the imm field of adr. */
3319static inline uint32_t
3320reencode_adr_imm (uint32_t insn, uint32_t imm)
3321{
3322 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3323 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3324}
3325
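/* Illustrative sketch, not part of the original source: the ADR/ADRP
   immediate is split across the instruction word, the two low bits
   (immlo) living at bits [30:29] and the remaining 19 bits (immhi) at
   bits [23:5]. For a hypothetical 21-bit immediate 0x12345:

     immlo = 0x12345 & 0x3  = 0x1
     immhi = 0x12345 >> 2   = 0x48d1

   reencode_adr_imm above places 0x1 << 29 and 0x48d1 << 5 into the
   cleared field, and decode_adr_imm reverses the split, recovering the
   original 0x12345. */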
3326/* Reencode the imm field of ld/st pos immediate. */
3327static inline uint32_t
3328reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3329{
3330 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3331}
3332
3333/* Reencode the imm field of add immediate. */
3334static inline uint32_t
3335reencode_add_imm (uint32_t insn, uint32_t imm)
3336{
3337 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3338}
3339
3340/* Reencode mov[zn] to movz. */
3341static inline uint32_t
3342reencode_movzn_to_movz (uint32_t opcode)
3343{
3344 return opcode | (1 << 30);
3345}
3346
3347/* Reencode mov[zn] to movn. */
3348static inline uint32_t
3349reencode_movzn_to_movn (uint32_t opcode)
3350{
3351 return opcode & ~(1 << 30);
3352}
3353
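/* Illustrative sketch, not part of the original source: for the signed
   MOVW group relocations handled in bfd_elf_aarch64_put_addend below,
   the instruction is rewritten as MOVZ or MOVN depending on the sign
   of the computed value. For a 16-bit G0 field:

     value = 0x1234   ->  movz, imm16 = 0x1234
     value = -0x1234  ->  movn, imm16 = ~(-0x1234) = 0x1233

   Since MOVN writes the bitwise NOT of its immediate into the
   register, both encodings reproduce the intended value. */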
3354/* Insert the addend/value into the instruction or data object being
3355 relocated. */
3356static bfd_reloc_status_type
3357bfd_elf_aarch64_put_addend (bfd *abfd,
3358 bfd_byte *address,
3359 reloc_howto_type *howto, bfd_signed_vma addend)
3360{
3361 bfd_reloc_status_type status = bfd_reloc_ok;
3362 bfd_signed_vma old_addend = addend;
3363 bfd_vma contents;
3364 int size;
3365
3366 size = bfd_get_reloc_size (howto);
3367 switch (size)
3368 {
3369 case 2:
3370 contents = bfd_get_16 (abfd, address);
3371 break;
3372 case 4:
3373 if (howto->src_mask != 0xffffffff)
3374 /* Must be 32-bit instruction, always little-endian. */
3375 contents = bfd_getl32 (address);
3376 else
3377 /* Must be 32-bit data (endianness dependent). */
3378 contents = bfd_get_32 (abfd, address);
3379 break;
3380 case 8:
3381 contents = bfd_get_64 (abfd, address);
3382 break;
3383 default:
3384 abort ();
3385 }
3386
3387 switch (howto->complain_on_overflow)
3388 {
3389 case complain_overflow_dont:
3390 break;
3391 case complain_overflow_signed:
3392 status = aarch64_signed_overflow (addend,
3393 howto->bitsize + howto->rightshift);
3394 break;
3395 case complain_overflow_unsigned:
3396 status = aarch64_unsigned_overflow (addend,
3397 howto->bitsize + howto->rightshift);
3398 break;
3399 case complain_overflow_bitfield:
3400 default:
3401 abort ();
3402 }
3403
3404 addend >>= howto->rightshift;
3405
3406 switch (howto->type)
3407 {
3408 case R_AARCH64_JUMP26:
3409 case R_AARCH64_CALL26:
3410 contents = reencode_branch_ofs_26 (contents, addend);
3411 break;
3412
3413 case R_AARCH64_CONDBR19:
3414 contents = reencode_cond_branch_ofs_19 (contents, addend);
3415 break;
3416
3417 case R_AARCH64_TSTBR14:
3418 contents = reencode_tst_branch_ofs_14 (contents, addend);
3419 break;
3420
3421 case R_AARCH64_LD_PREL_LO19:
3422 case R_AARCH64_GOT_LD_PREL19:
3423 if (old_addend & ((1 << howto->rightshift) - 1))
3424 return bfd_reloc_overflow;
3425 contents = reencode_ld_lit_ofs_19 (contents, addend);
3426 break;
3427
3428 case R_AARCH64_TLSDESC_CALL:
3429 break;
3430
3431 case R_AARCH64_TLSGD_ADR_PAGE21:
3432 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3433 case R_AARCH64_TLSDESC_ADR_PAGE:
3434 case R_AARCH64_ADR_GOT_PAGE:
3435 case R_AARCH64_ADR_PREL_LO21:
3436 case R_AARCH64_ADR_PREL_PG_HI21:
3437 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3438 contents = reencode_adr_imm (contents, addend);
3439 break;
3440
3441 case R_AARCH64_TLSGD_ADD_LO12_NC:
3442 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3443 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3444 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3445 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3446 case R_AARCH64_ADD_ABS_LO12_NC:
3447 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3448 12 bits of the page offset following
3449 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3450 (pc-relative) page base. */
3451 contents = reencode_add_imm (contents, addend);
3452 break;
3453
3454 case R_AARCH64_LDST8_ABS_LO12_NC:
3455 case R_AARCH64_LDST16_ABS_LO12_NC:
3456 case R_AARCH64_LDST32_ABS_LO12_NC:
3457 case R_AARCH64_LDST64_ABS_LO12_NC:
3458 case R_AARCH64_LDST128_ABS_LO12_NC:
3459 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3460 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3461 case R_AARCH64_LD64_GOT_LO12_NC:
3462 if (old_addend & ((1 << howto->rightshift) - 1))
3463 return bfd_reloc_overflow;
3464 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3465 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3466 which computes the (pc-relative) page base. */
3467 contents = reencode_ldst_pos_imm (contents, addend);
3468 break;
3469
3470 /* Group relocations to create high bits of a 16, 32, 48 or 64
3471 bit signed data or abs address inline. Will change
3472 instruction to MOVN or MOVZ depending on sign of calculated
3473 value. */
3474
3475 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3476 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3477 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3478 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3479 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3480 case R_AARCH64_MOVW_SABS_G0:
3481 case R_AARCH64_MOVW_SABS_G1:
3482 case R_AARCH64_MOVW_SABS_G2:
3483 /* NOTE: We can only come here with movz or movn. */
3484 if (addend < 0)
3485 {
3486 /* Force use of MOVN. */
3487 addend = ~addend;
3488 contents = reencode_movzn_to_movn (contents);
3489 }
3490 else
3491 {
3492 /* Force use of MOVZ. */
3493 contents = reencode_movzn_to_movz (contents);
3494 }
3495 /* fall through */
3496
3497 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3498 data or abs address inline. */
3499
3500 case R_AARCH64_MOVW_UABS_G0:
3501 case R_AARCH64_MOVW_UABS_G0_NC:
3502 case R_AARCH64_MOVW_UABS_G1:
3503 case R_AARCH64_MOVW_UABS_G1_NC:
3504 case R_AARCH64_MOVW_UABS_G2:
3505 case R_AARCH64_MOVW_UABS_G2_NC:
3506 case R_AARCH64_MOVW_UABS_G3:
3507 contents = reencode_movw_imm (contents, addend);
3508 break;
3509
3510 default:
3511 /* Repack simple data */
3512 if (howto->dst_mask & (howto->dst_mask + 1))
3513 return bfd_reloc_notsupported;
3514
3515 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3516 break;
3517 }
3518
3519 switch (size)
3520 {
3521 case 2:
3522 bfd_put_16 (abfd, contents, address);
3523 break;
3524 case 4:
3525 if (howto->dst_mask != 0xffffffff)
3526 /* must be 32-bit instruction, always little-endian */
3527 bfd_putl32 (contents, address);
3528 else
3529 /* must be 32-bit data (endianness dependent) */
3530 bfd_put_32 (abfd, contents, address);
3531 break;
3532 case 8:
3533 bfd_put_64 (abfd, contents, address);
3534 break;
3535 default:
3536 abort ();
3537 }
3538
3539 return status;
3540}
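/* A worked example of the MOVZ/MOVN selection above, using hypothetical
   numbers: for R_AARCH64_MOVW_SABS_G0 with a computed value of -0x1234,
   the addend is flipped to ~(-0x1234) == 0x1233 and the instruction is
   rewritten to MOVN, so at run time the register receives
   ~0x1233 == -0x1234.  A positive value such as 0x1234 is packed
   directly into a MOVZ.  Either way, reencode_movw_imm only ever has
   to insert a non-negative 16-bit immediate.  */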
3541
3542static bfd_vma
3543aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3544 struct elf64_aarch64_link_hash_table
3545 *globals, struct bfd_link_info *info,
3546 bfd_vma value, bfd *output_bfd,
3547 bfd_boolean *unresolved_reloc_p)
3548{
3549 bfd_vma off = (bfd_vma) - 1;
3550 asection *basegot = globals->root.sgot;
3551 bfd_boolean dyn = globals->root.dynamic_sections_created;
3552
3553 if (h != NULL)
3554 {
3555 off = h->got.offset;
3556 BFD_ASSERT (off != (bfd_vma) - 1);
3557 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3558 || (info->shared
3559 && SYMBOL_REFERENCES_LOCAL (info, h))
3560 || (ELF_ST_VISIBILITY (h->other)
3561 && h->root.type == bfd_link_hash_undefweak))
3562 {
3563 /* This is actually a static link, or it is a -Bsymbolic link
3564 and the symbol is defined locally. We must initialize this
3565 entry in the global offset table. Since the offset must
3566 always be a multiple of 8, we use the least significant bit
3567 to record whether we have initialized it already.
3568 When doing a dynamic link, we create a .rel(a).got relocation
3569 entry to initialize the value. This is done in the
3570 finish_dynamic_symbol routine. */
3571 if ((off & 1) != 0)
3572 off &= ~1;
3573 else
3574 {
3575 bfd_put_64 (output_bfd, value, basegot->contents + off);
3576 h->got.offset |= 1;
3577 }
3578 }
3579 else
3580 *unresolved_reloc_p = FALSE;
3581
3582 off = off + basegot->output_section->vma + basegot->output_offset;
3583 }
3584
3585 return off;
3586}
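/* A small illustration of the convention above, with made-up numbers:
   GOT entries are 8 bytes, so bit 0 of h->got.offset is free and is
   used as an "already initialized" flag.  If h->got.offset is 0x18,
   the first call writes the value into .got + 0x18 and stores 0x19;
   later calls see the low bit, clear it, and skip the write.  With
   .got placed at vma 0x411000 (output_offset 0), the function returns
   0x411000 + 0x18 == 0x411018 in both cases.  */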
3587
3588/* Change R_TYPE to a more efficient access model where possible,
3589 return the new reloc type. */
3590
3591static unsigned int
3592aarch64_tls_transition_without_check (unsigned int r_type,
3593 struct elf_link_hash_entry *h)
3594{
3595 bfd_boolean is_local = h == NULL;
3596 switch (r_type)
3597 {
3598 case R_AARCH64_TLSGD_ADR_PAGE21:
3599 case R_AARCH64_TLSDESC_ADR_PAGE:
3600 return is_local
3601 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3602
3603 case R_AARCH64_TLSGD_ADD_LO12_NC:
3604 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3605 return is_local
3606 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3607 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3608
3609 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3610 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3611
3612 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3613 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3614
3615 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3616 case R_AARCH64_TLSDESC_CALL:
3617 /* Instructions with these relocations will become NOPs. */
3618 return R_AARCH64_NONE;
3619 }
3620
3621 return r_type;
3622}
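/* For example, a general-dynamic or TLS-descriptor access to a symbol
   that turns out to be local is rewritten to the local-exec form:
   R_AARCH64_TLSGD_ADR_PAGE21 and R_AARCH64_TLSDESC_ADR_PAGE become
   R_AARCH64_TLSLE_MOVW_TPREL_G1, the matching LO12 relocs become
   R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, and the descriptor ADD/CALL pair
   becomes R_AARCH64_NONE (the instructions are turned into NOPs by
   elf64_aarch64_tls_relax below).  For a preemptible symbol the same
   relocs instead map to their initial-exec (GOTTPREL) counterparts.  */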
3623
3624static unsigned int
3625aarch64_reloc_got_type (unsigned int r_type)
3626{
3627 switch (r_type)
3628 {
3629 case R_AARCH64_LD64_GOT_LO12_NC:
3630 case R_AARCH64_ADR_GOT_PAGE:
3631 case R_AARCH64_GOT_LD_PREL19:
3632 return GOT_NORMAL;
3633
3634 case R_AARCH64_TLSGD_ADR_PAGE21:
3635 case R_AARCH64_TLSGD_ADD_LO12_NC:
3636 return GOT_TLS_GD;
3637
3638 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3639 case R_AARCH64_TLSDESC_ADR_PAGE:
3640 case R_AARCH64_TLSDESC_CALL:
3641 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3642 return GOT_TLSDESC_GD;
3643
3644 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3645 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3646 return GOT_TLS_IE;
3647
3648 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3649 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3650 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3651 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3652 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3653 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3654 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3655 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3656 return GOT_UNKNOWN;
3657 }
3658 return GOT_UNKNOWN;
3659}
3660
3661static bfd_boolean
3662aarch64_can_relax_tls (bfd *input_bfd,
3663 struct bfd_link_info *info,
3664 unsigned int r_type,
3665 struct elf_link_hash_entry *h,
3666 unsigned long r_symndx)
3667{
3668 unsigned int symbol_got_type;
3669 unsigned int reloc_got_type;
3670
3671 if (! IS_AARCH64_TLS_RELOC (r_type))
3672 return FALSE;
3673
3674 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3675 reloc_got_type = aarch64_reloc_got_type (r_type);
3676
3677 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3678 return TRUE;
3679
3680 if (info->shared)
3681 return FALSE;
3682
3683 if (h && h->root.type == bfd_link_hash_undefweak)
3684 return FALSE;
3685
3686 return TRUE;
3687}
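/* As an example of the rules above: when building an executable, a
   R_AARCH64_TLSGD_ADR_PAGE21 against a defined global may be relaxed
   (GD->IE or GD->LE, per the transition table above), but the same
   reloc against an undefined weak symbol is left alone.  When building
   a shared object, only the GD->IE case is taken, and only for a
   symbol whose GOT entry is already of the IE kind.  */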
3688
3689static unsigned int
3690aarch64_tls_transition (bfd *input_bfd,
3691 struct bfd_link_info *info,
3692 unsigned int r_type,
3693 struct elf_link_hash_entry *h,
3694 unsigned long r_symndx)
3695{
3696 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3697 return r_type;
3698
3699 return aarch64_tls_transition_without_check (r_type, h);
3700}
3701
3702/* Return the base VMA address which should be subtracted from real addresses
3703 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3704
3705static bfd_vma
3706dtpoff_base (struct bfd_link_info *info)
3707{
3708 /* If tls_sec is NULL, we should have signalled an error already. */
3709 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3710 return elf_hash_table (info)->tls_sec->vma;
3711}
3712
3713
3714/* Return the base VMA address which should be subtracted from real addresses
3715 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3716
3717static bfd_vma
3718tpoff_base (struct bfd_link_info *info)
3719{
3720 struct elf_link_hash_table *htab = elf_hash_table (info);
3721
3722 /* If tls_sec is NULL, we should have signalled an error already. */
3723 if (htab->tls_sec == NULL)
3724 return 0;
3725
3726 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3727 htab->tls_sec->alignment_power);
3728 return htab->tls_sec->vma - base;
3729}
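/* A worked example with made-up figures, assuming TCB_SIZE is 16:
   if the TLS output section lives at vma 0x20000 and is 16-byte
   aligned, then base == 16 and tpoff_base returns 0x1fff0.  A TLS
   symbol at vma 0x20008 therefore resolves to the TP-relative offset
   0x20008 - 0x1fff0 == 0x18, i.e. the TCB sits at TP and the TLS
   block starts 16 bytes above it.  */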
3730
3731static bfd_vma *
3732symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3733 unsigned long r_symndx)
3734{
3735 /* Calculate the address of the GOT entry for symbol
3736 referred to in h. */
3737 if (h != NULL)
3738 return &h->got.offset;
3739 else
3740 {
3741 /* local symbol */
3742 struct elf_aarch64_local_symbol *l;
3743
3744 l = elf64_aarch64_locals (input_bfd);
3745 return &l[r_symndx].got_offset;
3746 }
3747}
3748
3749static void
3750symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3751 unsigned long r_symndx)
3752{
3753 bfd_vma *p;
3754 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3755 *p |= 1;
3756}
3757
3758static int
3759symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3760 unsigned long r_symndx)
3761{
3762 bfd_vma value;
3763 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3764 return value & 1;
3765}
3766
3767static bfd_vma
3768symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3769 unsigned long r_symndx)
3770{
3771 bfd_vma value;
3772 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3773 value &= ~1;
3774 return value;
3775}
3776
3777static bfd_vma *
3778symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3779 unsigned long r_symndx)
3780{
3781 /* Calculate the address of the GOT entry for symbol
3782 referred to in h. */
3783 if (h != NULL)
3784 {
3785 struct elf64_aarch64_link_hash_entry *eh;
3786 eh = (struct elf64_aarch64_link_hash_entry *) h;
3787 return &eh->tlsdesc_got_jump_table_offset;
3788 }
3789 else
3790 {
3791 /* local symbol */
3792 struct elf_aarch64_local_symbol *l;
3793
3794 l = elf64_aarch64_locals (input_bfd);
3795 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3796 }
3797}
3798
3799static void
3800symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3801 unsigned long r_symndx)
3802{
3803 bfd_vma *p;
3804 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3805 *p |= 1;
3806}
3807
3808static int
3809symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3810 struct elf_link_hash_entry *h,
3811 unsigned long r_symndx)
3812{
3813 bfd_vma value;
3814 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3815 return value & 1;
3816}
3817
3818static bfd_vma
3819symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3820 unsigned long r_symndx)
3821{
3822 bfd_vma value;
3823 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3824 value &= ~1;
3825 return value;
3826}
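/* The *_mark / *_mark_p / *_offset helpers above all rely on the same
   trick: GOT and TLSDESC slots are at least 8-byte aligned, so bit 0
   of the recorded offset is free and is used to note that the entry
   (and any dynamic relocation for it) has already been emitted.  As a
   hypothetical example, an offset of 0x28 is stored as 0x29 once
   marked; symbol_got_offset still returns 0x28, while
   symbol_got_offset_mark_p returns non-zero so relocate_section does
   not fill the slot a second time.  */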
3827
3828/* Perform a relocation as part of a final link. */
3829static bfd_reloc_status_type
3830elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3831 bfd *input_bfd,
3832 bfd *output_bfd,
3833 asection *input_section,
3834 bfd_byte *contents,
3835 Elf_Internal_Rela *rel,
3836 bfd_vma value,
3837 struct bfd_link_info *info,
3838 asection *sym_sec,
3839 struct elf_link_hash_entry *h,
3840 bfd_boolean *unresolved_reloc_p,
3841 bfd_boolean save_addend,
3842 bfd_vma *saved_addend)
3843{
3844 unsigned int r_type = howto->type;
3845 unsigned long r_symndx;
3846 bfd_byte *hit_data = contents + rel->r_offset;
3847 bfd_vma place;
3848 bfd_signed_vma signed_addend;
3849 struct elf64_aarch64_link_hash_table *globals;
3850 bfd_boolean weak_undef_p;
3851
3852 globals = elf64_aarch64_hash_table (info);
3853
3854 BFD_ASSERT (is_aarch64_elf (input_bfd));
3855
3856 r_symndx = ELF64_R_SYM (rel->r_info);
3857
3858 /* It is possible to have linker relaxations on some TLS access
3859 models. Update our information here. */
3860 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3861
3862 if (r_type != howto->type)
3863 howto = elf64_aarch64_howto_from_type (r_type);
3864
3865 place = input_section->output_section->vma
3866 + input_section->output_offset + rel->r_offset;
3867
3868 /* Get addend, accumulating the addend for consecutive relocs
3869 which refer to the same offset. */
3870 signed_addend = saved_addend ? *saved_addend : 0;
3871 signed_addend += rel->r_addend;
3872
3873 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3874 : bfd_is_und_section (sym_sec));
3875 switch (r_type)
3876 {
3877 case R_AARCH64_NONE:
3878 case R_AARCH64_NULL:
3879 case R_AARCH64_TLSDESC_CALL:
3880 *unresolved_reloc_p = FALSE;
3881 return bfd_reloc_ok;
3882
3883 case R_AARCH64_ABS64:
3884
3885 /* When generating a shared object or relocatable executable, these
3886 relocations are copied into the output file to be resolved at
3887 run time. */
3888 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3889 && (input_section->flags & SEC_ALLOC)
3890 && (h == NULL
3891 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3892 || h->root.type != bfd_link_hash_undefweak))
3893 {
3894 Elf_Internal_Rela outrel;
3895 bfd_byte *loc;
3896 bfd_boolean skip, relocate;
3897 asection *sreloc;
3898
3899 *unresolved_reloc_p = FALSE;
3900
3901 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3902 input_section, 1);
3903 if (sreloc == NULL)
3904 return bfd_reloc_notsupported;
3905
3906 skip = FALSE;
3907 relocate = FALSE;
3908
3909 outrel.r_addend = signed_addend;
3910 outrel.r_offset =
3911 _bfd_elf_section_offset (output_bfd, info, input_section,
3912 rel->r_offset);
3913 if (outrel.r_offset == (bfd_vma) - 1)
3914 skip = TRUE;
3915 else if (outrel.r_offset == (bfd_vma) - 2)
3916 {
3917 skip = TRUE;
3918 relocate = TRUE;
3919 }
3920
3921 outrel.r_offset += (input_section->output_section->vma
3922 + input_section->output_offset);
3923
3924 if (skip)
3925 memset (&outrel, 0, sizeof outrel);
3926 else if (h != NULL
3927 && h->dynindx != -1
3928 && (!info->shared || !info->symbolic || !h->def_regular))
3929 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3930 else
3931 {
3932 int symbol;
3933
3934 /* On SVR4-ish systems, the dynamic loader cannot
3935 relocate the text and data segments independently,
3936 so the symbol does not matter. */
3937 symbol = 0;
3938 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3939 outrel.r_addend += value;
3940 }
3941
3942 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3943 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3944
3945 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3946 {
3947 /* Sanity check that we have previously allocated
3948 sufficient space in the relocation section for the
3949 number of relocations we actually want to emit. */
3950 abort ();
3951 }
3952
3953 /* If this reloc is against an external symbol, we do not want to
3954 fiddle with the addend. Otherwise, we need to include the symbol
3955 value so that it becomes an addend for the dynamic reloc. */
3956 if (!relocate)
3957 return bfd_reloc_ok;
3958
3959 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3960 contents, rel->r_offset, value,
3961 signed_addend);
3962 }
3963 else
3964 value += signed_addend;
3965 break;
3966
3967 case R_AARCH64_JUMP26:
3968 case R_AARCH64_CALL26:
3969 {
3970 asection *splt = globals->root.splt;
3971 bfd_boolean via_plt_p =
3972 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3973
3974 /* A call to an undefined weak symbol is converted to a jump to
3975 the next instruction unless a PLT entry will be created.
3976 The jump to the next instruction is optimized as a NOP.
3977 Do the same for local undefined symbols. */
3978 if (weak_undef_p && ! via_plt_p)
3979 {
3980 bfd_putl32 (INSN_NOP, hit_data);
3981 return bfd_reloc_ok;
3982 }
3983
3984 /* If the call goes through a PLT entry, make sure to
3985 check distance to the right destination address. */
3986 if (via_plt_p)
3987 {
3988 value = (splt->output_section->vma
3989 + splt->output_offset + h->plt.offset);
3990 *unresolved_reloc_p = FALSE;
3991 }
3992
3993 /* If the target symbol is global and marked as a function, the
3994 relocation applies to a function call or a tail call. In this
3995 situation we can veneer out-of-range branches. The veneers
3996 use IP0 and IP1, hence they cannot be used for arbitrary
3997 out-of-range branches that occur within the body of a function. */
3998 if (h && h->type == STT_FUNC)
3999 {
4000 /* Check if a stub has to be inserted because the destination
4001 is too far away. */
4002 if (! aarch64_valid_branch_p (value, place))
4003 {
4004 /* The target is out of reach, so redirect the branch to
4005 the local stub for this function. */
4006 struct elf64_aarch64_stub_hash_entry *stub_entry;
4007 stub_entry = elf64_aarch64_get_stub_entry (input_section,
4008 sym_sec, h,
4009 rel, globals);
4010 if (stub_entry != NULL)
4011 value = (stub_entry->stub_offset
4012 + stub_entry->stub_sec->output_offset
4013 + stub_entry->stub_sec->output_section->vma);
4014 }
4015 }
4016 }
4017 value = aarch64_resolve_relocation (r_type, place, value,
4018 signed_addend, weak_undef_p);
4019 break;
4020
4021 case R_AARCH64_ABS16:
4022 case R_AARCH64_ABS32:
4023 case R_AARCH64_ADD_ABS_LO12_NC:
4024 case R_AARCH64_ADR_PREL_LO21:
4025 case R_AARCH64_ADR_PREL_PG_HI21:
4026 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4027 case R_AARCH64_CONDBR19:
4028 case R_AARCH64_LD_PREL_LO19:
4029 case R_AARCH64_LDST8_ABS_LO12_NC:
4030 case R_AARCH64_LDST16_ABS_LO12_NC:
4031 case R_AARCH64_LDST32_ABS_LO12_NC:
4032 case R_AARCH64_LDST64_ABS_LO12_NC:
4033 case R_AARCH64_LDST128_ABS_LO12_NC:
4034 case R_AARCH64_MOVW_SABS_G0:
4035 case R_AARCH64_MOVW_SABS_G1:
4036 case R_AARCH64_MOVW_SABS_G2:
4037 case R_AARCH64_MOVW_UABS_G0:
4038 case R_AARCH64_MOVW_UABS_G0_NC:
4039 case R_AARCH64_MOVW_UABS_G1:
4040 case R_AARCH64_MOVW_UABS_G1_NC:
4041 case R_AARCH64_MOVW_UABS_G2:
4042 case R_AARCH64_MOVW_UABS_G2_NC:
4043 case R_AARCH64_MOVW_UABS_G3:
4044 case R_AARCH64_PREL16:
4045 case R_AARCH64_PREL32:
4046 case R_AARCH64_PREL64:
4047 case R_AARCH64_TSTBR14:
4048 value = aarch64_resolve_relocation (r_type, place, value,
4049 signed_addend, weak_undef_p);
4050 break;
4051
4052 case R_AARCH64_LD64_GOT_LO12_NC:
4053 case R_AARCH64_ADR_GOT_PAGE:
4054 case R_AARCH64_GOT_LD_PREL19:
4055 if (globals->root.sgot == NULL)
4056 BFD_ASSERT (h != NULL);
4057
4058 if (h != NULL)
4059 {
4060 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4061 output_bfd,
4062 unresolved_reloc_p);
4063 value = aarch64_resolve_relocation (r_type, place, value,
4064 0, weak_undef_p);
4065 }
4066 break;
4067
4068 case R_AARCH64_TLSGD_ADR_PAGE21:
4069 case R_AARCH64_TLSGD_ADD_LO12_NC:
4070 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4071 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4072 if (globals->root.sgot == NULL)
4073 return bfd_reloc_notsupported;
4074
4075 value = (symbol_got_offset (input_bfd, h, r_symndx)
4076 + globals->root.sgot->output_section->vma
4077 + globals->root.sgot->output_section->output_offset);
4078
4079 value = aarch64_resolve_relocation (r_type, place, value,
4080 0, weak_undef_p);
4081 *unresolved_reloc_p = FALSE;
4082 break;
4083
4084 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4085 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4086 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4087 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4088 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4089 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4090 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4091 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4092 value = aarch64_resolve_relocation (r_type, place, value,
4093 signed_addend - tpoff_base (info), weak_undef_p);
4094 *unresolved_reloc_p = FALSE;
4095 break;
4096
4097 case R_AARCH64_TLSDESC_ADR_PAGE:
4098 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4099 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4100 case R_AARCH64_TLSDESC_ADD:
4101 case R_AARCH64_TLSDESC_LDR:
4102 if (globals->root.sgot == NULL)
4103 return bfd_reloc_notsupported;
4104
4105 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4106 + globals->root.sgotplt->output_section->vma
4107 + globals->root.sgotplt->output_section->output_offset
4108 + globals->sgotplt_jump_table_size);
4109
4110 value = aarch64_resolve_relocation (r_type, place, value,
4111 0, weak_undef_p);
4112 *unresolved_reloc_p = FALSE;
4113 break;
4114
4115 default:
4116 return bfd_reloc_notsupported;
4117 }
4118
4119 if (saved_addend)
4120 *saved_addend = value;
4121
4122 /* Only apply the final relocation in a sequence. */
4123 if (save_addend)
4124 return bfd_reloc_continue;
4125
4126 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4127}
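/* Sketch of the R_AARCH64_ABS64 handling above for a shared link, with
   a hypothetical symbol: an ABS64 in an allocated section against a
   preemptible global "foo" is copied to the output as a dynamic ABS64
   against foo's dynamic symbol index, and the place itself is left for
   the loader to fill in.  The same reloc against a local or
   non-preemptible symbol is emitted as R_AARCH64_RELATIVE, with the
   link-time value folded into r_addend so the loader only has to add
   the load bias.  */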
4128
4129/* Handle TLS relaxations. Relaxing is possible for symbols that use
4130 R_AARCH64_TLSDESC_{ADR_PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4131 link.
4132
4133 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4134 is to then call final_link_relocate. Return other values in the
4135 case of error. */
4136
4137static bfd_reloc_status_type
4138elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4139 bfd *input_bfd, bfd_byte *contents,
4140 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4141{
4142 bfd_boolean is_local = h == NULL;
4143 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4144 unsigned long insn;
4145
4146 BFD_ASSERT (globals && input_bfd && contents && rel);
4147
4148 switch (r_type)
4149 {
4150 case R_AARCH64_TLSGD_ADR_PAGE21:
4151 case R_AARCH64_TLSDESC_ADR_PAGE:
4152 if (is_local)
4153 {
4154 /* GD->LE relaxation:
4155 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4156 or
4157 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4158 */
4159 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4160 return bfd_reloc_continue;
4161 }
4162 else
4163 {
4164 /* GD->IE relaxation:
4165 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4166 or
4167 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4168 */
4169 insn = bfd_getl32 (contents + rel->r_offset);
4170 return bfd_reloc_continue;
4171 }
4172
4173 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4174 if (is_local)
4175 {
4176 /* GD->LE relaxation:
4177 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4178 */
4179 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4180 return bfd_reloc_continue;
4181 }
4182 else
4183 {
4184 /* GD->IE relaxation:
4185 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4186 */
4187 insn = bfd_getl32 (contents + rel->r_offset);
4188 insn &= 0xfffffff0;
4189 bfd_putl32 (insn, contents + rel->r_offset);
4190 return bfd_reloc_continue;
4191 }
4192
4193 case R_AARCH64_TLSGD_ADD_LO12_NC:
4194 if (is_local)
4195 {
4196 /* GD->LE relaxation
4197 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4198 bl __tls_get_addr => mrs x1, tpidr_el0
4199 nop => add x0, x1, x0
4200 */
4201
4202 /* First kill the tls_get_addr reloc on the bl instruction. */
4203 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4204 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4205
4206 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4207 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4208 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4209 return bfd_reloc_continue;
4210 }
4211 else
4212 {
4213 /* GD->IE relaxation
4214 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4215 BL __tls_get_addr => mrs x1, tpidr_el0
4216 R_AARCH64_CALL26
4217 NOP => add x0, x1, x0
4218 */
4219
4220 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4221
4222 /* Remove the relocation on the BL instruction. */
4223 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4224
4225 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4226
4227 /* We choose to fixup the BL and NOP instructions using the
4228 offset from the second relocation to allow flexibility in
4229 scheduling instructions between the ADD and BL. */
4230 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4231 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4232 return bfd_reloc_continue;
4233 }
4234
4235 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4236 case R_AARCH64_TLSDESC_CALL:
4237 /* GD->IE/LE relaxation:
4238 add x0, x0, #:tlsdesc_lo12:var => nop
4239 blr xd => nop
4240 */
4241 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4242 return bfd_reloc_ok;
4243
4244 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4245 /* IE->LE relaxation:
4246 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4247 */
4248 if (is_local)
4249 {
4250 insn = bfd_getl32 (contents + rel->r_offset);
4251 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4252 }
4253 return bfd_reloc_continue;
4254
4255 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4256 /* IE->LE relaxation:
4257 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4258 */
4259 if (is_local)
4260 {
4261 insn = bfd_getl32 (contents + rel->r_offset);
4262 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4263 }
4264 return bfd_reloc_continue;
4265
4266 default:
4267 return bfd_reloc_continue;
4268 }
4269
4270 return bfd_reloc_ok;
4271}
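/* Taken together, the GD->LE rewrites above turn the assembler's
   general-dynamic sequence for a locally resolved symbol into:

     adrp x0, :tlsgd:var            ->  movz x0, :tprel_g1:var
     add  x0, x0, :tlsgd_lo12:var   ->  movk x0, :tprel_g0_nc:var
     bl   __tls_get_addr            ->  mrs  x1, tpidr_el0
     nop                            ->  add  x0, x1, x0

   while the GD->IE path keeps the adrp/ldr pair but retargets it at
   the GOTTPREL entry and still replaces the call with the mrs/add
   pair.  */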
4272
4273/* Relocate an AArch64 ELF section. */
4274
4275static bfd_boolean
4276elf64_aarch64_relocate_section (bfd *output_bfd,
4277 struct bfd_link_info *info,
4278 bfd *input_bfd,
4279 asection *input_section,
4280 bfd_byte *contents,
4281 Elf_Internal_Rela *relocs,
4282 Elf_Internal_Sym *local_syms,
4283 asection **local_sections)
4284{
4285 Elf_Internal_Shdr *symtab_hdr;
4286 struct elf_link_hash_entry **sym_hashes;
4287 Elf_Internal_Rela *rel;
4288 Elf_Internal_Rela *relend;
4289 const char *name;
4290 struct elf64_aarch64_link_hash_table *globals;
4291 bfd_boolean save_addend = FALSE;
4292 bfd_vma addend = 0;
4293
4294 globals = elf64_aarch64_hash_table (info);
4295
4296 symtab_hdr = &elf_symtab_hdr (input_bfd);
4297 sym_hashes = elf_sym_hashes (input_bfd);
4298
4299 rel = relocs;
4300 relend = relocs + input_section->reloc_count;
4301 for (; rel < relend; rel++)
4302 {
4303 unsigned int r_type;
4304 unsigned int relaxed_r_type;
4305 reloc_howto_type *howto;
4306 unsigned long r_symndx;
4307 Elf_Internal_Sym *sym;
4308 asection *sec;
4309 struct elf_link_hash_entry *h;
4310 bfd_vma relocation;
4311 bfd_reloc_status_type r;
4312 arelent bfd_reloc;
4313 char sym_type;
4314 bfd_boolean unresolved_reloc = FALSE;
4315 char *error_message = NULL;
4316
4317 r_symndx = ELF64_R_SYM (rel->r_info);
4318 r_type = ELF64_R_TYPE (rel->r_info);
4319
4320 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4321 howto = bfd_reloc.howto;
4322
4323 h = NULL;
4324 sym = NULL;
4325 sec = NULL;
4326
4327 if (r_symndx < symtab_hdr->sh_info)
4328 {
4329 sym = local_syms + r_symndx;
4330 sym_type = ELF64_ST_TYPE (sym->st_info);
4331 sec = local_sections[r_symndx];
4332
4333 /* An object file might have a reference to a local
4334 undefined symbol. This is a daft object file, but we
4335 should at least do something about it. */
4336 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4337 && bfd_is_und_section (sec)
4338 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4339 {
4340 if (!info->callbacks->undefined_symbol
4341 (info, bfd_elf_string_from_elf_section
4342 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4343 input_bfd, input_section, rel->r_offset, TRUE))
4344 return FALSE;
4345 }
4346
4347 if (r_type >= R_AARCH64_dyn_max)
4348 {
4349 bfd_set_error (bfd_error_bad_value);
4350 return FALSE;
4351 }
4352
4353 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4354 }
4355 else
4356 {
4357 bfd_boolean warned;
4358
4359 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4360 r_symndx, symtab_hdr, sym_hashes,
4361 h, sec, relocation,
4362 unresolved_reloc, warned);
4363
4364 sym_type = h->type;
4365 }
4366
4367 if (sec != NULL && discarded_section (sec))
4368 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4369 rel, 1, relend, howto, 0, contents);
4370
4371 if (info->relocatable)
4372 {
4373 /* This is a relocatable link. We don't have to change
4374 anything, unless the reloc is against a section symbol,
4375 in which case we have to adjust according to where the
4376 section symbol winds up in the output section. */
4377 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4378 rel->r_addend += sec->output_offset;
4379 continue;
4380 }
4381
4382 if (h != NULL)
4383 name = h->root.root.string;
4384 else
4385 {
4386 name = (bfd_elf_string_from_elf_section
4387 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4388 if (name == NULL || *name == '\0')
4389 name = bfd_section_name (input_bfd, sec);
4390 }
4391
4392 if (r_symndx != 0
4393 && r_type != R_AARCH64_NONE
4394 && r_type != R_AARCH64_NULL
4395 && (h == NULL
4396 || h->root.type == bfd_link_hash_defined
4397 || h->root.type == bfd_link_hash_defweak)
4398 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4399 {
4400 (*_bfd_error_handler)
4401 ((sym_type == STT_TLS
4402 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4403 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4404 input_bfd,
4405 input_section, (long) rel->r_offset, howto->name, name);
4406 }
4407
4408
4409 /* We relax only if we can see that there can be a valid transition
4410 from one reloc type to another.
4411 We call elf64_aarch64_final_link_relocate unless we're completely
4412 done, i.e., the relaxation produced the final output we want. */
4413
4414 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4415 h, r_symndx);
4416 if (relaxed_r_type != r_type)
4417 {
4418 r_type = relaxed_r_type;
4419 howto = elf64_aarch64_howto_from_type (r_type);
4420
4421 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4422 unresolved_reloc = 0;
4423 }
4424 else
4425 r = bfd_reloc_continue;
4426
4427 /* There may be multiple consecutive relocations for the
4428 same offset. In that case we are supposed to treat the
4429 output of each relocation as the addend for the next. */
4430 if (rel + 1 < relend
4431 && rel->r_offset == rel[1].r_offset
4432 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4433 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4434 save_addend = TRUE;
4435 else
4436 save_addend = FALSE;
4437
4438 if (r == bfd_reloc_continue)
4439 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4440 input_section, contents, rel,
4441 relocation, info, sec,
4442 h, &unresolved_reloc,
4443 save_addend, &addend);
4444
4445 switch (r_type)
4446 {
4447 case R_AARCH64_TLSGD_ADR_PAGE21:
4448 case R_AARCH64_TLSGD_ADD_LO12_NC:
4449 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4450 {
4451 bfd_boolean need_relocs = FALSE;
4452 bfd_byte *loc;
4453 int indx;
4454 bfd_vma off;
4455
4456 off = symbol_got_offset (input_bfd, h, r_symndx);
4457 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4458
4459 need_relocs =
4460 (info->shared || indx != 0) &&
4461 (h == NULL
4462 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4463 || h->root.type != bfd_link_hash_undefweak);
4464
4465 BFD_ASSERT (globals->root.srelgot != NULL);
4466
4467 if (need_relocs)
4468 {
4469 Elf_Internal_Rela rela;
4470 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4471 rela.r_addend = 0;
4472 rela.r_offset = globals->root.sgot->output_section->vma +
4473 globals->root.sgot->output_offset + off;
4474
4475
4476 loc = globals->root.srelgot->contents;
4477 loc += globals->root.srelgot->reloc_count++
4478 * RELOC_SIZE (htab);
4479 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4480
4481 if (indx == 0)
4482 {
4483 bfd_put_64 (output_bfd,
4484 relocation - dtpoff_base (info),
4485 globals->root.sgot->contents + off
4486 + GOT_ENTRY_SIZE);
4487 }
4488 else
4489 {
4490 /* This TLS symbol is global. We emit a
4491 relocation to fixup the tls offset at load
4492 time. */
4493 rela.r_info =
4494 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4495 rela.r_addend = 0;
4496 rela.r_offset =
4497 (globals->root.sgot->output_section->vma
4498 + globals->root.sgot->output_offset + off
4499 + GOT_ENTRY_SIZE);
4500
4501 loc = globals->root.srelgot->contents;
4502 loc += globals->root.srelgot->reloc_count++
4503 * RELOC_SIZE (globals);
4504 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4505 bfd_put_64 (output_bfd, (bfd_vma) 0,
4506 globals->root.sgot->contents + off
4507 + GOT_ENTRY_SIZE);
4508 }
4509 }
4510 else
4511 {
4512 bfd_put_64 (output_bfd, (bfd_vma) 1,
4513 globals->root.sgot->contents + off);
4514 bfd_put_64 (output_bfd,
4515 relocation - dtpoff_base (info),
4516 globals->root.sgot->contents + off
4517 + GOT_ENTRY_SIZE);
4518 }
4519
4520 symbol_got_offset_mark (input_bfd, h, r_symndx);
4521 }
4522 break;
4523
4524 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4525 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4526 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4527 {
4528 bfd_boolean need_relocs = FALSE;
4529 bfd_byte *loc;
4530 int indx;
4531 bfd_vma off;
4532
4533 off = symbol_got_offset (input_bfd, h, r_symndx);
4534
4535 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4536
4537 need_relocs =
4538 (info->shared || indx != 0) &&
4539 (h == NULL
4540 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4541 || h->root.type != bfd_link_hash_undefweak);
4542
4543 BFD_ASSERT (globals->root.srelgot != NULL);
4544
4545 if (need_relocs)
4546 {
4547 Elf_Internal_Rela rela;
4548
4549 if (indx == 0)
4550 rela.r_addend = relocation - dtpoff_base (info);
4551 else
4552 rela.r_addend = 0;
4553
4554 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4555 rela.r_offset = globals->root.sgot->output_section->vma +
4556 globals->root.sgot->output_offset + off;
4557
4558 loc = globals->root.srelgot->contents;
4559 loc += globals->root.srelgot->reloc_count++
4560 * RELOC_SIZE (htab);
4561
4562 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4563
4564 bfd_put_64 (output_bfd, rela.r_addend,
4565 globals->root.sgot->contents + off);
4566 }
4567 else
4568 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4569 globals->root.sgot->contents + off);
4570
4571 symbol_got_offset_mark (input_bfd, h, r_symndx);
4572 }
4573 break;
4574
4575 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4576 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4577 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4578 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4579 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4580 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4581 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4582 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4583 break;
4584
4585 case R_AARCH64_TLSDESC_ADR_PAGE:
4586 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4587 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4588 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4589 {
4590 bfd_boolean need_relocs = FALSE;
4591 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4592 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4593
4594 need_relocs = (h == NULL
4595 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4596 || h->root.type != bfd_link_hash_undefweak);
4597
4598 BFD_ASSERT (globals->root.srelgot != NULL);
4599 BFD_ASSERT (globals->root.sgot != NULL);
4600
4601 if (need_relocs)
4602 {
4603 bfd_byte *loc;
4604 Elf_Internal_Rela rela;
4605 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4606 rela.r_addend = 0;
4607 rela.r_offset = (globals->root.sgotplt->output_section->vma
4608 + globals->root.sgotplt->output_offset
4609 + off + globals->sgotplt_jump_table_size);
4610
4611 if (indx == 0)
4612 rela.r_addend = relocation - dtpoff_base (info);
4613
4614 /* Allocate the next available slot in the PLT reloc
4615 section to hold our R_AARCH64_TLSDESC, the next
4616 available slot is determined from reloc_count,
4617 which we step. But note, reloc_count was
4618 artificially moved down while allocating slots for
4619 real PLT relocs such that all of the PLT relocs
4620 will fit above the initial reloc_count and the
4621 extra stuff will fit below. */
4622 loc = globals->root.srelplt->contents;
4623 loc += globals->root.srelplt->reloc_count++
4624 * RELOC_SIZE (globals);
4625
4626 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4627
4628 bfd_put_64 (output_bfd, (bfd_vma) 0,
4629 globals->root.sgotplt->contents + off +
4630 globals->sgotplt_jump_table_size);
4631 bfd_put_64 (output_bfd, (bfd_vma) 0,
4632 globals->root.sgotplt->contents + off +
4633 globals->sgotplt_jump_table_size +
4634 GOT_ENTRY_SIZE);
4635 }
4636
4637 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4638 }
4639 break;
4640 }
4641
4642 if (!save_addend)
4643 addend = 0;
4644
4645
4646 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4647 because such sections are not SEC_ALLOC and thus ld.so will
4648 not process them. */
4649 if (unresolved_reloc
4650 && !((input_section->flags & SEC_DEBUGGING) != 0
4651 && h->def_dynamic)
4652 && _bfd_elf_section_offset (output_bfd, info, input_section,
4653 +rel->r_offset) != (bfd_vma) - 1)
4654 {
4655 (*_bfd_error_handler)
4656 (_
4657 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4658 input_bfd, input_section, (long) rel->r_offset, howto->name,
4659 h->root.root.string);
4660 return FALSE;
4661 }
4662
4663 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4664 {
4665 switch (r)
4666 {
4667 case bfd_reloc_overflow:
4668 /* If the overflowing reloc was to an undefined symbol,
4669 we have already printed one error message and there
4670 is no point complaining again. */
4671 if ((!h ||
4672 h->root.type != bfd_link_hash_undefined)
4673 && (!((*info->callbacks->reloc_overflow)
4674 (info, (h ? &h->root : NULL), name, howto->name,
4675 (bfd_vma) 0, input_bfd, input_section,
4676 rel->r_offset))))
4677 return FALSE;
4678 break;
4679
4680 case bfd_reloc_undefined:
4681 if (!((*info->callbacks->undefined_symbol)
4682 (info, name, input_bfd, input_section,
4683 rel->r_offset, TRUE)))
4684 return FALSE;
4685 break;
4686
4687 case bfd_reloc_outofrange:
4688 error_message = _("out of range");
4689 goto common_error;
4690
4691 case bfd_reloc_notsupported:
4692 error_message = _("unsupported relocation");
4693 goto common_error;
4694
4695 case bfd_reloc_dangerous:
4696 /* error_message should already be set. */
4697 goto common_error;
4698
4699 default:
4700 error_message = _("unknown error");
4701 /* Fall through. */
4702
4703 common_error:
4704 BFD_ASSERT (error_message != NULL);
4705 if (!((*info->callbacks->reloc_dangerous)
4706 (info, error_message, input_bfd, input_section,
4707 rel->r_offset)))
4708 return FALSE;
4709 break;
4710 }
4711 }
4712 }
4713
4714 return TRUE;
4715}
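/* Note on the saved_addend chaining used above: when two or more
   relocations share the same r_offset, each intermediate call to
   elf64_aarch64_final_link_relocate is made with save_addend set, so
   it stores its computed value in ADDEND and returns
   bfd_reloc_continue instead of patching the section; only the last
   relocation of the run actually writes the instruction via
   bfd_elf_aarch64_put_addend.  */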
4716
4717/* Set the right machine number. */
4718
4719static bfd_boolean
4720elf64_aarch64_object_p (bfd *abfd)
4721{
4722 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4723 return TRUE;
4724}
4725
4726/* Function to keep AArch64 specific flags in the ELF header. */
4727
4728static bfd_boolean
4729elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4730{
4731 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4732 {
4733 }
4734 else
4735 {
4736 elf_elfheader (abfd)->e_flags = flags;
4737 elf_flags_init (abfd) = TRUE;
4738 }
4739
4740 return TRUE;
4741}
4742
4743/* Copy backend specific data from one object module to another. */
4744
4745static bfd_boolean
4746elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4747{
4748 flagword in_flags;
4749
4750 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4751 return TRUE;
4752
4753 in_flags = elf_elfheader (ibfd)->e_flags;
4754
4755 elf_elfheader (obfd)->e_flags = in_flags;
4756 elf_flags_init (obfd) = TRUE;
4757
4758 /* Also copy the EI_OSABI field. */
4759 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4760 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4761
4762 /* Copy object attributes. */
4763 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4764
4765 return TRUE;
4766}
4767
4768/* Merge backend specific data from an object file to the output
4769 object file when linking. */
4770
4771static bfd_boolean
4772elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4773{
4774 flagword out_flags;
4775 flagword in_flags;
4776 bfd_boolean flags_compatible = TRUE;
4777 asection *sec;
4778
4779 /* Check if we have the same endianness. */
4780 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4781 return FALSE;
4782
4783 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4784 return TRUE;
4785
4786 /* The input BFD must have had its flags initialised. */
4787 /* The following seems bogus to me -- The flags are initialized in
4788 the assembler but I don't think an elf_flags_init field is
4789 written into the object. */
4790 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4791
4792 in_flags = elf_elfheader (ibfd)->e_flags;
4793 out_flags = elf_elfheader (obfd)->e_flags;
4794
4795 if (!elf_flags_init (obfd))
4796 {
4797 /* If the input is the default architecture and had the default
4798 flags then do not bother setting the flags for the output
4799 architecture, instead allow future merges to do this. If no
4800 future merges ever set these flags then they will retain their
4801 uninitialised values, which surprise surprise, correspond
4802 to the default values. */
4803 if (bfd_get_arch_info (ibfd)->the_default
4804 && elf_elfheader (ibfd)->e_flags == 0)
4805 return TRUE;
4806
4807 elf_flags_init (obfd) = TRUE;
4808 elf_elfheader (obfd)->e_flags = in_flags;
4809
4810 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4811 && bfd_get_arch_info (obfd)->the_default)
4812 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4813 bfd_get_mach (ibfd));
4814
4815 return TRUE;
4816 }
4817
4818 /* Identical flags must be compatible. */
4819 if (in_flags == out_flags)
4820 return TRUE;
4821
4822 /* Check to see if the input BFD actually contains any sections. If
4823 not, its flags may not have been initialised either, but it
4824 cannot actually cause any incompatibility. Do not short-circuit
4825 dynamic objects; their section list may be emptied by
4826 elf_link_add_object_symbols.
4827
4828 Also check to see if there are no code sections in the input.
4829 In this case there is no need to check for code specific flags.
4830 XXX - do we need to worry about floating-point format compatibility
4831 in data sections? */
4832 if (!(ibfd->flags & DYNAMIC))
4833 {
4834 bfd_boolean null_input_bfd = TRUE;
4835 bfd_boolean only_data_sections = TRUE;
4836
4837 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4838 {
4839 if ((bfd_get_section_flags (ibfd, sec)
4840 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4841 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4842 only_data_sections = FALSE;
4843
4844 null_input_bfd = FALSE;
4845 break;
4846 }
4847
4848 if (null_input_bfd || only_data_sections)
4849 return TRUE;
4850 }
4851
4852 return flags_compatible;
4853}
4854
4855/* Display the flags field. */
4856
4857static bfd_boolean
4858elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4859{
4860 FILE *file = (FILE *) ptr;
4861 unsigned long flags;
4862
4863 BFD_ASSERT (abfd != NULL && ptr != NULL);
4864
4865 /* Print normal ELF private data. */
4866 _bfd_elf_print_private_bfd_data (abfd, ptr);
4867
4868 flags = elf_elfheader (abfd)->e_flags;
4869 /* Ignore init flag - it may not be set, despite the flags field
4870 containing valid data. */
4871
4872 /* xgettext:c-format */
4873 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4874
4875 if (flags)
4876 fprintf (file, _("<Unrecognised flag bits set>"));
4877
4878 fputc ('\n', file);
4879
4880 return TRUE;
4881}
4882
4883/* Update the got entry reference counts for the section being removed. */
4884
4885static bfd_boolean
4886elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED,
4887 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4888 asection *sec ATTRIBUTE_UNUSED,
4889 const Elf_Internal_Rela *
4890 relocs ATTRIBUTE_UNUSED)
4891{
4892 return TRUE;
4893}
4894
4895/* Adjust a symbol defined by a dynamic object and referenced by a
4896 regular object. The current definition is in some section of the
4897 dynamic object, but we're not including those sections. We have to
4898 change the definition to something the rest of the link can
4899 understand. */
4900
4901static bfd_boolean
4902elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
4903 struct elf_link_hash_entry *h)
4904{
4905 struct elf64_aarch64_link_hash_table *htab;
4906 asection *s;
4907
4908 /* If this is a function, put it in the procedure linkage table. We
4909 will fill in the contents of the procedure linkage table later,
4910 when we know the address of the .got section. */
4911 if (h->type == STT_FUNC || h->needs_plt)
4912 {
4913 if (h->plt.refcount <= 0
4914 || SYMBOL_CALLS_LOCAL (info, h)
4915 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
4916 && h->root.type == bfd_link_hash_undefweak))
4917 {
4918 /* This case can occur if we saw a CALL26 reloc in
4919 an input file, but the symbol wasn't referred to
4920 by a dynamic object or all references were
4921 garbage collected. In that case we do not need a PLT
4922 entry and the branch can be resolved directly. */
4923 h->plt.offset = (bfd_vma) - 1;
4924 h->needs_plt = 0;
4925 }
4926
4927 return TRUE;
4928 }
4929 else
4930 /* It's possible that we incorrectly decided a .plt reloc was
4931 needed for a PC-relative reloc to a non-function sym in
4932 check_relocs. We can't decide accurately between function and
4933 non-function syms in check_relocs; objects loaded later in
4934 the link may change h->type. So fix it now. */
4935 h->plt.offset = (bfd_vma) - 1;
4936
4937
4938 /* If this is a weak symbol, and there is a real definition, the
4939 processor independent code will have arranged for us to see the
4940 real definition first, and we can just use the same value. */
4941 if (h->u.weakdef != NULL)
4942 {
4943 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
4944 || h->u.weakdef->root.type == bfd_link_hash_defweak);
4945 h->root.u.def.section = h->u.weakdef->root.u.def.section;
4946 h->root.u.def.value = h->u.weakdef->root.u.def.value;
4947 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
4948 h->non_got_ref = h->u.weakdef->non_got_ref;
4949 return TRUE;
4950 }
4951
4952 /* If we are creating a shared library, we must presume that the
4953 only references to the symbol are via the global offset table.
4954 For such cases we need not do anything here; the relocations will
4955 be handled correctly by relocate_section. */
4956 if (info->shared)
4957 return TRUE;
4958
4959 /* If there are no references to this symbol that do not use the
4960 GOT, we don't need to generate a copy reloc. */
4961 if (!h->non_got_ref)
4962 return TRUE;
4963
4964 /* If -z nocopyreloc was given, we won't generate them either. */
4965 if (info->nocopyreloc)
4966 {
4967 h->non_got_ref = 0;
4968 return TRUE;
4969 }
4970
4971 /* We must allocate the symbol in our .dynbss section, which will
4972 become part of the .bss section of the executable. There will be
4973 an entry for this symbol in the .dynsym section. The dynamic
4974 object will contain position independent code, so all references
4975 from the dynamic object to this symbol will go through the global
4976 offset table. The dynamic linker will use the .dynsym entry to
4977 determine the address it must put in the global offset table, so
4978 both the dynamic object and the regular object will refer to the
4979 same memory location for the variable. */
4980
4981 htab = elf64_aarch64_hash_table (info);
4982
4983 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
4984 to copy the initial value out of the dynamic object and into the
4985 runtime process image. */
4986 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
4987 {
4988 htab->srelbss->size += RELOC_SIZE (htab);
4989 h->needs_copy = 1;
4990 }
4991
4992 s = htab->sdynbss;
4993
4994 return _bfd_elf_adjust_dynamic_copy (h, s);
4995
4996}
4997
4998static bfd_boolean
4999elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5000{
5001 struct elf_aarch64_local_symbol *locals;
5002 locals = elf64_aarch64_locals (abfd);
5003 if (locals == NULL)
5004 {
5005 locals = (struct elf_aarch64_local_symbol *)
5006 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5007 if (locals == NULL)
5008 return FALSE;
5009 elf64_aarch64_locals (abfd) = locals;
5010 }
5011 return TRUE;
5012}
5013
5014/* Look through the relocs for a section during the first phase. */
5015
5016static bfd_boolean
5017elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5018 asection *sec, const Elf_Internal_Rela *relocs)
5019{
5020 Elf_Internal_Shdr *symtab_hdr;
5021 struct elf_link_hash_entry **sym_hashes;
5022 const Elf_Internal_Rela *rel;
5023 const Elf_Internal_Rela *rel_end;
5024 asection *sreloc;
5025
5026 struct elf64_aarch64_link_hash_table *htab;
5027
5028 unsigned long nsyms;
5029
5030 if (info->relocatable)
5031 return TRUE;
5032
5033 BFD_ASSERT (is_aarch64_elf (abfd));
5034
5035 htab = elf64_aarch64_hash_table (info);
5036 sreloc = NULL;
5037
5038 symtab_hdr = &elf_symtab_hdr (abfd);
5039 sym_hashes = elf_sym_hashes (abfd);
5040 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
5041
5042 rel_end = relocs + sec->reloc_count;
5043 for (rel = relocs; rel < rel_end; rel++)
5044 {
5045 struct elf_link_hash_entry *h;
5046 unsigned long r_symndx;
5047 unsigned int r_type;
5048
5049 r_symndx = ELF64_R_SYM (rel->r_info);
5050 r_type = ELF64_R_TYPE (rel->r_info);
5051
5052 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5053 {
5054 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5055 r_symndx);
5056 return FALSE;
5057 }
5058
5059 if (r_symndx >= nsyms
5060 /* PR 9934: It is possible to have relocations that do not
5061 refer to symbols, thus it is also possible to have an
5062 object file containing relocations but no symbol table. */
5063 && (r_symndx > 0 || nsyms > 0))
5064 {
5065 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5066 r_symndx);
5067 return FALSE;
5068 }
5069
5070 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
5071 h = NULL;
5072 else
5073 {
5074 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5075 while (h->root.type == bfd_link_hash_indirect
5076 || h->root.type == bfd_link_hash_warning)
5077 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5078 }
5079
5080 /* Could be done earlier, if h were already available. */
5081 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5082
5083 switch (r_type)
5084 {
5085 case R_AARCH64_ABS64:
5086
5087 /* We don't need to handle relocs into sections not going into
5088 the "real" output. */
5089 if ((sec->flags & SEC_ALLOC) == 0)
5090 break;
5091
5092 if (h != NULL)
5093 {
5094 if (!info->shared)
5095 h->non_got_ref = 1;
5096
5097 h->plt.refcount += 1;
5098 h->pointer_equality_needed = 1;
5099 }
5100
5101 /* No need to do anything if we're not creating a shared
5102 object. */
5103 if (! info->shared)
5104 break;
5105
5106 {
5107 struct elf_dyn_relocs *p;
5108 struct elf_dyn_relocs **head;
5109
5110 /* We must copy these reloc types into the output file.
5111 Create a reloc section in dynobj and make room for
5112 this reloc. */
5113 if (sreloc == NULL)
5114 {
5115 if (htab->root.dynobj == NULL)
5116 htab->root.dynobj = abfd;
5117
5118 sreloc = _bfd_elf_make_dynamic_reloc_section
5119 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5120
5121 if (sreloc == NULL)
5122 return FALSE;
5123 }
5124
5125 /* If this is a global symbol, we count the number of
5126 relocations we need for this symbol. */
5127 if (h != NULL)
5128 {
5129 struct elf64_aarch64_link_hash_entry *eh;
5130 eh = (struct elf64_aarch64_link_hash_entry *) h;
5131 head = &eh->dyn_relocs;
5132 }
5133 else
5134 {
5135 /* Track dynamic relocs needed for local syms too.
5136 We really need local syms available to do this
5137 easily. Oh well. */
5138
5139 asection *s;
5140 void **vpp;
5141 Elf_Internal_Sym *isym;
5142
5143 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5144 abfd, r_symndx);
5145 if (isym == NULL)
5146 return FALSE;
5147
5148 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5149 if (s == NULL)
5150 s = sec;
5151
5152 /* Beware of type punned pointers vs strict aliasing
5153 rules. */
5154 vpp = &(elf_section_data (s)->local_dynrel);
5155 head = (struct elf_dyn_relocs **) vpp;
5156 }
5157
5158 p = *head;
5159 if (p == NULL || p->sec != sec)
5160 {
5161 bfd_size_type amt = sizeof *p;
5162 p = ((struct elf_dyn_relocs *)
5163 bfd_zalloc (htab->root.dynobj, amt));
5164 if (p == NULL)
5165 return FALSE;
5166 p->next = *head;
5167 *head = p;
5168 p->sec = sec;
5169 }
5170
5171 p->count += 1;
5172
5173 }
5174 break;
5175
5176 /* RR: We probably want to keep a consistency check that
5177 there are no dangling GOT_PAGE relocs. */
5178 case R_AARCH64_LD64_GOT_LO12_NC:
5179 case R_AARCH64_GOT_LD_PREL19:
5180 case R_AARCH64_ADR_GOT_PAGE:
5181 case R_AARCH64_TLSGD_ADR_PAGE21:
5182 case R_AARCH64_TLSGD_ADD_LO12_NC:
5183 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5184 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5185 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5186 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5187 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5188 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5189 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5190 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5191 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5192 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5193 case R_AARCH64_TLSDESC_ADR_PAGE:
5194 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5195 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5196 {
5197 unsigned got_type;
5198 unsigned old_got_type;
5199
5200 got_type = aarch64_reloc_got_type (r_type);
5201
5202 if (h)
5203 {
5204 h->got.refcount += 1;
5205 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5206 }
5207 else
5208 {
5209 struct elf_aarch64_local_symbol *locals;
5210
5211 if (!elf64_aarch64_allocate_local_symbols
5212 (abfd, symtab_hdr->sh_info))
5213 return FALSE;
5214
5215 locals = elf64_aarch64_locals (abfd);
5216 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5217 locals[r_symndx].got_refcount += 1;
5218 old_got_type = locals[r_symndx].got_type;
5219 }
5220
5221 /* If a variable is accessed with both general dynamic TLS
5222 methods, two slots may be created. */
5223 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5224 got_type |= old_got_type;
5225
5226 /* We will already have issued an error message if there
5227 is a TLS/non-TLS mismatch, based on the symbol type.
5228 So just combine any TLS types needed. */
5229 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5230 && got_type != GOT_NORMAL)
5231 got_type |= old_got_type;
5232
5233 /* If the symbol is accessed by both IE and GD methods, we
5234 are able to relax. Turn off the GD flag, without
5235 messing up with any other kind of TLS types that may be
5236 involved. */
5237 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5238 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5239
5240 if (old_got_type != got_type)
5241 {
5242 if (h != NULL)
5243 elf64_aarch64_hash_entry (h)->got_type = got_type;
5244 else
5245 {
5246 struct elf_aarch64_local_symbol *locals;
5247 locals = elf64_aarch64_locals (abfd);
5248 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5249 locals[r_symndx].got_type = got_type;
5250 }
5251 }
5252
5253 if (htab->root.sgot == NULL)
5254 {
5255 if (htab->root.dynobj == NULL)
5256 htab->root.dynobj = abfd;
5257 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5258 return FALSE;
5259 }
5260 break;
5261 }
5262
5263 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5264 case R_AARCH64_ADR_PREL_PG_HI21:
5265 case R_AARCH64_ADR_PREL_LO21:
5266 if (h != NULL && info->executable)
5267 {
5268 /* If this reloc is in a read-only section, we might
5269 need a copy reloc. We can't check reliably at this
5270 stage whether the section is read-only, as input
5271 sections have not yet been mapped to output sections.
5272 Tentatively set the flag for now, and correct in
5273 adjust_dynamic_symbol. */
5274 h->non_got_ref = 1;
5275 h->plt.refcount += 1;
5276 h->pointer_equality_needed = 1;
5277 }
5278 /* FIXME: RR - we need to handle these in shared libraries,
5279 and essentially bomb out, as these are non-PIC
5280 relocations when they appear in shared libraries. */
5281 break;
5282
5283 case R_AARCH64_CALL26:
5284 case R_AARCH64_JUMP26:
5285 /* If this is a local symbol then we resolve it
5286 directly without creating a PLT entry. */
5287 if (h == NULL)
5288 continue;
5289
5290 h->needs_plt = 1;
5291 h->plt.refcount += 1;
5292 break;
5293 }
5294 }
5295 return TRUE;
5296}
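/* Example of the GOT type merging performed above, with a hypothetical
   symbol: if "var" is referenced through both R_AARCH64_TLSDESC_* and
   R_AARCH64_TLSIE_* relocations, the combined type first becomes
   GOT_TLSDESC_GD | GOT_TLS_IE and the GD bits are then dropped, so a
   single IE-style GOT slot is allocated and the descriptor accesses
   are later relaxed to IE (or LE) form.  If a symbol is accessed with
   both traditional GD and TLSDESC relocations, both kinds are kept and
   two GOT slots may be created.  */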
5297
5298/* Treat mapping symbols as special target symbols. */
5299
5300static bfd_boolean
5301elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5302 asymbol *sym)
5303{
5304 return bfd_is_aarch64_special_symbol_name (sym->name,
5305 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5306}
5307
5308/* This is a copy of elf_find_function () from elf.c except that
5309 AArch64 mapping symbols are ignored when looking for function names. */
5310
5311static bfd_boolean
5312aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5313 asection *section,
5314 asymbol **symbols,
5315 bfd_vma offset,
5316 const char **filename_ptr,
5317 const char **functionname_ptr)
5318{
5319 const char *filename = NULL;
5320 asymbol *func = NULL;
5321 bfd_vma low_func = 0;
5322 asymbol **p;
5323
5324 for (p = symbols; *p != NULL; p++)
5325 {
5326 elf_symbol_type *q;
5327
5328 q = (elf_symbol_type *) * p;
5329
5330 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5331 {
5332 default:
5333 break;
5334 case STT_FILE:
5335 filename = bfd_asymbol_name (&q->symbol);
5336 break;
5337 case STT_FUNC:
5338 case STT_NOTYPE:
5339 /* Skip mapping symbols. */
5340 if ((q->symbol.flags & BSF_LOCAL)
5341 && (bfd_is_aarch64_special_symbol_name
5342 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5343 continue;
5344 /* Fall through. */
5345 if (bfd_get_section (&q->symbol) == section
5346 && q->symbol.value >= low_func && q->symbol.value <= offset)
5347 {
5348 func = (asymbol *) q;
5349 low_func = q->symbol.value;
5350 }
5351 break;
5352 }
5353 }
5354
5355 if (func == NULL)
5356 return FALSE;
5357
5358 if (filename_ptr)
5359 *filename_ptr = filename;
5360 if (functionname_ptr)
5361 *functionname_ptr = bfd_asymbol_name (func);
5362
5363 return TRUE;
5364}
5365
5366
5367/* Find the nearest line to a particular section and offset, for error
5368 reporting. This code is a duplicate of the code in elf.c, except
5369 that it uses aarch64_elf_find_function. */
5370
5371static bfd_boolean
5372elf64_aarch64_find_nearest_line (bfd *abfd,
5373 asection *section,
5374 asymbol **symbols,
5375 bfd_vma offset,
5376 const char **filename_ptr,
5377 const char **functionname_ptr,
5378 unsigned int *line_ptr)
5379{
5380 bfd_boolean found = FALSE;
5381
5382 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5383 toolchain uses it. */
5384
5385 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5386 section, symbols, offset,
5387 filename_ptr, functionname_ptr,
5388 line_ptr, NULL, 0,
5389 &elf_tdata (abfd)->dwarf2_find_line_info))
5390 {
5391 if (!*functionname_ptr)
5392 aarch64_elf_find_function (abfd, section, symbols, offset,
5393 *filename_ptr ? NULL : filename_ptr,
5394 functionname_ptr);
5395
5396 return TRUE;
5397 }
5398
5399 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5400 &found, filename_ptr,
5401 functionname_ptr, line_ptr,
5402 &elf_tdata (abfd)->line_info))
5403 return FALSE;
5404
5405 if (found && (*functionname_ptr || *line_ptr))
5406 return TRUE;
5407
5408 if (symbols == NULL)
5409 return FALSE;
5410
5411 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5412 filename_ptr, functionname_ptr))
5413 return FALSE;
5414
5415 *line_ptr = 0;
5416 return TRUE;
5417}
5418
5419static bfd_boolean
5420elf64_aarch64_find_inliner_info (bfd *abfd,
5421 const char **filename_ptr,
5422 const char **functionname_ptr,
5423 unsigned int *line_ptr)
5424{
5425 bfd_boolean found;
5426 found = _bfd_dwarf2_find_inliner_info
5427 (abfd, filename_ptr,
5428 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5429 return found;
5430}
5431
5432
5433static void
5434elf64_aarch64_post_process_headers (bfd *abfd,
5435 struct bfd_link_info *link_info
5436 ATTRIBUTE_UNUSED)
5437{
5438 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5439
5440 i_ehdrp = elf_elfheader (abfd);
5441 i_ehdrp->e_ident[EI_OSABI] = 0;
5442 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5443}
5444
5445static enum elf_reloc_type_class
5446elf64_aarch64_reloc_type_class (const Elf_Internal_Rela *rela)
5447{
5448 switch ((int) ELF64_R_TYPE (rela->r_info))
5449 {
5450 case R_AARCH64_RELATIVE:
5451 return reloc_class_relative;
5452 case R_AARCH64_JUMP_SLOT:
5453 return reloc_class_plt;
5454 case R_AARCH64_COPY:
5455 return reloc_class_copy;
5456 default:
5457 return reloc_class_normal;
5458 }
5459}
5460
5461/* Set AArch64-specific section flags: merge duplicate SHT_NOTE sections. */
5462
5463static bfd_boolean
5464elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5465{
5466 if (hdr->sh_type == SHT_NOTE)
5467 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5468
5469 return TRUE;
5470}
5471
5472/* Handle an AArch64 specific section when reading an object file. This is
5473 called when bfd_section_from_shdr finds a section with an unknown
5474 type. */
5475
5476static bfd_boolean
5477elf64_aarch64_section_from_shdr (bfd *abfd,
5478 Elf_Internal_Shdr *hdr,
5479 const char *name, int shindex)
5480{
5481 /* There ought to be a place to keep ELF backend specific flags, but
5482 at the moment there isn't one. We just keep track of the
5483 sections by their name, instead. Fortunately, the ABI gives
5484 names for all the AArch64 specific sections, so we will probably get
5485 away with this. */
5486 switch (hdr->sh_type)
5487 {
5488 case SHT_AARCH64_ATTRIBUTES:
5489 break;
5490
5491 default:
5492 return FALSE;
5493 }
5494
5495 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5496 return FALSE;
5497
5498 return TRUE;
5499}
5500
5501/* A structure used to record a list of sections, independently
5502 of the next and prev fields in the asection structure. */
5503typedef struct section_list
5504{
5505 asection *sec;
5506 struct section_list *next;
5507 struct section_list *prev;
5508}
5509section_list;
5510
5511/* Unfortunately we need to keep a list of sections for which
5512 an _aarch64_elf_section_data structure has been allocated. This
5513 is because it is possible for functions like elf64_aarch64_write_section
5514 to be called on a section which has had an elf_data_structure
5515 allocated for it (and so the used_by_bfd field is valid) but
5516 for which the AArch64 extended version of this structure - the
5517 _aarch64_elf_section_data structure - has not been allocated. */
5518static section_list *sections_with_aarch64_elf_section_data = NULL;
5519
5520static void
5521record_section_with_aarch64_elf_section_data (asection *sec)
5522{
5523 struct section_list *entry;
5524
5525 entry = bfd_malloc (sizeof (*entry));
5526 if (entry == NULL)
5527 return;
5528 entry->sec = sec;
5529 entry->next = sections_with_aarch64_elf_section_data;
5530 entry->prev = NULL;
5531 if (entry->next != NULL)
5532 entry->next->prev = entry;
5533 sections_with_aarch64_elf_section_data = entry;
5534}
5535
5536static struct section_list *
5537find_aarch64_elf_section_entry (asection *sec)
5538{
5539 struct section_list *entry;
5540 static struct section_list *last_entry = NULL;
5541
5542 /* This is a short cut for the typical case where the sections are added
5543 to the sections_with_aarch64_elf_section_data list in forward order and
5544 then looked up here in backwards order. This makes a real difference
5545 to the ld-srec/sec64k.exp linker test. */
5546 entry = sections_with_aarch64_elf_section_data;
5547 if (last_entry != NULL)
5548 {
5549 if (last_entry->sec == sec)
5550 entry = last_entry;
5551 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5552 entry = last_entry->next;
5553 }
5554
5555 for (; entry; entry = entry->next)
5556 if (entry->sec == sec)
5557 break;
5558
5559 if (entry)
5560 /* Record the entry prior to this one - it is the entry we are
5561 most likely to want to locate next time. Also this way if we
5562 have been called from
5563 unrecord_section_with_aarch64_elf_section_data () we will not
5564 be caching a pointer that is about to be freed. */
5565 last_entry = entry->prev;
5566
5567 return entry;
5568}
5569
5570static void
5571unrecord_section_with_aarch64_elf_section_data (asection *sec)
5572{
5573 struct section_list *entry;
5574
5575 entry = find_aarch64_elf_section_entry (sec);
5576
5577 if (entry)
5578 {
5579 if (entry->prev != NULL)
5580 entry->prev->next = entry->next;
5581 if (entry->next != NULL)
5582 entry->next->prev = entry->prev;
5583 if (entry == sections_with_aarch64_elf_section_data)
5584 sections_with_aarch64_elf_section_data = entry->next;
5585 free (entry);
5586 }
5587}
5588
5589
5590typedef struct
5591{
5592 void *finfo;
5593 struct bfd_link_info *info;
5594 asection *sec;
5595 int sec_shndx;
5596 int (*func) (void *, const char *, Elf_Internal_Sym *,
5597 asection *, struct elf_link_hash_entry *);
5598} output_arch_syminfo;
5599
5600enum map_symbol_type
5601{
5602 AARCH64_MAP_INSN,
5603 AARCH64_MAP_DATA
5604};
5605
5606
5607/* Output a single mapping symbol. */
5608
5609static bfd_boolean
5610elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5611 enum map_symbol_type type, bfd_vma offset)
5612{
5613 static const char *names[2] = { "$x", "$d" };
5614 Elf_Internal_Sym sym;
5615
5616 sym.st_value = (osi->sec->output_section->vma
5617 + osi->sec->output_offset + offset);
5618 sym.st_size = 0;
5619 sym.st_other = 0;
5620 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5621 sym.st_shndx = osi->sec_shndx;
5622 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5623}
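/* Editorial note, not part of the original source: "$x" and "$d" are the
   AArch64 mapping symbols.  They mark where instructions ($x) and literal
   data ($d) start within a section, which is how aarch64_map_one_stub
   below can flag the instruction part and the trailing address data of a
   long-branch stub separately.  */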
5624
5625
5626
5627/* Output mapping symbols for PLT entries associated with H. */
5628
5629static bfd_boolean
5630elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5631{
5632 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5633 bfd_vma addr;
5634
5635 if (h->root.type == bfd_link_hash_indirect)
5636 return TRUE;
5637
5638 if (h->root.type == bfd_link_hash_warning)
5639 /* When warning symbols are created, they **replace** the "real"
5640 entry in the hash table, thus we never get to see the real
5641 symbol in a hash traversal. So look at it now. */
5642 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5643
5644 if (h->plt.offset == (bfd_vma) - 1)
5645 return TRUE;
5646
5647 addr = h->plt.offset;
5648 if (addr == 32)
5649 {
5650 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5651 return FALSE;
5652 }
5653 return TRUE;
5654}
5655
5656
5657/* Output a single local symbol for a generated stub. */
5658
5659static bfd_boolean
5660elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5661 bfd_vma offset, bfd_vma size)
5662{
5663 Elf_Internal_Sym sym;
5664
5665 sym.st_value = (osi->sec->output_section->vma
5666 + osi->sec->output_offset + offset);
5667 sym.st_size = size;
5668 sym.st_other = 0;
5669 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5670 sym.st_shndx = osi->sec_shndx;
5671 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5672}
5673
5674static bfd_boolean
5675aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5676{
5677 struct elf64_aarch64_stub_hash_entry *stub_entry;
5678 asection *stub_sec;
5679 bfd_vma addr;
5680 char *stub_name;
5681 output_arch_syminfo *osi;
5682
5683 /* Massage our args to the form they really have. */
5684 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5685 osi = (output_arch_syminfo *) in_arg;
5686
5687 stub_sec = stub_entry->stub_sec;
5688
5689 /* Ensure this stub is attached to the current section being
5690 processed. */
5691 if (stub_sec != osi->sec)
5692 return TRUE;
5693
5694 addr = (bfd_vma) stub_entry->stub_offset;
5695
5696 stub_name = stub_entry->output_name;
5697
5698 switch (stub_entry->stub_type)
5699 {
5700 case aarch64_stub_adrp_branch:
5701 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5702 sizeof (aarch64_adrp_branch_stub)))
5703 return FALSE;
5704 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5705 return FALSE;
5706 break;
5707 case aarch64_stub_long_branch:
5708 if (!elf64_aarch64_output_stub_sym
5709 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5710 return FALSE;
5711 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5712 return FALSE;
5713 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5714 return FALSE;
5715 break;
5716 default:
5717 BFD_FAIL ();
5718 }
5719
5720 return TRUE;
5721}
5722
5723/* Output mapping symbols for linker generated sections. */
5724
5725static bfd_boolean
5726elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5727 struct bfd_link_info *info,
5728 void *finfo,
5729 int (*func) (void *, const char *,
5730 Elf_Internal_Sym *,
5731 asection *,
5732 struct elf_link_hash_entry
5733 *))
5734{
5735 output_arch_syminfo osi;
5736 struct elf64_aarch64_link_hash_table *htab;
5737
5738 htab = elf64_aarch64_hash_table (info);
5739
5740 osi.finfo = finfo;
5741 osi.info = info;
5742 osi.func = func;
5743
5744 /* Long call stubs. */
5745 if (htab->stub_bfd && htab->stub_bfd->sections)
5746 {
5747 asection *stub_sec;
5748
5749 for (stub_sec = htab->stub_bfd->sections;
5750 stub_sec != NULL; stub_sec = stub_sec->next)
5751 {
5752 /* Ignore non-stub sections. */
5753 if (!strstr (stub_sec->name, STUB_SUFFIX))
5754 continue;
5755
5756 osi.sec = stub_sec;
5757
5758 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5759 (output_bfd, osi.sec->output_section);
5760
5761 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5762 &osi);
5763 }
5764 }
5765
5766 /* Finally, output mapping symbols for the PLT. */
5767 if (!htab->root.splt || htab->root.splt->size == 0)
5768 return TRUE;
5769
5770 /* For now live without mapping symbols for the plt. */
5771 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5772 (output_bfd, htab->root.splt->output_section);
5773 osi.sec = htab->root.splt;
5774
5775 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5776 (void *) &osi);
5777
5778 return TRUE;
5779
5780}
5781
5782/* Allocate target specific section data. */
5783
5784static bfd_boolean
5785elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5786{
5787 if (!sec->used_by_bfd)
5788 {
5789 _aarch64_elf_section_data *sdata;
5790 bfd_size_type amt = sizeof (*sdata);
5791
5792 sdata = bfd_zalloc (abfd, amt);
5793 if (sdata == NULL)
5794 return FALSE;
5795 sec->used_by_bfd = sdata;
5796 }
5797
5798 record_section_with_aarch64_elf_section_data (sec);
5799
5800 return _bfd_elf_new_section_hook (abfd, sec);
5801}
5802
5803
5804static void
5805unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5806 asection *sec,
5807 void *ignore ATTRIBUTE_UNUSED)
5808{
5809 unrecord_section_with_aarch64_elf_section_data (sec);
5810}
5811
5812static bfd_boolean
5813elf64_aarch64_close_and_cleanup (bfd *abfd)
5814{
5815 if (abfd->sections)
5816 bfd_map_over_sections (abfd,
5817 unrecord_section_via_map_over_sections, NULL);
5818
5819 return _bfd_elf_close_and_cleanup (abfd);
5820}
5821
5822static bfd_boolean
5823elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5824{
5825 if (abfd->sections)
5826 bfd_map_over_sections (abfd,
5827 unrecord_section_via_map_over_sections, NULL);
5828
5829 return _bfd_free_cached_info (abfd);
5830}
5831
5832static bfd_boolean
5833elf64_aarch64_is_function_type (unsigned int type)
5834{
5835 return type == STT_FUNC;
5836}
5837
5838/* Create dynamic sections. This is different from the ARM backend in that
5839 the got, plt, gotplt and their relocation sections are all created in the
5840 standard part of the bfd elf backend. */
5841
5842static bfd_boolean
5843elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5844 struct bfd_link_info *info)
5845{
5846 struct elf64_aarch64_link_hash_table *htab;
5847 struct elf_link_hash_entry *h;
5848
5849 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5850 return FALSE;
5851
5852 htab = elf64_aarch64_hash_table (info);
5853 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5854 if (!info->shared)
5855 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5856
5857 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5858 abort ();
5859
5860 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5861 dynobj's .got section. We don't do this in the linker script
5862 because we don't want to define the symbol if we are not creating
5863 a global offset table. */
5864 h = _bfd_elf_define_linkage_sym (dynobj, info,
5865 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5866 elf_hash_table (info)->hgot = h;
5867 if (h == NULL)
5868 return FALSE;
5869
5870 return TRUE;
5871}
5872
5873
5874/* Allocate space in .plt, .got and associated reloc sections for
5875 dynamic relocs. */
5876
5877static bfd_boolean
5878elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5879{
5880 struct bfd_link_info *info;
5881 struct elf64_aarch64_link_hash_table *htab;
5882 struct elf64_aarch64_link_hash_entry *eh;
5883 struct elf_dyn_relocs *p;
5884
5885 /* An example of a bfd_link_hash_indirect symbol is a versioned
5886 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5887 -> __gxx_personality_v0(bfd_link_hash_defined)
5888
5889 There is no need to process bfd_link_hash_indirect symbols here
5890 because we will also be presented with the concrete instance of
5891 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5892 called to copy all relevant data from the generic to the concrete
5893 symbol instance.
5894 */
5895 if (h->root.type == bfd_link_hash_indirect)
5896 return TRUE;
5897
5898 if (h->root.type == bfd_link_hash_warning)
5899 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5900
5901 info = (struct bfd_link_info *) inf;
5902 htab = elf64_aarch64_hash_table (info);
5903
5904 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
5905 {
5906 /* Make sure this symbol is output as a dynamic symbol.
5907 Undefined weak syms won't yet be marked as dynamic. */
5908 if (h->dynindx == -1 && !h->forced_local)
5909 {
5910 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5911 return FALSE;
5912 }
5913
5914 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
5915 {
5916 asection *s = htab->root.splt;
5917
5918 /* If this is the first .plt entry, make room for the special
5919 first entry. */
5920 if (s->size == 0)
5921 s->size += htab->plt_header_size;
5922
5923 h->plt.offset = s->size;
5924
5925 /* If this symbol is not defined in a regular file, and we are
5926 not generating a shared library, then set the symbol to this
5927 location in the .plt. This is required to make function
5928 pointers compare as equal between the normal executable and
5929 the shared library. */
5930 if (!info->shared && !h->def_regular)
5931 {
5932 h->root.u.def.section = s;
5933 h->root.u.def.value = h->plt.offset;
5934 }
5935
5936 /* Make room for this entry. For now we only create the
5937 small model PLT entries. We later need to find a way
5938 of relaxing into these from the large model PLT entries. */
5939 s->size += PLT_SMALL_ENTRY_SIZE;
5940
5941 /* We also need to make an entry in the .got.plt section, which
5942 will be placed in the .got section by the linker script. */
5943 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
5944
5945 /* We also need to make an entry in the .rela.plt section. */
5946 htab->root.srelplt->size += RELOC_SIZE (htab);
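	  /* Illustrative sizing note (editorial, not in the original
	     source): on this 64-bit target GOT_ENTRY_SIZE is 8 and
	     RELOC_SIZE is the size of an Elf64_External_Rela (24), so each
	     PLT-using symbol presumably costs one small PLT entry in .plt,
	     8 bytes of .got.plt and 24 bytes of .rela.plt.  */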
5947
5948 /* We need to ensure that all GOT entries that serve the PLT
5949 are consecutive with the special GOT slots [0] [1] and
5950 [2]. Any additional relocations, such as
5951 R_AARCH64_TLSDESC, must be placed after the PLT related
5952 entries. We abuse the reloc_count such that during
5953 sizing we adjust reloc_count to indicate the number of
5954 PLT related reserved entries. In subsequent phases when
5955 filling in the contents of the reloc entries, PLT related
5956 entries are placed by computing their PLT index (0
5957 .. reloc_count), while other non-PLT relocs are placed
5958 at the slot indicated by reloc_count and reloc_count is
5959 updated. */
5960
5961 htab->root.srelplt->reloc_count++;
5962 }
5963 else
5964 {
5965 h->plt.offset = (bfd_vma) - 1;
5966 h->needs_plt = 0;
5967 }
5968 }
5969 else
5970 {
5971 h->plt.offset = (bfd_vma) - 1;
5972 h->needs_plt = 0;
5973 }
5974
5975 eh = (struct elf64_aarch64_link_hash_entry *) h;
5976 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
5977
5978 if (h->got.refcount > 0)
5979 {
5980 bfd_boolean dyn;
5981 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
5982
5983 h->got.offset = (bfd_vma) - 1;
5984
5985 dyn = htab->root.dynamic_sections_created;
5986
5987 /* Make sure this symbol is output as a dynamic symbol.
5988 Undefined weak syms won't yet be marked as dynamic. */
5989 if (dyn && h->dynindx == -1 && !h->forced_local)
5990 {
5991 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5992 return FALSE;
5993 }
5994
5995 if (got_type == GOT_UNKNOWN)
5996 {
5997 }
5998 else if (got_type == GOT_NORMAL)
5999 {
6000 h->got.offset = htab->root.sgot->size;
6001 htab->root.sgot->size += GOT_ENTRY_SIZE;
6002 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6003 || h->root.type != bfd_link_hash_undefweak)
6004 && (info->shared
6005 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6006 {
6007 htab->root.srelgot->size += RELOC_SIZE (htab);
6008 }
6009 }
6010 else
6011 {
6012 int indx;
6013 if (got_type & GOT_TLSDESC_GD)
6014 {
6015 eh->tlsdesc_got_jump_table_offset =
6016 (htab->root.sgotplt->size
6017 - aarch64_compute_jump_table_size (htab));
6018 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6019 h->got.offset = (bfd_vma) - 2;
6020 }
6021
6022 if (got_type & GOT_TLS_GD)
6023 {
6024 h->got.offset = htab->root.sgot->size;
6025 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6026 }
6027
6028 if (got_type & GOT_TLS_IE)
6029 {
6030 h->got.offset = htab->root.sgot->size;
6031 htab->root.sgot->size += GOT_ENTRY_SIZE;
6032 }
6033
6034 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6035 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6036 || h->root.type != bfd_link_hash_undefweak)
6037 && (info->shared
6038 || indx != 0
6039 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6040 {
6041 if (got_type & GOT_TLSDESC_GD)
6042 {
6043 htab->root.srelplt->size += RELOC_SIZE (htab);
6044 /* Note reloc_count not incremented here! We have
6045 already adjusted reloc_count for this relocation
6046 type. */
6047
6048 /* TLSDESC PLT is now needed, but not yet determined. */
6049 htab->tlsdesc_plt = (bfd_vma) - 1;
6050 }
6051
6052 if (got_type & GOT_TLS_GD)
6053 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6054
6055 if (got_type & GOT_TLS_IE)
6056 htab->root.srelgot->size += RELOC_SIZE (htab);
6057 }
6058 }
6059 }
6060 else
6061 {
6062 h->got.offset = (bfd_vma) - 1;
6063 }
6064
6065 if (eh->dyn_relocs == NULL)
6066 return TRUE;
6067
6068 /* In the shared -Bsymbolic case, discard space allocated for
6069 dynamic pc-relative relocs against symbols which turn out to be
6070 defined in regular objects. For the normal shared case, discard
6071 space for pc-relative relocs that have become local due to symbol
6072 visibility changes. */
6073
6074 if (info->shared)
6075 {
6076 /* Relocs that use pc_count are those that appear on a call
6077 insn, or certain REL relocs that can be generated via assembly.
6078 We want calls to protected symbols to resolve directly to the
6079 function rather than going via the plt. If people want
6080 function pointer comparisons to work as expected then they
6081 should avoid writing weird assembly. */
6082 if (SYMBOL_CALLS_LOCAL (info, h))
6083 {
6084 struct elf_dyn_relocs **pp;
6085
6086 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6087 {
6088 p->count -= p->pc_count;
6089 p->pc_count = 0;
6090 if (p->count == 0)
6091 *pp = p->next;
6092 else
6093 pp = &p->next;
6094 }
6095 }
6096
6097 /* Also discard relocs on undefined weak syms with non-default
6098 visibility. */
6099 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6100 {
6101 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6102 eh->dyn_relocs = NULL;
6103
6104 /* Make sure undefined weak symbols are output as a dynamic
6105 symbol in PIEs. */
6106 else if (h->dynindx == -1
6107 && !h->forced_local
6108 && !bfd_elf_link_record_dynamic_symbol (info, h))
6109 return FALSE;
6110 }
6111
6112 }
6113 else if (ELIMINATE_COPY_RELOCS)
6114 {
6115 /* For the non-shared case, discard space for relocs against
6116 symbols which turn out to need copy relocs or are not
6117 dynamic. */
6118
6119 if (!h->non_got_ref
6120 && ((h->def_dynamic
6121 && !h->def_regular)
6122 || (htab->root.dynamic_sections_created
6123 && (h->root.type == bfd_link_hash_undefweak
6124 || h->root.type == bfd_link_hash_undefined))))
6125 {
6126 /* Make sure this symbol is output as a dynamic symbol.
6127 Undefined weak syms won't yet be marked as dynamic. */
6128 if (h->dynindx == -1
6129 && !h->forced_local
6130 && !bfd_elf_link_record_dynamic_symbol (info, h))
6131 return FALSE;
6132
6133 /* If that succeeded, we know we'll be keeping all the
6134 relocs. */
6135 if (h->dynindx != -1)
6136 goto keep;
6137 }
6138
6139 eh->dyn_relocs = NULL;
6140
6141 keep:;
6142 }
6143
6144 /* Finally, allocate space. */
6145 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6146 {
6147 asection *sreloc;
6148
6149 sreloc = elf_section_data (p->sec)->sreloc;
6150
6151 BFD_ASSERT (sreloc != NULL);
6152
6153 sreloc->size += p->count * RELOC_SIZE (htab);
6154 }
6155
6156 return TRUE;
6157}
6158
6159
6160
6161
6162/* This is the most important function of all, innocuously named
6163 though it is. */
6164static bfd_boolean
6165elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6166 struct bfd_link_info *info)
6167{
6168 struct elf64_aarch64_link_hash_table *htab;
6169 bfd *dynobj;
6170 asection *s;
6171 bfd_boolean relocs;
6172 bfd *ibfd;
6173
6174 htab = elf64_aarch64_hash_table ((info));
6175 dynobj = htab->root.dynobj;
6176
6177 BFD_ASSERT (dynobj != NULL);
6178
6179 if (htab->root.dynamic_sections_created)
6180 {
6181 if (info->executable)
6182 {
6183 s = bfd_get_linker_section (dynobj, ".interp");
6184 if (s == NULL)
6185 abort ();
6186 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6187 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6188 }
6189 }
6190
6191 /* Set up .got offsets for local syms, and space for local dynamic
6192 relocs. */
6193 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6194 {
6195 struct elf_aarch64_local_symbol *locals = NULL;
6196 Elf_Internal_Shdr *symtab_hdr;
6197 asection *srel;
6198 unsigned int i;
6199
6200 if (!is_aarch64_elf (ibfd))
6201 continue;
6202
6203 for (s = ibfd->sections; s != NULL; s = s->next)
6204 {
6205 struct elf_dyn_relocs *p;
6206
6207 for (p = (struct elf_dyn_relocs *)
6208 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6209 {
6210 if (!bfd_is_abs_section (p->sec)
6211 && bfd_is_abs_section (p->sec->output_section))
6212 {
6213 /* Input section has been discarded, either because
6214 it is a copy of a linkonce section or due to
6215 linker script /DISCARD/, so we'll be discarding
6216 the relocs too. */
6217 }
6218 else if (p->count != 0)
6219 {
6220 srel = elf_section_data (p->sec)->sreloc;
6221 srel->size += p->count * RELOC_SIZE (htab);
6222 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6223 info->flags |= DF_TEXTREL;
6224 }
6225 }
6226 }
6227
6228 locals = elf64_aarch64_locals (ibfd);
6229 if (!locals)
6230 continue;
6231
6232 symtab_hdr = &elf_symtab_hdr (ibfd);
6233 srel = htab->root.srelgot;
6234 for (i = 0; i < symtab_hdr->sh_info; i++)
6235 {
6236 locals[i].got_offset = (bfd_vma) - 1;
6237 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6238 if (locals[i].got_refcount > 0)
6239 {
6240 unsigned got_type = locals[i].got_type;
6241 if (got_type & GOT_TLSDESC_GD)
6242 {
6243 locals[i].tlsdesc_got_jump_table_offset =
6244 (htab->root.sgotplt->size
6245 - aarch64_compute_jump_table_size (htab));
6246 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6247 locals[i].got_offset = (bfd_vma) - 2;
6248 }
6249
6250 if (got_type & GOT_TLS_GD)
6251 {
6252 locals[i].got_offset = htab->root.sgot->size;
6253 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6254 }
6255
6256 if (got_type & GOT_TLS_IE)
6257 {
6258 locals[i].got_offset = htab->root.sgot->size;
6259 htab->root.sgot->size += GOT_ENTRY_SIZE;
6260 }
6261
6262 if (got_type == GOT_UNKNOWN)
6263 {
6264 }
6265
6266 if (got_type == GOT_NORMAL)
6267 {
6268 }
6269
6270 if (info->shared)
6271 {
6272 if (got_type & GOT_TLSDESC_GD)
6273 {
6274 htab->root.srelplt->size += RELOC_SIZE (htab);
6275 /* Note RELOC_COUNT not incremented here! */
6276 htab->tlsdesc_plt = (bfd_vma) - 1;
6277 }
6278
6279 if (got_type & GOT_TLS_GD)
6280 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6281
6282 if (got_type & GOT_TLS_IE)
6283 htab->root.srelgot->size += RELOC_SIZE (htab);
6284 }
6285 }
6286 else
6287 {
6288 locals[i].got_refcount = (bfd_vma) - 1;
6289 }
6290 }
6291 }
6292
6293
6294 /* Allocate global sym .plt and .got entries, and space for global
6295 sym dynamic relocs. */
6296 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6297 info);
6298
6299
6300 /* For every jump slot reserved in the sgotplt, reloc_count is
6301 incremented. However, when we reserve space for TLS descriptors,
6302 it's not incremented, so in order to compute the space reserved
6303 for them, it suffices to multiply the reloc count by the jump
6304 slot size. */
6305
6306 if (htab->root.srelplt)
6307 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
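  /* Editorial sketch, not in the original listing: based on the comment
     above, aarch64_compute_jump_table_size () presumably reduces to
     something like

	 return htab->root.srelplt->reloc_count * GOT_ENTRY_SIZE;

     so only jump slots that bumped reloc_count count towards the jump
     table, while the TLSDESC double slots sit above it in .got.plt.  */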
6308
6309 if (htab->tlsdesc_plt)
6310 {
6311 if (htab->root.splt->size == 0)
6312 htab->root.splt->size += PLT_ENTRY_SIZE;
6313
6314 htab->tlsdesc_plt = htab->root.splt->size;
6315 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6316
6317 /* Only generate the GOT entry required for lazy TLS relocations;
6318 with DF_BIND_NOW it is not needed. */
6319 if (!(info->flags & DF_BIND_NOW))
6320 {
6321 htab->dt_tlsdesc_got = htab->root.sgot->size;
6322 htab->root.sgot->size += GOT_ENTRY_SIZE;
6323 }
6324 }
6325
6326 /* We now have determined the sizes of the various dynamic sections.
6327 Allocate memory for them. */
6328 relocs = FALSE;
6329 for (s = dynobj->sections; s != NULL; s = s->next)
6330 {
6331 if ((s->flags & SEC_LINKER_CREATED) == 0)
6332 continue;
6333
6334 if (s == htab->root.splt
6335 || s == htab->root.sgot
6336 || s == htab->root.sgotplt
6337 || s == htab->root.iplt
6338 || s == htab->root.igotplt || s == htab->sdynbss)
6339 {
6340 /* Strip this section if we don't need it; see the
6341 comment below. */
6342 }
6343 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6344 {
6345 if (s->size != 0 && s != htab->root.srelplt)
6346 relocs = TRUE;
6347
6348 /* We use the reloc_count field as a counter if we need
6349 to copy relocs into the output file. */
6350 if (s != htab->root.srelplt)
6351 s->reloc_count = 0;
6352 }
6353 else
6354 {
6355 /* It's not one of our sections, so don't allocate space. */
6356 continue;
6357 }
6358
6359 if (s->size == 0)
6360 {
6361 /* If we don't need this section, strip it from the
6362 output file. This is mostly to handle .rela.bss and
6363 .rela.plt. We must create both sections in
6364 create_dynamic_sections, because they must be created
6365 before the linker maps input sections to output
6366 sections. The linker does that before
6367 adjust_dynamic_symbol is called, and it is that
6368 function which decides whether anything needs to go
6369 into these sections. */
6370
6371 s->flags |= SEC_EXCLUDE;
6372 continue;
6373 }
6374
6375 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6376 continue;
6377
6378 /* Allocate memory for the section contents. We use bfd_zalloc
6379 here in case unused entries are not reclaimed before the
6380 section's contents are written out. This should not happen,
6381 but this way if it does, we get a R_AARCH64_NONE reloc instead
6382 of garbage. */
6383 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6384 if (s->contents == NULL)
6385 return FALSE;
6386 }
6387
6388 if (htab->root.dynamic_sections_created)
6389 {
6390 /* Add some entries to the .dynamic section. We fill in the
6391 values later, in elf64_aarch64_finish_dynamic_sections, but we
6392 must add the entries now so that we get the correct size for
6393 the .dynamic section. The DT_DEBUG entry is filled in by the
6394 dynamic linker and used by the debugger. */
6395#define add_dynamic_entry(TAG, VAL) \
6396 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6397
6398 if (info->executable)
6399 {
6400 if (!add_dynamic_entry (DT_DEBUG, 0))
6401 return FALSE;
6402 }
6403
6404 if (htab->root.splt->size != 0)
6405 {
6406 if (!add_dynamic_entry (DT_PLTGOT, 0)
6407 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6408 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6409 || !add_dynamic_entry (DT_JMPREL, 0))
6410 return FALSE;
6411
6412 if (htab->tlsdesc_plt
6413 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6414 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6415 return FALSE;
6416 }
6417
6418 if (relocs)
6419 {
6420 if (!add_dynamic_entry (DT_RELA, 0)
6421 || !add_dynamic_entry (DT_RELASZ, 0)
6422 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6423 return FALSE;
6424
6425 /* If any dynamic relocs apply to a read-only section,
6426 then we need a DT_TEXTREL entry. */
6427 if ((info->flags & DF_TEXTREL) != 0)
6428 {
6429 if (!add_dynamic_entry (DT_TEXTREL, 0))
6430 return FALSE;
6431 }
6432 }
6433 }
6434#undef add_dynamic_entry
6435
6436 return TRUE;
6437
6438
6439}
6440
6441static inline void
6442elf64_aarch64_update_plt_entry (bfd *output_bfd,
6443 unsigned int r_type,
6444 bfd_byte *plt_entry, bfd_vma value)
6445{
6446 reloc_howto_type *howto;
6447 howto = elf64_aarch64_howto_from_type (r_type);
6448 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6449}
6450
6451static void
6452elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6453 struct elf64_aarch64_link_hash_table
6454 *htab, bfd *output_bfd)
6455{
6456 bfd_byte *plt_entry;
6457 bfd_vma plt_index;
6458 bfd_vma got_offset;
6459 bfd_vma gotplt_entry_address;
6460 bfd_vma plt_entry_address;
6461 Elf_Internal_Rela rela;
6462 bfd_byte *loc;
6463
6464 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6465
6466 /* Offset in the GOT is (PLT index plus the 3 reserved GOT
6467 header entries) times 8. */
6468 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
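  /* Worked example (editorial, not in the original source): assuming
     GOT_ENTRY_SIZE is 8 on this 64-bit target, plt_index 0 gets
     got_offset (0 + 3) * 8 = 24, the slot right after the three reserved
     .got.plt headers; plt_index 1 gets 32, and so on.  */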
6469 plt_entry = htab->root.splt->contents + h->plt.offset;
6470 plt_entry_address = htab->root.splt->output_section->vma
6471 + htab->root.splt->output_section->output_offset + h->plt.offset;
6472 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6473 htab->root.sgotplt->output_offset + got_offset;
6474
6475 /* Copy in the boiler-plate for the PLTn entry. */
6476 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6477
6478 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6479 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6480 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6481 plt_entry,
6482 PG (gotplt_entry_address) -
6483 PG (plt_entry_address));
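  /* Editorial example of the page arithmetic above (the addresses are
     made up): PG () presumably masks off the low 12 bits, so with a
     gotplt_entry_address of 0x411018 and a plt_entry_address of 0x400440,
     PG (0x411018) - PG (0x400440) = 0x411000 - 0x400000 = 0x11000, which
     ADRP encodes as 0x11000 >> 12 = 0x11 pages.  */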
6484
6485 /* Fill in the lo12 bits for the load from the pltgot. */
6486 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6487 plt_entry + 4,
6488 PG_OFFSET (gotplt_entry_address));
6489
6490 /* Fill in the lo12 bits for the add from the pltgot entry. */
6491 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6492 plt_entry + 8,
6493 PG_OFFSET (gotplt_entry_address));
6494
6495 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6496 bfd_put_64 (output_bfd,
6497 (htab->root.splt->output_section->vma
6498 + htab->root.splt->output_offset),
6499 htab->root.sgotplt->contents + got_offset);
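  /* Editorial note (not in the original source): pointing every .got.plt
     slot back at PLT0 is what makes lazy binding work: the first call
     through a PLTn entry lands in PLT0, which conventionally enters the
     dynamic linker's resolver, and the resolver then patches the slot
     with the real target address for subsequent calls.  */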
6500
6501 /* Fill in the entry in the .rela.plt section. */
6502 rela.r_offset = gotplt_entry_address;
6503 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6504 rela.r_addend = 0;
6505
6506 /* Compute the relocation entry to be used based on PLT index and do
6507 not adjust reloc_count. The reloc_count has already been adjusted
6508 to account for this entry. */
6509 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6510 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6511}
6512
6513/* Size sections even though they're not dynamic. We use it to set up
6514 _TLS_MODULE_BASE_, if needed. */
6515
6516static bfd_boolean
6517elf64_aarch64_always_size_sections (bfd *output_bfd,
6518 struct bfd_link_info *info)
6519{
6520 asection *tls_sec;
6521
6522 if (info->relocatable)
6523 return TRUE;
6524
6525 tls_sec = elf_hash_table (info)->tls_sec;
6526
6527 if (tls_sec)
6528 {
6529 struct elf_link_hash_entry *tlsbase;
6530
6531 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6532 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6533
6534 if (tlsbase)
6535 {
6536 struct bfd_link_hash_entry *h = NULL;
6537 const struct elf_backend_data *bed =
6538 get_elf_backend_data (output_bfd);
6539
6540 if (!(_bfd_generic_link_add_one_symbol
6541 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6542 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6543 return FALSE;
6544
6545 tlsbase->type = STT_TLS;
6546 tlsbase = (struct elf_link_hash_entry *) h;
6547 tlsbase->def_regular = 1;
6548 tlsbase->other = STV_HIDDEN;
6549 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6550 }
6551 }
6552
6553 return TRUE;
6554}
6555
6556/* Finish up dynamic symbol handling. We set the contents of various
6557 dynamic sections here. */
6558static bfd_boolean
6559elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6560 struct bfd_link_info *info,
6561 struct elf_link_hash_entry *h,
6562 Elf_Internal_Sym *sym)
6563{
6564 struct elf64_aarch64_link_hash_table *htab;
6565 htab = elf64_aarch64_hash_table (info);
6566
6567 if (h->plt.offset != (bfd_vma) - 1)
6568 {
6569 /* This symbol has an entry in the procedure linkage table. Set
6570 it up. */
6571
6572 if (h->dynindx == -1
6573 || htab->root.splt == NULL
6574 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6575 abort ();
6576
6577 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6578 if (!h->def_regular)
6579 {
6580 /* Mark the symbol as undefined, rather than as defined in
6581 the .plt section. Leave the value alone. This is a clue
6582 for the dynamic linker, to make function pointer
6583 comparisons work between an application and shared
6584 library. */
6585 sym->st_shndx = SHN_UNDEF;
6586 }
6587 }
6588
6589 if (h->got.offset != (bfd_vma) - 1
6590 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6591 {
6592 Elf_Internal_Rela rela;
6593 bfd_byte *loc;
6594
6595 /* This symbol has an entry in the global offset table. Set it
6596 up. */
6597 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6598 abort ();
6599
6600 rela.r_offset = (htab->root.sgot->output_section->vma
6601 + htab->root.sgot->output_offset
6602 + (h->got.offset & ~(bfd_vma) 1));
6603
6604 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6605 {
6606 if (!h->def_regular)
6607 return FALSE;
6608
6609 BFD_ASSERT ((h->got.offset & 1) != 0);
6610 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6611 rela.r_addend = (h->root.u.def.value
6612 + h->root.u.def.section->output_section->vma
6613 + h->root.u.def.section->output_offset);
6614 }
6615 else
6616 {
6617 BFD_ASSERT ((h->got.offset & 1) == 0);
6618 bfd_put_64 (output_bfd, (bfd_vma) 0,
6619 htab->root.sgot->contents + h->got.offset);
6620 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6621 rela.r_addend = 0;
6622 }
6623
6624 loc = htab->root.srelgot->contents;
6625 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6626 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6627 }
6628
6629 if (h->needs_copy)
6630 {
6631 Elf_Internal_Rela rela;
6632 bfd_byte *loc;
6633
6634 /* This symbol needs a copy reloc. Set it up. */
6635
6636 if (h->dynindx == -1
6637 || (h->root.type != bfd_link_hash_defined
6638 && h->root.type != bfd_link_hash_defweak)
6639 || htab->srelbss == NULL)
6640 abort ();
6641
6642 rela.r_offset = (h->root.u.def.value
6643 + h->root.u.def.section->output_section->vma
6644 + h->root.u.def.section->output_offset);
6645 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6646 rela.r_addend = 0;
6647 loc = htab->srelbss->contents;
6648 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6649 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6650 }
6651
6652 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6653 be NULL for local symbols. */
6654 if (sym != NULL
6655	  && (h == elf_hash_table (info)->hdynamic
6656 || h == elf_hash_table (info)->hgot))
6657 sym->st_shndx = SHN_ABS;
6658
6659 return TRUE;
6660}
6661
6662static void
6663elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6664 struct elf64_aarch64_link_hash_table
6665 *htab)
6666{
6667 /* Fill in PLT0. FIXME: RR note this doesn't distinguish between
6668 small and large PLTs and at the moment just generates
6669 the small PLT. */
6670
6671 /* PLT0 of the small PLT looks like this -
6672 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6673 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6674 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6675 // symbol resolver
6676 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6677 // GOTPLT entry for this.
6678 br x17
6679 */
6680 bfd_vma plt_got_base;
6681 bfd_vma plt_base;
6682
6683
6684 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6685 PLT_ENTRY_SIZE);
6686 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6687 PLT_ENTRY_SIZE;
6688
6689 plt_got_base = (htab->root.sgotplt->output_section->vma
6690 + htab->root.sgotplt->output_offset);
6691
6692 plt_base = htab->root.splt->output_section->vma +
6693 htab->root.splt->output_section->output_offset;
6694
6695 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 16.
6696 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6697 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6698 htab->root.splt->contents + 4,
6699 PG (plt_got_base + 16) - PG (plt_base + 4));
6700
6701 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6702 htab->root.splt->contents + 8,
6703 PG_OFFSET (plt_got_base + 16));
6704
6705 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6706 htab->root.splt->contents + 12,
6707 PG_OFFSET (plt_got_base + 16));
6708}
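/* Editorial note (not in the original source): the PLT_GOT + 16 used above
   is .got.plt slot [2] (GOT_ENTRY_SIZE being 8).  Slots [1] and [2] are the
   entries zeroed for the dynamic linker in
   elf64_aarch64_finish_dynamic_sections below, which it conventionally
   fills with its link map and resolver entry point.  */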
6709
6710static bfd_boolean
6711elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6712 struct bfd_link_info *info)
6713{
6714 struct elf64_aarch64_link_hash_table *htab;
6715 bfd *dynobj;
6716 asection *sdyn;
6717
6718 htab = elf64_aarch64_hash_table (info);
6719 dynobj = htab->root.dynobj;
6720 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6721
6722 if (htab->root.dynamic_sections_created)
6723 {
6724 Elf64_External_Dyn *dyncon, *dynconend;
6725
6726 if (sdyn == NULL || htab->root.sgot == NULL)
6727 abort ();
6728
6729 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6730 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6731 for (; dyncon < dynconend; dyncon++)
6732 {
6733 Elf_Internal_Dyn dyn;
6734 asection *s;
6735
6736 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6737
6738 switch (dyn.d_tag)
6739 {
6740 default:
6741 continue;
6742
6743 case DT_PLTGOT:
6744 s = htab->root.sgotplt;
6745 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6746 break;
6747
6748 case DT_JMPREL:
6749 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6750 break;
6751
6752 case DT_PLTRELSZ:
6753 s = htab->root.srelplt->output_section;
6754 dyn.d_un.d_val = s->size;
6755 break;
6756
6757 case DT_RELASZ:
6758 /* The procedure linkage table relocs (DT_JMPREL) should
6759 not be included in the overall relocs (DT_RELA).
6760 Therefore, we override the DT_RELASZ entry here to
6761 make it not include the JMPREL relocs. Since the
6762 linker script arranges for .rela.plt to follow all
6763 other relocation sections, we don't have to worry
6764 about changing the DT_RELA entry. */
6765 if (htab->root.srelplt != NULL)
6766 {
6767 s = htab->root.srelplt->output_section;
6768 dyn.d_un.d_val -= s->size;
6769 }
6770 break;
6771
6772 case DT_TLSDESC_PLT:
6773 s = htab->root.splt;
6774 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6775 + htab->tlsdesc_plt;
6776 break;
6777
6778 case DT_TLSDESC_GOT:
6779 s = htab->root.sgot;
6780 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6781 + htab->dt_tlsdesc_got;
6782 break;
6783 }
6784
6785 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6786 }
6787
6788 }
6789
6790 /* Fill in the special first entry in the procedure linkage table. */
6791 if (htab->root.splt && htab->root.splt->size > 0)
6792 {
6793 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6794
6795 elf_section_data (htab->root.splt->output_section)->
6796 this_hdr.sh_entsize = htab->plt_entry_size;
6797
6798
6799 if (htab->tlsdesc_plt)
6800 {
6801 bfd_put_64 (output_bfd, (bfd_vma) 0,
6802 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6803
6804 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6805 elf64_aarch64_tlsdesc_small_plt_entry,
6806 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6807
6808 {
6809 bfd_vma adrp1_addr =
6810 htab->root.splt->output_section->vma
6811 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6812
6813 bfd_vma adrp2_addr =
6814 htab->root.splt->output_section->vma
6815 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6816
6817 bfd_vma got_addr =
6818 htab->root.sgot->output_section->vma
6819 + htab->root.sgot->output_offset;
6820
6821 bfd_vma pltgot_addr =
6822 htab->root.sgotplt->output_section->vma
6823 + htab->root.sgotplt->output_offset;
6824
6825 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6826 bfd_vma opcode;
6827
6828 /* adrp x2, DT_TLSDESC_GOT */
6829 opcode = bfd_get_32 (output_bfd,
6830 htab->root.splt->contents
6831 + htab->tlsdesc_plt + 4);
6832 opcode = reencode_adr_imm
6833 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6834 bfd_put_32 (output_bfd, opcode,
6835 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6836
6837 /* adrp x3, 0 */
6838 opcode = bfd_get_32 (output_bfd,
6839 htab->root.splt->contents
6840 + htab->tlsdesc_plt + 8);
6841 opcode = reencode_adr_imm
6842 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6843 bfd_put_32 (output_bfd, opcode,
6844 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6845
6846 /* ldr x2, [x2, #0] */
6847 opcode = bfd_get_32 (output_bfd,
6848 htab->root.splt->contents
6849 + htab->tlsdesc_plt + 12);
6850 opcode = reencode_ldst_pos_imm (opcode,
6851 PG_OFFSET (dt_tlsdesc_got) >> 3);
6852 bfd_put_32 (output_bfd, opcode,
6853 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6854
6855 /* add x3, x3, 0 */
6856 opcode = bfd_get_32 (output_bfd,
6857 htab->root.splt->contents
6858 + htab->tlsdesc_plt + 16);
6859 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6860 bfd_put_32 (output_bfd, opcode,
6861 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6862 }
6863 }
6864 }
6865
6866 if (htab->root.sgotplt)
6867 {
6868 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6869 {
6870 (*_bfd_error_handler)
6871 (_("discarded output section: `%A'"), htab->root.sgotplt);
6872 return FALSE;
6873 }
6874
6875 /* Fill in the first three entries in the global offset table. */
6876 if (htab->root.sgotplt->size > 0)
6877 {
6878 /* Set the first entry in the global offset table to the address of
6879 the dynamic section. */
6880 if (sdyn == NULL)
6881 bfd_put_64 (output_bfd, (bfd_vma) 0,
6882 htab->root.sgotplt->contents);
6883 else
6884 bfd_put_64 (output_bfd,
6885 sdyn->output_section->vma + sdyn->output_offset,
6886 htab->root.sgotplt->contents);
6887 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6888 bfd_put_64 (output_bfd,
6889 (bfd_vma) 0,
6890 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6891 bfd_put_64 (output_bfd,
6892 (bfd_vma) 0,
6893 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6894 }
6895
6896 elf_section_data (htab->root.sgotplt->output_section)->
6897 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6898 }
6899
6900 if (htab->root.sgot && htab->root.sgot->size > 0)
6901 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6902 = GOT_ENTRY_SIZE;
6903
6904 return TRUE;
6905}
6906
6907/* Return address for Ith PLT stub in section PLT, for relocation REL
6908 or (bfd_vma) -1 if it should not be included. */
6909
6910static bfd_vma
6911elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6912 const arelent *rel ATTRIBUTE_UNUSED)
6913{
6914 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6915}
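/* Editorial example (not in the original source): assuming the 32-byte
   PLT0 header and 16-byte small PLT entries used elsewhere in this file
   (see the addr == 32 check in elf64_aarch64_output_plt_map), stub 0
   presumably resolves to plt->vma + 32, stub 1 to plt->vma + 48, and so
   on.  */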
6916
6917
6918/* We use this so we can override certain functions
6919 (though currently we don't). */
6920
6921const struct elf_size_info elf64_aarch64_size_info =
6922{
6923 sizeof (Elf64_External_Ehdr),
6924 sizeof (Elf64_External_Phdr),
6925 sizeof (Elf64_External_Shdr),
6926 sizeof (Elf64_External_Rel),
6927 sizeof (Elf64_External_Rela),
6928 sizeof (Elf64_External_Sym),
6929 sizeof (Elf64_External_Dyn),
6930 sizeof (Elf_External_Note),
6931 4, /* Hash table entry size. */
6932 1, /* Internal relocs per external relocs. */
6933 64, /* Arch size. */
6934 3, /* Log_file_align. */
6935 ELFCLASS64, EV_CURRENT,
6936 bfd_elf64_write_out_phdrs,
6937 bfd_elf64_write_shdrs_and_ehdr,
6938 bfd_elf64_checksum_contents,
6939 bfd_elf64_write_relocs,
6940 bfd_elf64_swap_symbol_in,
6941 bfd_elf64_swap_symbol_out,
6942 bfd_elf64_slurp_reloc_table,
6943 bfd_elf64_slurp_symbol_table,
6944 bfd_elf64_swap_dyn_in,
6945 bfd_elf64_swap_dyn_out,
6946 bfd_elf64_swap_reloc_in,
6947 bfd_elf64_swap_reloc_out,
6948 bfd_elf64_swap_reloca_in,
6949 bfd_elf64_swap_reloca_out
6950};
6951
6952#define ELF_ARCH bfd_arch_aarch64
6953#define ELF_MACHINE_CODE EM_AARCH64
6954#define ELF_MAXPAGESIZE 0x10000
6955#define ELF_MINPAGESIZE 0x1000
6956#define ELF_COMMONPAGESIZE 0x1000
6957
6958#define bfd_elf64_close_and_cleanup \
6959 elf64_aarch64_close_and_cleanup
6960
6961#define bfd_elf64_bfd_copy_private_bfd_data \
6962 elf64_aarch64_copy_private_bfd_data
6963
6964#define bfd_elf64_bfd_free_cached_info \
6965 elf64_aarch64_bfd_free_cached_info
6966
6967#define bfd_elf64_bfd_is_target_special_symbol \
6968 elf64_aarch64_is_target_special_symbol
6969
6970#define bfd_elf64_bfd_link_hash_table_create \
6971 elf64_aarch64_link_hash_table_create
6972
6973#define bfd_elf64_bfd_link_hash_table_free \
6974 elf64_aarch64_hash_table_free
6975
6976#define bfd_elf64_bfd_merge_private_bfd_data \
6977 elf64_aarch64_merge_private_bfd_data
6978
6979#define bfd_elf64_bfd_print_private_bfd_data \
6980 elf64_aarch64_print_private_bfd_data
6981
6982#define bfd_elf64_bfd_reloc_type_lookup \
6983 elf64_aarch64_reloc_type_lookup
6984
6985#define bfd_elf64_bfd_reloc_name_lookup \
6986 elf64_aarch64_reloc_name_lookup
6987
6988#define bfd_elf64_bfd_set_private_flags \
6989 elf64_aarch64_set_private_flags
6990
6991#define bfd_elf64_find_inliner_info \
6992 elf64_aarch64_find_inliner_info
6993
6994#define bfd_elf64_find_nearest_line \
6995 elf64_aarch64_find_nearest_line
6996
6997#define bfd_elf64_mkobject \
6998 elf64_aarch64_mkobject
6999
7000#define bfd_elf64_new_section_hook \
7001 elf64_aarch64_new_section_hook
7002
7003#define elf_backend_adjust_dynamic_symbol \
7004 elf64_aarch64_adjust_dynamic_symbol
7005
7006#define elf_backend_always_size_sections \
7007 elf64_aarch64_always_size_sections
7008
7009#define elf_backend_check_relocs \
7010 elf64_aarch64_check_relocs
7011
7012#define elf_backend_copy_indirect_symbol \
7013 elf64_aarch64_copy_indirect_symbol
7014
7015/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7016 to them in our hash. */
7017#define elf_backend_create_dynamic_sections \
7018 elf64_aarch64_create_dynamic_sections
7019
7020#define elf_backend_init_index_section \
7021 _bfd_elf_init_2_index_sections
7022
7023#define elf_backend_is_function_type \
7024 elf64_aarch64_is_function_type
7025
7026#define elf_backend_finish_dynamic_sections \
7027 elf64_aarch64_finish_dynamic_sections
7028
7029#define elf_backend_finish_dynamic_symbol \
7030 elf64_aarch64_finish_dynamic_symbol
7031
7032#define elf_backend_gc_sweep_hook \
7033 elf64_aarch64_gc_sweep_hook
7034
7035#define elf_backend_object_p \
7036 elf64_aarch64_object_p
7037
7038#define elf_backend_output_arch_local_syms \
7039 elf64_aarch64_output_arch_local_syms
7040
7041#define elf_backend_plt_sym_val \
7042 elf64_aarch64_plt_sym_val
7043
7044#define elf_backend_post_process_headers \
7045 elf64_aarch64_post_process_headers
7046
7047#define elf_backend_relocate_section \
7048 elf64_aarch64_relocate_section
7049
7050#define elf_backend_reloc_type_class \
7051 elf64_aarch64_reloc_type_class
7052
7053#define elf_backend_section_flags \
7054 elf64_aarch64_section_flags
7055
7056#define elf_backend_section_from_shdr \
7057 elf64_aarch64_section_from_shdr
7058
7059#define elf_backend_size_dynamic_sections \
7060 elf64_aarch64_size_dynamic_sections
7061
7062#define elf_backend_size_info \
7063 elf64_aarch64_size_info
7064
7065#define elf_backend_can_refcount 1
7066#define elf_backend_can_gc_sections 0
7067#define elf_backend_plt_readonly 1
7068#define elf_backend_want_got_plt 1
7069#define elf_backend_want_plt_sym 0
7070#define elf_backend_may_use_rel_p 0
7071#define elf_backend_may_use_rela_p 1
7072#define elf_backend_default_use_rela_p 1
7073#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7074
7075#undef elf_backend_obj_attrs_section
7076#define elf_backend_obj_attrs_section ".ARM.attributes"
7077
7078#include "elf64-target.h"