* elf-bfd.h (struct core_elf_obj_tdata): New.
[deliverable/binutils-gdb.git] / bfd / elf64-aarch64.c
CommitLineData
a06ea964
NC
1/* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51 traditional TLS mechanisms.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62 global and local TLS symbols. Note that this is different from
 63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
 117 Iterate over all input BFDs, looking in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elf64_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return the relocation section associated with NAME. HTAB is the
189 bfd's elf64_aarch64_link_hash_entry. */
190#define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
192
193/* Return size of a relocation entry. HTAB is the bfd's
194 elf64_aarch64_link_hash_entry. */
195#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
196
197/* Return function to swap relocations in. HTAB is the bfd's
198 elf64_aarch64_link_hash_entry. */
199#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
200
201/* Return function to swap relocations out. HTAB is the bfd's
202 elf64_aarch64_link_hash_entry. */
203#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
204
205/* GOT Entry size - 8 bytes. */
206#define GOT_ENTRY_SIZE (8)
207#define PLT_ENTRY_SIZE (32)
208#define PLT_SMALL_ENTRY_SIZE (16)
209#define PLT_TLSDESC_ENTRY_SIZE (32)
210
211/* Take the PAGE component of an address or offset. */
212#define PG(x) ((x) & ~ 0xfff)
213#define PG_OFFSET(x) ((x) & 0xfff)
214
215/* Encoding of the nop instruction */
216#define INSN_NOP 0xd503201f
217
218#define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
221
222/* The first entry in a procedure linkage table looks like this
223 if the distance between the PLTGOT and the PLT is < 4GB use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
226 [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
238
239/* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
263#define elf_info_to_howto elf64_aarch64_info_to_howto
264#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
265
266#define AARCH64_ELF_ABI_VERSION 0
267#define AARCH64_ELF_OS_ABI_VERSION 0
268
269/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270#define ALL_ONES (~ (bfd_vma) 0)
271
272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pc_reloffset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
403/* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
f41aef5f
RE
911
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
915 2, /* rightshift */
916 2, /* size (0 = byte,1 = short,2 = long) */
917 19, /* bitsize */
918 TRUE, /* pc_relative */
919 0, /* bitpos */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
927
a06ea964
NC
928 EMPTY_HOWTO (310),
929
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
933 12, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
948 3, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
960};
961
962static reloc_howto_type elf64_aarch64_tls_howto_table[] =
963{
964 EMPTY_HOWTO (512),
965
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
969 12, /* rightshift */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
971 21, /* bitsize */
972 TRUE, /* pc_relative */
973 0, /* bitpos */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
981
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
984 0, /* rightshift */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
986 12, /* bitsize */
987 FALSE, /* pc_relative */
988 0, /* bitpos */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
996
997 EMPTY_HOWTO (515),
998 EMPTY_HOWTO (516),
999 EMPTY_HOWTO (517),
1000 EMPTY_HOWTO (518),
1001 EMPTY_HOWTO (519),
1002 EMPTY_HOWTO (520),
1003 EMPTY_HOWTO (521),
1004 EMPTY_HOWTO (522),
1005 EMPTY_HOWTO (523),
1006 EMPTY_HOWTO (524),
1007 EMPTY_HOWTO (525),
1008 EMPTY_HOWTO (526),
1009 EMPTY_HOWTO (527),
1010 EMPTY_HOWTO (528),
1011 EMPTY_HOWTO (529),
1012 EMPTY_HOWTO (530),
1013 EMPTY_HOWTO (531),
1014 EMPTY_HOWTO (532),
1015 EMPTY_HOWTO (533),
1016 EMPTY_HOWTO (534),
1017 EMPTY_HOWTO (535),
1018 EMPTY_HOWTO (536),
1019 EMPTY_HOWTO (537),
1020 EMPTY_HOWTO (538),
1021
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 21, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1065 3, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
bb3f9ed8 1079 2, /* rightshift */
a06ea964
NC
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 21, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
bb3f9ed8 1093 32, /* rightshift */
a06ea964
NC
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 12, /* bitsize */
1096 FALSE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1105
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
bb3f9ed8 1107 16, /* rightshift */
a06ea964
NC
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 12, /* bitsize */
1110 FALSE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1119
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
bb3f9ed8 1121 16, /* rightshift */
a06ea964
NC
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 12, /* bitsize */
1124 FALSE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1133
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 12, /* bitsize */
1138 FALSE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1147
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
bb3f9ed8 1163 12, /* rightshift */
a06ea964
NC
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 12, /* bitsize */
1166 FALSE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1175
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 12, /* bitsize */
1194 FALSE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
1203};
1204
1205static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1206{
1207 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */
bb3f9ed8 1208 2, /* rightshift */
a06ea964
NC
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1210 21, /* bitsize */
1211 TRUE, /* pc_relative */
1212 0, /* bitpos */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1220
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1222 0, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 21, /* bitsize */
1225 TRUE, /* pc_relative */
1226 0, /* bitpos */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1234
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 21, /* bitsize */
1241 TRUE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1246 FALSE, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE), /* pcrel_offset */
1250
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1253 3, /* rightshift */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 12, /* bitsize */
1256 FALSE, /* pc_relative */
1257 0, /* bitpos */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE), /* pcrel_offset */
1265
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1268 0, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
bb3f9ed8 1282 16, /* rightshift */
a06ea964
NC
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 12, /* bitsize */
1285 FALSE, /* pc_relative */
1286 0, /* bitpos */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1296 0, /* rightshift */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1298 12, /* bitsize */
1299 FALSE, /* pc_relative */
1300 0, /* bitpos */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1308
1309 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1310 0, /* rightshift */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1312 12, /* bitsize */
1313 FALSE, /* pc_relative */
1314 0, /* bitpos */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE, /* partial_inplace */
1319 0x0, /* src_mask */
1320 0x0, /* dst_mask */
1321 FALSE), /* pcrel_offset */
1322
1323 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1324 0, /* rightshift */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1326 12, /* bitsize */
1327 FALSE, /* pc_relative */
1328 0, /* bitpos */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE, /* partial_inplace */
1333 0x0, /* src_mask */
1334 0x0, /* dst_mask */
1335 FALSE), /* pcrel_offset */
1336
1337 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1338 0, /* rightshift */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1340 12, /* bitsize */
1341 FALSE, /* pc_relative */
1342 0, /* bitpos */
1343 complain_overflow_dont, /* complain_on_overflow */
1344 bfd_elf_generic_reloc, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE, /* partial_inplace */
1347 0x0, /* src_mask */
1348 0x0, /* dst_mask */
1349 FALSE), /* pcrel_offset */
1350};
1351
1352static reloc_howto_type *
1353elf64_aarch64_howto_from_type (unsigned int r_type)
1354{
1355 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1356 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1357
1358 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1359 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1360
1361 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1362 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1363
1364 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1365 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1366
1367 switch (r_type)
1368 {
1369 case R_AARCH64_NONE:
1370 return &elf64_aarch64_howto_none;
1371
1372 }
1373 bfd_set_error (bfd_error_bad_value);
1374 return NULL;
1375}
1376
1377static void
1378elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1379 Elf_Internal_Rela *elf_reloc)
1380{
1381 unsigned int r_type;
1382
1383 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1384 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1385}
1386
1387struct elf64_aarch64_reloc_map
1388{
1389 bfd_reloc_code_real_type bfd_reloc_val;
1390 unsigned int elf_reloc_val;
1391};
1392
1393/* All entries in this list must also be present in
1394 elf64_aarch64_howto_table. */
1395static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1396{
1397 {BFD_RELOC_NONE, R_AARCH64_NONE},
1398
1399 /* Basic data relocations. */
1400 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1401 {BFD_RELOC_64, R_AARCH64_ABS64},
1402 {BFD_RELOC_32, R_AARCH64_ABS32},
1403 {BFD_RELOC_16, R_AARCH64_ABS16},
1404 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1405 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1406 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1407
1408 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1409 value inline. */
1410 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1411 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1412 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1413
1414 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1415 signed value inline. */
1416 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1417 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1418 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1419
1420 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1421 unsigned value inline. */
1422 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1423 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1424 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1425 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1426
1427 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1428 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1429 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1430 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1431 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1432 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1433 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1434 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1435 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1436 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1437 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1438
1439 /* Relocations for control-flow instructions. */
1440 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1441 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1442 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1443 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1444
1445 /* Relocations for PIC. */
f41aef5f 1446 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
a06ea964
NC
1447 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1448 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1449
1450 /* Relocations for TLS. */
1451 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1452 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1453 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1454 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1455 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1456 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1457 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1458 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1459 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1460 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1461 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1462 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1463 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1464 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1465 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1466 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1467 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1468 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1469 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1470 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1471 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1472 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1473 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1474 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
1475 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1476 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
1477 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1478 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1479 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1480 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1481 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1482 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1483 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1484 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1485 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1486 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1487 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1488};
1489
1490static reloc_howto_type *
1491elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1492 bfd_reloc_code_real_type code)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1497 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1500
1501 bfd_set_error (bfd_error_bad_value);
1502 return NULL;
1503}
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 const char *r_name)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1512 if (elf64_aarch64_howto_table[i].name != NULL
1513 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1514 return &elf64_aarch64_howto_table[i];
1515
1516 return NULL;
1517}
1518
cd6fa7fd
YZ
1519/* Support for core dump NOTE sections. */
1520
1521static bfd_boolean
1522elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1523{
1524 int offset;
1525 size_t size;
1526
1527 switch (note->descsz)
1528 {
1529 default:
1530 return FALSE;
1531
1532 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1533 /* pr_cursig */
228e534f 1534 elf_tdata (abfd)->core->signal
cd6fa7fd
YZ
1535 = bfd_get_16 (abfd, note->descdata + 12);
1536
1537 /* pr_pid */
228e534f 1538 elf_tdata (abfd)->core->lwpid
cd6fa7fd
YZ
1539 = bfd_get_32 (abfd, note->descdata + 32);
1540
1541 /* pr_reg */
1542 offset = 112;
170a8295 1543 size = 272;
cd6fa7fd
YZ
1544
1545 break;
1546 }
1547
1548 /* Make a ".reg/999" section. */
1549 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1550 size, note->descpos + offset);
1551}
1552
a06ea964
NC
1553#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1554#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1555#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1556#define TARGET_BIG_NAME "elf64-bigaarch64"
1557
cd6fa7fd
YZ
1558#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1559
a06ea964
NC
1560typedef unsigned long int insn32;
1561
1562/* The linker script knows the section names for placement.
1563 The entry_names are used to do simple name mangling on the stubs.
1564 Given a function name, and its type, the stub can be found. The
1565 name can be changed. The only requirement is the %s be present. */
1566#define STUB_ENTRY_NAME "__%s_veneer"
1567
1568/* The name of the dynamic interpreter. This is put in the .interp
1569 section. */
1570#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1571
1572#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1573 (((1 << 25) - 1) << 2)
1574#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1575 (-((1 << 25) << 2))
1576
1577#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1578#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1579
1580static int
1581aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1582{
1583 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1584 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1585}
1586
1587static int
1588aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1589{
1590 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1591 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1593}
1594
/* Veneer used when the final target is within ADRP range of the
   stub: form the target address in ip0 and branch to it.  */
static const uint32_t aarch64_adrp_branch_stub [] =
{
  0x90000010,  /* adrp ip0, X          R_AARCH64_ADR_HI21_PCREL(X)   */
  0x91000210,  /* add  ip0, ip0, :lo12:X  R_AARCH64_ADD_ABS_LO12_NC(X) */
  0xd61f0200,  /* br   ip0 */
};
1603
/* Position-independent long-branch veneer: load a 64-bit offset from
   the literal at 1:, add the current address, branch.  The trailing
   two zero words hold the .xword literal patched by
   R_AARCH64_PREL64(X) + 12.  */
static const uint32_t aarch64_long_branch_stub[] =
{
  0x58000090,  /* ldr  ip0, 1f */
  0x10000011,  /* adr  ip1, #0 */
  0x8b110210,  /* add  ip0, ip0, ip1 */
  0xd61f0200,  /* br   ip0 */
  0x00000000,  /* 1: .xword  R_AARCH64_PREL64(X) + 12 */
  0x00000000,
};
1615
1616/* Section name for stubs is the associated section name plus this
1617 string. */
1618#define STUB_SUFFIX ".stub"
1619
/* The kinds of veneer this backend can emit.  */
enum elf64_aarch64_stub_type
{
  aarch64_stub_none = 0,        /* No stub required.  */
  aarch64_stub_adrp_branch = 1, /* adrp/add/br sequence.  */
  aarch64_stub_long_branch = 2, /* PC-relative literal + br.  */
};
1626
1627struct elf64_aarch64_stub_hash_entry
1628{
1629 /* Base hash table entry structure. */
1630 struct bfd_hash_entry root;
1631
1632 /* The stub section. */
1633 asection *stub_sec;
1634
1635 /* Offset within stub_sec of the beginning of this stub. */
1636 bfd_vma stub_offset;
1637
1638 /* Given the symbol's value and its section we can determine its final
1639 value when building the stubs (so the stub knows where to jump). */
1640 bfd_vma target_value;
1641 asection *target_section;
1642
1643 enum elf64_aarch64_stub_type stub_type;
1644
1645 /* The symbol table entry, if any, that this was derived from. */
1646 struct elf64_aarch64_link_hash_entry *h;
1647
1648 /* Destination symbol type */
1649 unsigned char st_type;
1650
1651 /* Where this stub is being called from, or, in the case of combined
1652 stub sections, the first input section in the group. */
1653 asection *id_sec;
1654
1655 /* The name for the local symbol at the start of this stub. The
1656 stub name in the hash table has to be unique; this does not, so
1657 it can be friendlier. */
1658 char *output_name;
1659};
1660
1661/* Used to build a map of a section. This is required for mixed-endian
1662 code/data. */
1663
1664typedef struct elf64_elf_section_map
1665{
1666 bfd_vma vma;
1667 char type;
1668}
1669elf64_aarch64_section_map;
1670
1671
1672typedef struct _aarch64_elf_section_data
1673{
1674 struct bfd_elf_section_data elf;
1675 unsigned int mapcount;
1676 unsigned int mapsize;
1677 elf64_aarch64_section_map *map;
1678}
1679_aarch64_elf_section_data;
1680
1681#define elf64_aarch64_section_data(sec) \
1682 ((_aarch64_elf_section_data *) elf_section_data (sec))
1683
1684/* The size of the thread control block. */
1685#define TCB_SIZE 16
1686
1687struct elf_aarch64_local_symbol
1688{
1689 unsigned int got_type;
1690 bfd_signed_vma got_refcount;
1691 bfd_vma got_offset;
1692
1693 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1694 offset is from the end of the jump table and reserved entries
1695 within the PLTGOT.
1696
1697 The magic value (bfd_vma) -1 indicates that an offset has not be
1698 allocated. */
1699 bfd_vma tlsdesc_got_jump_table_offset;
1700};
1701
1702struct elf_aarch64_obj_tdata
1703{
1704 struct elf_obj_tdata root;
1705
1706 /* local symbol descriptors */
1707 struct elf_aarch64_local_symbol *locals;
1708
1709 /* Zero to warn when linking objects with incompatible enum sizes. */
1710 int no_enum_size_warning;
1711
1712 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1713 int no_wchar_size_warning;
1714};
1715
1716#define elf_aarch64_tdata(bfd) \
1717 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1718
1719#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1720
1721#define is_aarch64_elf(bfd) \
1722 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1723 && elf_tdata (bfd) != NULL \
1724 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1725
1726static bfd_boolean
1727elf64_aarch64_mkobject (bfd *abfd)
1728{
1729 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1730 AARCH64_ELF_DATA);
1731}
1732
1733/* The AArch64 linker needs to keep track of the number of relocs that it
1734 decides to copy in check_relocs for each symbol. This is so that
1735 it can discard PC relative relocs if it doesn't need them when
1736 linking with -Bsymbolic. We store the information in a field
1737 extending the regular ELF linker hash table. */
1738
1739/* This structure keeps track of the number of relocs we have copied
1740 for a given symbol. */
1741struct elf64_aarch64_relocs_copied
1742{
1743 /* Next section. */
1744 struct elf64_aarch64_relocs_copied *next;
1745 /* A section in dynobj. */
1746 asection *section;
1747 /* Number of relocs copied in this section. */
1748 bfd_size_type count;
1749 /* Number of PC-relative relocs copied in this section. */
1750 bfd_size_type pc_count;
1751};
1752
1753#define elf64_aarch64_hash_entry(ent) \
1754 ((struct elf64_aarch64_link_hash_entry *)(ent))
1755
1756#define GOT_UNKNOWN 0
1757#define GOT_NORMAL 1
1758#define GOT_TLS_GD 2
1759#define GOT_TLS_IE 4
1760#define GOT_TLSDESC_GD 8
1761
1762#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1763
1764/* AArch64 ELF linker hash entry. */
1765struct elf64_aarch64_link_hash_entry
1766{
1767 struct elf_link_hash_entry root;
1768
1769 /* Track dynamic relocs copied for this symbol. */
1770 struct elf_dyn_relocs *dyn_relocs;
1771
1772 /* Number of PC relative relocs copied for this symbol. */
1773 struct elf64_aarch64_relocs_copied *relocs_copied;
1774
1775 /* Since PLT entries have variable size, we need to record the
1776 index into .got.plt instead of recomputing it from the PLT
1777 offset. */
1778 bfd_signed_vma plt_got_offset;
1779
1780 /* Bit mask representing the type of GOT entry(s) if any required by
1781 this symbol. */
1782 unsigned int got_type;
1783
1784 /* A pointer to the most recently used stub hash entry against this
1785 symbol. */
1786 struct elf64_aarch64_stub_hash_entry *stub_cache;
1787
1788 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1789 is from the end of the jump table and reserved entries within the PLTGOT.
1790
1791 The magic value (bfd_vma) -1 indicates that an offset has not
1792 be allocated. */
1793 bfd_vma tlsdesc_got_jump_table_offset;
1794};
1795
1796static unsigned int
1797elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1798 bfd *abfd,
1799 unsigned long r_symndx)
1800{
1801 if (h)
1802 return elf64_aarch64_hash_entry (h)->got_type;
1803
1804 if (! elf64_aarch64_locals (abfd))
1805 return GOT_UNKNOWN;
1806
1807 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1808}
1809
1810/* Traverse an AArch64 ELF linker hash table. */
1811#define elf64_aarch64_link_hash_traverse(table, func, info) \
1812 (elf_link_hash_traverse \
1813 (&(table)->root, \
1814 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1815 (info)))
1816
1817/* Get the AArch64 elf linker hash table from a link_info structure. */
1818#define elf64_aarch64_hash_table(info) \
1819 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1820
1821#define aarch64_stub_hash_lookup(table, string, create, copy) \
1822 ((struct elf64_aarch64_stub_hash_entry *) \
1823 bfd_hash_lookup ((table), (string), (create), (copy)))
1824
/* AArch64 ELF linker hash table.  */
struct elf64_aarch64_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd *obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table, keyed by stub name (see
     elf64_aarch64_stub_name).  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  Indexed by input section id.  */
  struct map_stub
  {
    /* This is the section to which stubs in the group will be
       attached.  */
    asection *link_sec;
    /* The stub section.  */
    asection *stub_sec;
  } *stub_group;

  /* Assorted information used by elf64_aarch64_size_stubs.  */
  unsigned int bfd_count;
  int top_index;
  asection **input_list;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma tlsdesc_plt;

  /* The GOT offset for the lazy trampoline.  Communicated to the
     loader via DT_TLSDESC_GOT.  The magic value (bfd_vma) -1
     indicates an offset is not allocated.  */
  bfd_vma dt_tlsdesc_got;
};
1891
1892
1893/* Return non-zero if the indicated VALUE has overflowed the maximum
1894 range expressible by a unsigned number with the indicated number of
1895 BITS. */
1896
1897static bfd_reloc_status_type
1898aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1899{
1900 bfd_vma lim;
1901 if (bits >= sizeof (bfd_vma) * 8)
1902 return bfd_reloc_ok;
1903 lim = (bfd_vma) 1 << bits;
1904 if (value >= lim)
1905 return bfd_reloc_overflow;
1906 return bfd_reloc_ok;
1907}
1908
1909
1910/* Return non-zero if the indicated VALUE has overflowed the maximum
1911 range expressible by an signed number with the indicated number of
1912 BITS. */
1913
1914static bfd_reloc_status_type
1915aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1916{
1917 bfd_signed_vma svalue = (bfd_signed_vma) value;
1918 bfd_signed_vma lim;
1919
1920 if (bits >= sizeof (bfd_vma) * 8)
1921 return bfd_reloc_ok;
1922 lim = (bfd_signed_vma) 1 << (bits - 1);
1923 if (svalue < -lim || svalue >= lim)
1924 return bfd_reloc_overflow;
1925 return bfd_reloc_ok;
1926}
1927
1928/* Create an entry in an AArch64 ELF linker hash table. */
1929
1930static struct bfd_hash_entry *
1931elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1932 struct bfd_hash_table *table,
1933 const char *string)
1934{
1935 struct elf64_aarch64_link_hash_entry *ret =
1936 (struct elf64_aarch64_link_hash_entry *) entry;
1937
1938 /* Allocate the structure if it has not already been allocated by a
1939 subclass. */
1940 if (ret == NULL)
1941 ret = bfd_hash_allocate (table,
1942 sizeof (struct elf64_aarch64_link_hash_entry));
1943 if (ret == NULL)
1944 return (struct bfd_hash_entry *) ret;
1945
1946 /* Call the allocation method of the superclass. */
1947 ret = ((struct elf64_aarch64_link_hash_entry *)
1948 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1949 table, string));
1950 if (ret != NULL)
1951 {
1952 ret->dyn_relocs = NULL;
1953 ret->relocs_copied = NULL;
1954 ret->got_type = GOT_UNKNOWN;
1955 ret->plt_got_offset = (bfd_vma) - 1;
1956 ret->stub_cache = NULL;
1957 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1958 }
1959
1960 return (struct bfd_hash_entry *) ret;
1961}
1962
1963/* Initialize an entry in the stub hash table. */
1964
1965static struct bfd_hash_entry *
1966stub_hash_newfunc (struct bfd_hash_entry *entry,
1967 struct bfd_hash_table *table, const char *string)
1968{
1969 /* Allocate the structure if it has not already been allocated by a
1970 subclass. */
1971 if (entry == NULL)
1972 {
1973 entry = bfd_hash_allocate (table,
1974 sizeof (struct
1975 elf64_aarch64_stub_hash_entry));
1976 if (entry == NULL)
1977 return entry;
1978 }
1979
1980 /* Call the allocation method of the superclass. */
1981 entry = bfd_hash_newfunc (entry, table, string);
1982 if (entry != NULL)
1983 {
1984 struct elf64_aarch64_stub_hash_entry *eh;
1985
1986 /* Initialize the local fields. */
1987 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1988 eh->stub_sec = NULL;
1989 eh->stub_offset = 0;
1990 eh->target_value = 0;
1991 eh->target_section = NULL;
1992 eh->stub_type = aarch64_stub_none;
1993 eh->h = NULL;
1994 eh->id_sec = NULL;
1995 }
1996
1997 return entry;
1998}
1999
2000
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called by
   the generic ELF linker when IND becomes an indirect or weak
   reference to DIR; bookkeeping accumulated against IND must be
   transferred to DIR so it is not lost.  */

static void
elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
				    struct elf_link_hash_entry *dir,
				    struct elf_link_hash_entry *ind)
{
  struct elf64_aarch64_link_hash_entry *edir, *eind;

  edir = (struct elf64_aarch64_link_hash_entry *) dir;
  eind = (struct elf64_aarch64_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    /* Same section: fold P's counts into Q and unlink
		       P from EIND's list.  */
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      /* No match: keep P and advance.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Splice EDIR's old list onto the tail of what remains of
	     EIND's list.  */
	  *pp = edir->dyn_relocs;
	}

      /* EDIR now owns the merged list.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (eind->relocs_copied != NULL)
    {
      if (edir->relocs_copied != NULL)
	{
	  struct elf64_aarch64_relocs_copied **pp;
	  struct elf64_aarch64_relocs_copied *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
	    {
	      struct elf64_aarch64_relocs_copied *q;

	      for (q = edir->relocs_copied; q != NULL; q = q->next)
		if (q->section == p->section)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  *pp = edir->relocs_copied;
	}

      edir->relocs_copied = eind->relocs_copied;
      eind->relocs_copied = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  Only take IND's GOT type when DIR has no
	 GOT references of its own yet.  */
      if (dir->got.refcount <= 0)
	{
	  edir->got_type = eind->got_type;
	  eind->got_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
2087
2088/* Create an AArch64 elf linker hash table. */
2089
2090static struct bfd_link_hash_table *
2091elf64_aarch64_link_hash_table_create (bfd *abfd)
2092{
2093 struct elf64_aarch64_link_hash_table *ret;
2094 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2095
7bf52ea2 2096 ret = bfd_zmalloc (amt);
a06ea964
NC
2097 if (ret == NULL)
2098 return NULL;
2099
2100 if (!_bfd_elf_link_hash_table_init
2101 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2102 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2103 {
2104 free (ret);
2105 return NULL;
2106 }
2107
a06ea964
NC
2108 ret->plt_header_size = PLT_ENTRY_SIZE;
2109 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
a06ea964 2110 ret->obfd = abfd;
a06ea964
NC
2111 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2112
2113 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2114 sizeof (struct elf64_aarch64_stub_hash_entry)))
2115 {
2116 free (ret);
2117 return NULL;
2118 }
2119
2120 return &ret->root.root;
2121}
2122
2123/* Free the derived linker hash table. */
2124
2125static void
2126elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2127{
2128 struct elf64_aarch64_link_hash_table *ret
2129 = (struct elf64_aarch64_link_hash_table *) hash;
2130
2131 bfd_hash_table_free (&ret->stub_hash_table);
9f7c3e5e 2132 _bfd_elf_link_hash_table_free (hash);
a06ea964
NC
2133}
2134
/* Compute the value a relocation of type R_TYPE should resolve to.
   PLACE is the address of the field being relocated, VALUE the symbol
   value and ADDEND the explicit addend.  WEAK_UNDEF_P is TRUE for
   undefined weak symbols, whose PC-relative relocations are resolved
   against PLACE itself so only the addend remains.  The caller encodes
   the returned value into the instruction or data word.  */

static bfd_vma
aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
			    bfd_vma addend, bfd_boolean weak_undef_p)
{
  switch (r_type)
    {
    /* Nothing to compute for these.  */
    case R_AARCH64_TLSDESC_CALL:
    case R_AARCH64_NONE:
    case R_AARCH64_NULL:
      break;

    /* PC-relative forms: S + A - P.  */
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_CONDBR19:
    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_PREL16:
    case R_AARCH64_PREL32:
    case R_AARCH64_PREL64:
    case R_AARCH64_TSTBR14:
      if (weak_undef_p)
	value = place;
      value = value + addend - place;
      break;

    /* Branches: S + A - P.  NOTE(review): weak_undef_p is not
       consulted here, unlike the group above — presumably undefined
       weak branch targets are handled by the caller; confirm.  */
    case R_AARCH64_CALL26:
    case R_AARCH64_JUMP26:
      value = value + addend - place;
      break;

    /* Absolute forms: S + A.  */
    case R_AARCH64_ABS16:
    case R_AARCH64_ABS32:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      value = value + addend;
      break;

    /* Page-relative: Page(S + A) - Page(P).  */
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      if (weak_undef_p)
	value = PG (place);
      value = PG (value + addend) - PG (place);
      break;

    /* GOT-relative load: S + A - P.  */
    case R_AARCH64_GOT_LD_PREL19:
      value = value + addend - place;
      break;

    /* Page of a GOT/TLS descriptor entry relative to the page of P.  */
    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
      value = PG (value + addend) - PG (place);
      break;

    /* Low 12 bits (page offset) of S + A.  */
    case R_AARCH64_ADD_ABS_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_TLSDESC_ADD:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSDESC_LDR:
    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      value = PG_OFFSET (value + addend);
      break;

    /* TPREL pieces: isolate the field each MOVW/ADD encodes.  */
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      value = (value + addend) & (bfd_vma) 0xffff0000;
      break;
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
      value = (value + addend) & (bfd_vma) 0xfff000;
      break;

    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      value = (value + addend) & (bfd_vma) 0xffff;
      break;

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
      value = (value + addend) & ~(bfd_vma) 0xffffffff;
      value -= place & ~(bfd_vma) 0xffffffff;
      break;
    }
  /* Unknown relocation types fall through with VALUE unchanged.  */
  return value;
}
2234
2235static bfd_boolean
2236aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2237 bfd_vma offset, bfd_vma value)
2238{
2239 reloc_howto_type *howto;
2240 bfd_vma place;
2241
2242 howto = elf64_aarch64_howto_from_type (r_type);
2243 place = (input_section->output_section->vma + input_section->output_offset
2244 + offset);
2245 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2246 return bfd_elf_aarch64_put_addend (input_bfd,
2247 input_section->contents + offset,
2248 howto, value);
2249}
2250
2251static enum elf64_aarch64_stub_type
2252aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2253{
2254 if (aarch64_valid_for_adrp_p (value, place))
2255 return aarch64_stub_adrp_branch;
2256 return aarch64_stub_long_branch;
2257}
2258
2259/* Determine the type of stub needed, if any, for a call. */
2260
2261static enum elf64_aarch64_stub_type
2262aarch64_type_of_stub (struct bfd_link_info *info,
2263 asection *input_sec,
2264 const Elf_Internal_Rela *rel,
2265 unsigned char st_type,
2266 struct elf64_aarch64_link_hash_entry *hash,
2267 bfd_vma destination)
2268{
2269 bfd_vma location;
2270 bfd_signed_vma branch_offset;
2271 unsigned int r_type;
2272 struct elf64_aarch64_link_hash_table *globals;
2273 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2274 bfd_boolean via_plt_p;
2275
2276 if (st_type != STT_FUNC)
2277 return stub_type;
2278
2279 globals = elf64_aarch64_hash_table (info);
2280 via_plt_p = (globals->root.splt != NULL && hash != NULL
2281 && hash->root.plt.offset != (bfd_vma) - 1);
2282
2283 if (via_plt_p)
2284 return stub_type;
2285
2286 /* Determine where the call point is. */
2287 location = (input_sec->output_offset
2288 + input_sec->output_section->vma + rel->r_offset);
2289
2290 branch_offset = (bfd_signed_vma) (destination - location);
2291
2292 r_type = ELF64_R_TYPE (rel->r_info);
2293
2294 /* We don't want to redirect any old unconditional jump in this way,
2295 only one which is being used for a sibcall, where it is
2296 acceptable for the IP0 and IP1 registers to be clobbered. */
2297 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2298 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2299 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2300 {
2301 stub_type = aarch64_stub_long_branch;
2302 }
2303
2304 return stub_type;
2305}
2306
2307/* Build a name for an entry in the stub hash table. */
2308
2309static char *
2310elf64_aarch64_stub_name (const asection *input_section,
2311 const asection *sym_sec,
2312 const struct elf64_aarch64_link_hash_entry *hash,
2313 const Elf_Internal_Rela *rel)
2314{
2315 char *stub_name;
2316 bfd_size_type len;
2317
2318 if (hash)
2319 {
2320 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2321 stub_name = bfd_malloc (len);
2322 if (stub_name != NULL)
2323 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2324 (unsigned int) input_section->id,
2325 hash->root.root.root.string,
2326 rel->r_addend);
2327 }
2328 else
2329 {
2330 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2331 stub_name = bfd_malloc (len);
2332 if (stub_name != NULL)
2333 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2334 (unsigned int) input_section->id,
2335 (unsigned int) sym_sec->id,
2336 (unsigned int) ELF64_R_SYM (rel->r_info),
2337 rel->r_addend);
2338 }
2339
2340 return stub_name;
2341}
2342
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL when
   INPUT_SECTION is not code, the name cannot be built, or no stub
   exists for this (section group, symbol, addend) triple.  */

static struct elf64_aarch64_stub_hash_entry *
elf64_aarch64_get_stub_entry (const asection *input_section,
			      const asection *sym_sec,
			      struct elf_link_hash_entry *hash,
			      const Elf_Internal_Rela *rel,
			      struct elf64_aarch64_link_hash_table *htab)
{
  struct elf64_aarch64_stub_hash_entry *stub_entry;
  struct elf64_aarch64_link_hash_entry *h =
    (struct elf64_aarch64_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections contain branches that may need stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the last stub looked up against H if it belongs
     to the same section group.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
      if (stub_name == NULL)
	return NULL;

      /* Lookup only (CREATE is FALSE); a miss returns NULL and is not
	 an error.  */
      stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
					     stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
2391
2392/* Add a new stub entry to the stub hash. Not all fields of the new
2393 stub entry are initialised. */
2394
2395static struct elf64_aarch64_stub_hash_entry *
2396elf64_aarch64_add_stub (const char *stub_name,
2397 asection *section,
2398 struct elf64_aarch64_link_hash_table *htab)
2399{
2400 asection *link_sec;
2401 asection *stub_sec;
2402 struct elf64_aarch64_stub_hash_entry *stub_entry;
2403
2404 link_sec = htab->stub_group[section->id].link_sec;
2405 stub_sec = htab->stub_group[section->id].stub_sec;
2406 if (stub_sec == NULL)
2407 {
2408 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2409 if (stub_sec == NULL)
2410 {
2411 size_t namelen;
2412 bfd_size_type len;
2413 char *s_name;
2414
2415 namelen = strlen (link_sec->name);
2416 len = namelen + sizeof (STUB_SUFFIX);
2417 s_name = bfd_alloc (htab->stub_bfd, len);
2418 if (s_name == NULL)
2419 return NULL;
2420
2421 memcpy (s_name, link_sec->name, namelen);
2422 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2423 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2424 if (stub_sec == NULL)
2425 return NULL;
2426 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2427 }
2428 htab->stub_group[section->id].stub_sec = stub_sec;
2429 }
2430
2431 /* Enter this entry into the linker stub hash table. */
2432 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2433 TRUE, FALSE);
2434 if (stub_entry == NULL)
2435 {
2436 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2437 section->owner, stub_name);
2438 return NULL;
2439 }
2440
2441 stub_entry->stub_sec = stub_sec;
2442 stub_entry->stub_offset = 0;
2443 stub_entry->id_sec = link_sec;
2444
2445 return stub_entry;
2446}
2447
/* Build one stub; called via bfd_hash_traverse over the stub hash
   table.  Emits the stub's instruction template into the stub
   section's contents and applies the relocations that point it at its
   target.  IN_ARG is unused.  */

static bfd_boolean
aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf64_aarch64_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  unsigned int template_size;
  const uint32_t *template;
  unsigned int i;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;

  stub_sec = stub_entry->stub_sec;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  if (stub_entry->stub_type == aarch64_stub_long_branch)
    {
      bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
		       + stub_sec->output_offset);

      /* See if we can relax the stub to the shorter ADRP form.  */
      if (aarch64_valid_for_adrp_p (sym_value, place))
	stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
    }

  /* Select the instruction template for this stub type.  */
  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      template = aarch64_adrp_branch_stub;
      template_size = sizeof (aarch64_adrp_branch_stub);
      break;
    case aarch64_stub_long_branch:
      template = aarch64_long_branch_stub;
      template_size = sizeof (aarch64_long_branch_stub);
      break;
    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Copy the 32-bit template words out little-endian.  */
  for (i = 0; i < (template_size / sizeof template[0]); i++)
    {
      bfd_putl32 (template[i], loc);
      loc += 4;
    }

  /* Round the occupied size up to 8 bytes, matching
     aarch64_size_one_stub.  */
  template_size = (template_size + 7) & ~7;
  stub_sec->size += template_size;

  /* Now patch the target address into the stub.  */
  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      /* NOTE(review): the truth sense of this test depends on what
	 bfd_elf_aarch64_put_addend (via aarch64_relocate) returns,
	 which is not visible here — verify that BFD_FAIL fires on
	 failure rather than success.  */
      if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
			    stub_entry->stub_offset, sym_value))
	/* The stub would not have been relaxed if the offset was out
	   of range.  */
	BFD_FAIL ();

      /* Fix up the ADD with the low 12 bits of the target.  */
      _bfd_final_link_relocate
	(elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
	 stub_bfd,
	 stub_sec,
	 stub_sec->contents,
	 stub_entry->stub_offset + 4,
	 sym_value,
	 0);
      break;

    case aarch64_stub_long_branch:
      /* We want the value relative to the address 12 bytes back from the
	 value itself.  */
      _bfd_final_link_relocate (elf64_aarch64_howto_from_type
				(R_AARCH64_PREL64), stub_bfd, stub_sec,
				stub_sec->contents,
				stub_entry->stub_offset + 16,
				sym_value + 12, 0);
      break;
    default:
      break;
    }

  return TRUE;
}
2545
2546/* As above, but don't actually build the stub. Just bump offset so
2547 we know stub section sizes. */
2548
2549static bfd_boolean
2550aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2551 void *in_arg ATTRIBUTE_UNUSED)
2552{
2553 struct elf64_aarch64_stub_hash_entry *stub_entry;
2554 int size;
2555
2556 /* Massage our args to the form they really have. */
2557 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2558
2559 switch (stub_entry->stub_type)
2560 {
2561 case aarch64_stub_adrp_branch:
2562 size = sizeof (aarch64_adrp_branch_stub);
2563 break;
2564 case aarch64_stub_long_branch:
2565 size = sizeof (aarch64_long_branch_stub);
2566 break;
2567 default:
2568 BFD_FAIL ();
2569 return FALSE;
2570 break;
2571 }
2572
2573 size = (size + 7) & ~7;
2574 stub_entry->stub_sec->size += size;
2575 return TRUE;
2576}
2577
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf64_aarch64_setup_section_lists (bfd *output_bfd,
				   struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf64_aarch64_link_hash_table *htab =
    elf64_aarch64_hash_table (info);

  if (!is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL; input_bfd = input_bfd->link_next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL; section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One zero-initialized map_stub slot per input section id.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL; section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections start with an empty (NULL) list; they are
     the ones that may need stubs.  */
  for (section = output_bfd->sections;
       section != NULL; section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
2652
/* Used by elf64_aarch64_next_input_section and group_sections.
   Reuses the stub_group link_sec slot as a "previous section in this
   output section" link while the input lists are being built.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2655
2656/* The linker repeatedly calls this function for each input section,
2657 in the order that input sections are linked into output sections.
2658 Build lists of input sections to determine groupings between which
2659 we may insert linker stubs. */
2660
2661void
2662elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2663{
2664 struct elf64_aarch64_link_hash_table *htab =
2665 elf64_aarch64_hash_table (info);
2666
2667 if (isec->output_section->index <= htab->top_index)
2668 {
2669 asection **list = htab->input_list + isec->output_section->index;
2670
2671 if (*list != bfd_abs_section_ptr)
2672 {
2673 /* Steal the link_sec pointer for our list. */
2674 /* This happens to make the list in reverse order,
2675 which is what we want. */
2676 PREV_SEC (isec) = *list;
2677 *list = isec;
2678 }
2679 }
2680}
2681
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the beginning of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf64_aarch64_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_before_branch)
{
  asection **list = htab->input_list + htab->top_index;

  do
    {
      /* Each list is in reverse link order (tail = last section of
	 the output section); see elf64_aarch64_next_input_section.  */
      asection *tail = *list;

      if (tail == bfd_abs_section_ptr)
	continue;

      while (tail != NULL)
	{
	  asection *curr;
	  asection *prev;
	  bfd_size_type total;

	  /* Walk backwards from TAIL, accumulating output-offset
	     spans, until adding another section would exceed
	     STUB_GROUP_SIZE.  CURR becomes the group leader.  */
	  curr = tail;
	  total = tail->size;
	  while ((prev = PREV_SEC (curr)) != NULL
		 && ((total += curr->output_offset - prev->output_offset)
		     < stub_group_size))
	    curr = prev;

	  /* OK, the size from the start of CURR to the end is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the tail section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      prev = PREV_SEC (tail);
	      /* Set up this stub group.  */
	      htab->stub_group[tail->id].link_sec = curr;
	    }
	  while (tail != curr && (tail = prev) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes before the stub section can be handled by it too.  */
	  if (!stubs_always_before_branch)
	    {
	      total = 0;
	      while (prev != NULL
		     && ((total += tail->output_offset - prev->output_offset)
			 < stub_group_size))
		{
		  tail = prev;
		  prev = PREV_SEC (tail);
		  htab->stub_group[tail->id].link_sec = curr;
		}
	    }
	  tail = prev;
	}
    }
  while (list-- != htab->input_list);

  /* The per-output-section lists are no longer needed.  Note the
     stale htab->input_list pointer is not cleared here.  */
  free (htab->input_list);
}
2752
2753#undef PREV_SEC
2754
2755/* Determine and set the size of the stub section for a final link.
2756
2757 The basic idea here is to examine all the relocations looking for
2758 PC-relative calls to a target that is unreachable with a "bl"
2759 instruction. */
2760
2761bfd_boolean
2762elf64_aarch64_size_stubs (bfd *output_bfd,
2763 bfd *stub_bfd,
2764 struct bfd_link_info *info,
2765 bfd_signed_vma group_size,
2766 asection * (*add_stub_section) (const char *,
2767 asection *),
2768 void (*layout_sections_again) (void))
2769{
2770 bfd_size_type stub_group_size;
2771 bfd_boolean stubs_always_before_branch;
2772 bfd_boolean stub_changed = 0;
2773 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2774
2775 /* Propagate mach to stub bfd, because it may not have been
2776 finalized when we created stub_bfd. */
2777 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2778 bfd_get_mach (output_bfd));
2779
2780 /* Stash our params away. */
2781 htab->stub_bfd = stub_bfd;
2782 htab->add_stub_section = add_stub_section;
2783 htab->layout_sections_again = layout_sections_again;
2784 stubs_always_before_branch = group_size < 0;
2785 if (group_size < 0)
2786 stub_group_size = -group_size;
2787 else
2788 stub_group_size = group_size;
2789
2790 if (stub_group_size == 1)
2791 {
2792 /* Default values. */
2793 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2794 stub_group_size = 127 * 1024 * 1024;
2795 }
2796
2797 group_sections (htab, stub_group_size, stubs_always_before_branch);
2798
2799 while (1)
2800 {
2801 bfd *input_bfd;
2802 unsigned int bfd_indx;
2803 asection *stub_sec;
2804
2805 for (input_bfd = info->input_bfds, bfd_indx = 0;
2806 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2807 {
2808 Elf_Internal_Shdr *symtab_hdr;
2809 asection *section;
2810 Elf_Internal_Sym *local_syms = NULL;
2811
2812 /* We'll need the symbol table in a second. */
2813 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2814 if (symtab_hdr->sh_info == 0)
2815 continue;
2816
2817 /* Walk over each section attached to the input bfd. */
2818 for (section = input_bfd->sections;
2819 section != NULL; section = section->next)
2820 {
2821 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2822
2823 /* If there aren't any relocs, then there's nothing more
2824 to do. */
2825 if ((section->flags & SEC_RELOC) == 0
2826 || section->reloc_count == 0
2827 || (section->flags & SEC_CODE) == 0)
2828 continue;
2829
2830 /* If this section is a link-once section that will be
2831 discarded, then don't create any stubs. */
2832 if (section->output_section == NULL
2833 || section->output_section->owner != output_bfd)
2834 continue;
2835
2836 /* Get the relocs. */
2837 internal_relocs
2838 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2839 NULL, info->keep_memory);
2840 if (internal_relocs == NULL)
2841 goto error_ret_free_local;
2842
2843 /* Now examine each relocation. */
2844 irela = internal_relocs;
2845 irelaend = irela + section->reloc_count;
2846 for (; irela < irelaend; irela++)
2847 {
2848 unsigned int r_type, r_indx;
2849 enum elf64_aarch64_stub_type stub_type;
2850 struct elf64_aarch64_stub_hash_entry *stub_entry;
2851 asection *sym_sec;
2852 bfd_vma sym_value;
2853 bfd_vma destination;
2854 struct elf64_aarch64_link_hash_entry *hash;
2855 const char *sym_name;
2856 char *stub_name;
2857 const asection *id_sec;
2858 unsigned char st_type;
2859 bfd_size_type len;
2860
2861 r_type = ELF64_R_TYPE (irela->r_info);
2862 r_indx = ELF64_R_SYM (irela->r_info);
2863
2864 if (r_type >= (unsigned int) R_AARCH64_end)
2865 {
2866 bfd_set_error (bfd_error_bad_value);
2867 error_ret_free_internal:
2868 if (elf_section_data (section)->relocs == NULL)
2869 free (internal_relocs);
2870 goto error_ret_free_local;
2871 }
2872
2873 /* Only look for stubs on unconditional branch and
2874 branch and link instructions. */
2875 if (r_type != (unsigned int) R_AARCH64_CALL26
2876 && r_type != (unsigned int) R_AARCH64_JUMP26)
2877 continue;
2878
2879 /* Now determine the call target, its name, value,
2880 section. */
2881 sym_sec = NULL;
2882 sym_value = 0;
2883 destination = 0;
2884 hash = NULL;
2885 sym_name = NULL;
2886 if (r_indx < symtab_hdr->sh_info)
2887 {
2888 /* It's a local symbol. */
2889 Elf_Internal_Sym *sym;
2890 Elf_Internal_Shdr *hdr;
2891
2892 if (local_syms == NULL)
2893 {
2894 local_syms
2895 = (Elf_Internal_Sym *) symtab_hdr->contents;
2896 if (local_syms == NULL)
2897 local_syms
2898 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2899 symtab_hdr->sh_info, 0,
2900 NULL, NULL, NULL);
2901 if (local_syms == NULL)
2902 goto error_ret_free_internal;
2903 }
2904
2905 sym = local_syms + r_indx;
2906 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2907 sym_sec = hdr->bfd_section;
2908 if (!sym_sec)
2909 /* This is an undefined symbol. It can never
2910 be resolved. */
2911 continue;
2912
2913 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2914 sym_value = sym->st_value;
2915 destination = (sym_value + irela->r_addend
2916 + sym_sec->output_offset
2917 + sym_sec->output_section->vma);
2918 st_type = ELF_ST_TYPE (sym->st_info);
2919 sym_name
2920 = bfd_elf_string_from_elf_section (input_bfd,
2921 symtab_hdr->sh_link,
2922 sym->st_name);
2923 }
2924 else
2925 {
2926 int e_indx;
2927
2928 e_indx = r_indx - symtab_hdr->sh_info;
2929 hash = ((struct elf64_aarch64_link_hash_entry *)
2930 elf_sym_hashes (input_bfd)[e_indx]);
2931
2932 while (hash->root.root.type == bfd_link_hash_indirect
2933 || hash->root.root.type == bfd_link_hash_warning)
2934 hash = ((struct elf64_aarch64_link_hash_entry *)
2935 hash->root.root.u.i.link);
2936
2937 if (hash->root.root.type == bfd_link_hash_defined
2938 || hash->root.root.type == bfd_link_hash_defweak)
2939 {
2940 struct elf64_aarch64_link_hash_table *globals =
2941 elf64_aarch64_hash_table (info);
2942 sym_sec = hash->root.root.u.def.section;
2943 sym_value = hash->root.root.u.def.value;
2944 /* For a destination in a shared library,
2945 use the PLT stub as target address to
2946 decide whether a branch stub is
2947 needed. */
2948 if (globals->root.splt != NULL && hash != NULL
2949 && hash->root.plt.offset != (bfd_vma) - 1)
2950 {
2951 sym_sec = globals->root.splt;
2952 sym_value = hash->root.plt.offset;
2953 if (sym_sec->output_section != NULL)
2954 destination = (sym_value
2955 + sym_sec->output_offset
2956 +
2957 sym_sec->output_section->vma);
2958 }
2959 else if (sym_sec->output_section != NULL)
2960 destination = (sym_value + irela->r_addend
2961 + sym_sec->output_offset
2962 + sym_sec->output_section->vma);
2963 }
2964 else if (hash->root.root.type == bfd_link_hash_undefined
2965 || (hash->root.root.type
2966 == bfd_link_hash_undefweak))
2967 {
2968 /* For a shared library, use the PLT stub as
2969 target address to decide whether a long
2970 branch stub is needed.
2971 For absolute code, they cannot be handled. */
2972 struct elf64_aarch64_link_hash_table *globals =
2973 elf64_aarch64_hash_table (info);
2974
2975 if (globals->root.splt != NULL && hash != NULL
2976 && hash->root.plt.offset != (bfd_vma) - 1)
2977 {
2978 sym_sec = globals->root.splt;
2979 sym_value = hash->root.plt.offset;
2980 if (sym_sec->output_section != NULL)
2981 destination = (sym_value
2982 + sym_sec->output_offset
2983 +
2984 sym_sec->output_section->vma);
2985 }
2986 else
2987 continue;
2988 }
2989 else
2990 {
2991 bfd_set_error (bfd_error_bad_value);
2992 goto error_ret_free_internal;
2993 }
2994 st_type = ELF_ST_TYPE (hash->root.type);
2995 sym_name = hash->root.root.root.string;
2996 }
2997
2998 /* Determine what (if any) linker stub is needed. */
2999 stub_type = aarch64_type_of_stub
3000 (info, section, irela, st_type, hash, destination);
3001 if (stub_type == aarch64_stub_none)
3002 continue;
3003
3004 /* Support for grouping stub sections. */
3005 id_sec = htab->stub_group[section->id].link_sec;
3006
3007 /* Get the name of this stub. */
3008 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
3009 irela);
3010 if (!stub_name)
3011 goto error_ret_free_internal;
3012
3013 stub_entry =
3014 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3015 stub_name, FALSE, FALSE);
3016 if (stub_entry != NULL)
3017 {
3018 /* The proper stub has already been created. */
3019 free (stub_name);
3020 continue;
3021 }
3022
3023 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3024 htab);
3025 if (stub_entry == NULL)
3026 {
3027 free (stub_name);
3028 goto error_ret_free_internal;
3029 }
3030
3031 stub_entry->target_value = sym_value;
3032 stub_entry->target_section = sym_sec;
3033 stub_entry->stub_type = stub_type;
3034 stub_entry->h = hash;
3035 stub_entry->st_type = st_type;
3036
3037 if (sym_name == NULL)
3038 sym_name = "unnamed";
3039 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3040 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3041 if (stub_entry->output_name == NULL)
3042 {
3043 free (stub_name);
3044 goto error_ret_free_internal;
3045 }
3046
3047 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3048 sym_name);
3049
3050 stub_changed = TRUE;
3051 }
3052
3053 /* We're done with the internal relocs, free them. */
3054 if (elf_section_data (section)->relocs == NULL)
3055 free (internal_relocs);
3056 }
3057 }
3058
3059 if (!stub_changed)
3060 break;
3061
3062 /* OK, we've added some stubs. Find out the new size of the
3063 stub sections. */
3064 for (stub_sec = htab->stub_bfd->sections;
3065 stub_sec != NULL; stub_sec = stub_sec->next)
3066 stub_sec->size = 0;
3067
3068 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3069
3070 /* Ask the linker to do its stuff. */
3071 (*htab->layout_sections_again) ();
3072 stub_changed = FALSE;
3073 }
3074
3075 return TRUE;
3076
3077error_ret_free_local:
3078 return FALSE;
3079}
3080
3081/* Build all the stubs associated with the current output file. The
3082 stubs are kept in a hash table attached to the main linker hash
3083 table. We also set up the .plt entries for statically linked PIC
3084 functions here. This function is called via aarch64_elf_finish in the
3085 linker. */
3086
3087bfd_boolean
3088elf64_aarch64_build_stubs (struct bfd_link_info *info)
3089{
3090 asection *stub_sec;
3091 struct bfd_hash_table *table;
3092 struct elf64_aarch64_link_hash_table *htab;
3093
3094 htab = elf64_aarch64_hash_table (info);
3095
3096 for (stub_sec = htab->stub_bfd->sections;
3097 stub_sec != NULL; stub_sec = stub_sec->next)
3098 {
3099 bfd_size_type size;
3100
3101 /* Ignore non-stub sections. */
3102 if (!strstr (stub_sec->name, STUB_SUFFIX))
3103 continue;
3104
3105 /* Allocate memory to hold the linker stubs. */
3106 size = stub_sec->size;
3107 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3108 if (stub_sec->contents == NULL && size != 0)
3109 return FALSE;
3110 stub_sec->size = 0;
3111 }
3112
3113 /* Build the stubs as directed by the stub hash table. */
3114 table = &htab->stub_hash_table;
3115 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3116
3117 return TRUE;
3118}
3119
3120
3121/* Add an entry to the code/data map for section SEC. */
3122
3123static void
3124elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3125{
3126 struct _aarch64_elf_section_data *sec_data =
3127 elf64_aarch64_section_data (sec);
3128 unsigned int newidx;
3129
3130 if (sec_data->map == NULL)
3131 {
3132 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3133 sec_data->mapcount = 0;
3134 sec_data->mapsize = 1;
3135 }
3136
3137 newidx = sec_data->mapcount++;
3138
3139 if (sec_data->mapcount > sec_data->mapsize)
3140 {
3141 sec_data->mapsize *= 2;
3142 sec_data->map = bfd_realloc_or_free
3143 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3144 }
3145
3146 if (sec_data->map)
3147 {
3148 sec_data->map[newidx].vma = vma;
3149 sec_data->map[newidx].type = type;
3150 }
3151}
3152
3153
3154/* Initialise maps of insn/data for input BFDs. */
3155void
3156bfd_elf64_aarch64_init_maps (bfd *abfd)
3157{
3158 Elf_Internal_Sym *isymbuf;
3159 Elf_Internal_Shdr *hdr;
3160 unsigned int i, localsyms;
3161
3162 /* Make sure that we are dealing with an AArch64 elf binary. */
3163 if (!is_aarch64_elf (abfd))
3164 return;
3165
3166 if ((abfd->flags & DYNAMIC) != 0)
3167 return;
3168
3169 hdr = &elf_symtab_hdr (abfd);
3170 localsyms = hdr->sh_info;
3171
3172 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3173 should contain the number of local symbols, which should come before any
3174 global symbols. Mapping symbols are always local. */
3175 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3176
3177 /* No internal symbols read? Skip this BFD. */
3178 if (isymbuf == NULL)
3179 return;
3180
3181 for (i = 0; i < localsyms; i++)
3182 {
3183 Elf_Internal_Sym *isym = &isymbuf[i];
3184 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3185 const char *name;
3186
3187 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3188 {
3189 name = bfd_elf_string_from_elf_section (abfd,
3190 hdr->sh_link,
3191 isym->st_name);
3192
3193 if (bfd_is_aarch64_special_symbol_name
3194 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3195 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3196 }
3197 }
3198}
3199
3200/* Set option values needed during linking. */
3201void
3202bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3203 struct bfd_link_info *link_info,
3204 int no_enum_warn,
3205 int no_wchar_warn, int pic_veneer)
3206{
3207 struct elf64_aarch64_link_hash_table *globals;
3208
3209 globals = elf64_aarch64_hash_table (link_info);
3210 globals->pic_veneer = pic_veneer;
3211
3212 BFD_ASSERT (is_aarch64_elf (output_bfd));
3213 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3214 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3215}
3216
3217#define MASK(n) ((1u << (n)) - 1)
3218
/* Decode the 26-bit offset of unconditional branch.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  /* imm26 occupies bits [25:0].  */
  return insn & 0x03ffffffu;
}
3225
/* Decode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  /* imm19 occupies bits [23:5].  */
  return (insn >> 5) & 0x0007ffffu;
}
3232
/* Decode the 19-bit offset of load literal.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  /* imm19 occupies bits [23:5].  */
  return (insn >> 5) & 0x0007ffffu;
}
3239
/* Decode the 14-bit offset of test & branch.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  /* imm14 occupies bits [18:5].  */
  return (insn >> 5) & 0x3fffu;
}
3246
/* Decode the 16-bit imm of move wide.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  /* imm16 occupies bits [20:5].  */
  return (insn >> 5) & 0xffffu;
}
3253
/* Decode the 21-bit imm of adr.  */
static inline uint32_t
decode_adr_imm (uint32_t insn)
{
  /* immlo sits in bits [30:29], immhi in bits [23:5]; the result is
     immhi:immlo with immlo in the two least significant bits.  */
  uint32_t immlo = (insn >> 29) & 0x3u;
  uint32_t immhi = (insn >> 5) & 0x0007ffffu;

  return (immhi << 2) | immlo;
}
3260
/* Decode the 12-bit imm of add immediate.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  /* imm12 occupies bits [21:10].  */
  return (insn >> 10) & 0xfffu;
}
3267
3268
/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  /* Replace imm26 (bits [25:0]) with the low 26 bits of OFS.  */
  uint32_t imm26 = ofs & 0x03ffffffu;

  return (insn & ~0x03ffffffu) | imm26;
}
3275
/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  /* Replace imm19 (bits [23:5]) with the low 19 bits of OFS.  */
  uint32_t imm19 = ofs & 0x0007ffffu;

  return (insn & ~(0x0007ffffu << 5)) | (imm19 << 5);
}
3282
/* Encode the 19-bit offset of load literal.  (The old header comment
   said "Decode" — this is the encoder matching decode_ld_lit_ofs_19.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  /* Replace imm19 (bits [23:5]) with the low 19 bits of OFS.  */
  return (insn & ~(0x0007ffffu << 5)) | ((ofs & 0x0007ffffu) << 5);
}
3289
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  /* Replace imm14 (bits [18:5]) with the low 14 bits of OFS.  */
  uint32_t imm14 = ofs & 0x3fffu;

  return (insn & ~(0x3fffu << 5)) | (imm14 << 5);
}
3296
/* Reencode the imm field of move wide.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  /* Replace imm16 (bits [20:5]) with the low 16 bits of IMM.  */
  uint32_t imm16 = imm & 0xffffu;

  return (insn & ~(0xffffu << 5)) | (imm16 << 5);
}
3303
/* Reencode the imm field of adr.  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  /* Split IMM as immhi:immlo, storing immlo into bits [30:29] and
     immhi into bits [23:5].  */
  uint32_t cleared = insn & ~((0x3u << 29) | (0x0007ffffu << 5));

  return cleared
    | ((imm & 0x3u) << 29)
    | ((imm & (0x0007ffffu << 2)) << 3);
}
3311
/* Reencode the imm field of ld/st pos immediate.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  /* Replace imm12 (bits [21:10]) with the low 12 bits of IMM.  */
  uint32_t imm12 = imm & 0xfffu;

  return (insn & ~(0xfffu << 10)) | (imm12 << 10);
}
3318
/* Reencode the imm field of add immediate.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  /* Replace imm12 (bits [21:10]) with the low 12 bits of IMM.  */
  uint32_t imm12 = imm & 0xfffu;

  return (insn & ~(0xfffu << 10)) | (imm12 << 10);
}
3325
/* Reencode mov[zn] to movz.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  /* Setting bit 30 turns MOVN into MOVZ (MOVZ already has it set).  */
  return opcode | 0x40000000u;
}
3332
/* Reencode mov[zn] to movn.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  /* Clearing bit 30 turns MOVZ into MOVN (MOVN already has it clear).  */
  return opcode & ~0x40000000u;
}
3339
/* Insert the addend/value into the instruction or data object being
   relocated.

   ABFD is the bfd owning the bytes at ADDRESS, HOWTO describes the
   relocation being applied and ADDEND is the fully computed value to
   insert.  Returns bfd_reloc_ok on success, bfd_reloc_overflow when
   ADDEND does not fit the field (or is misaligned for scaled
   relocations), or bfd_reloc_notsupported for a howto whose dst_mask
   is not a contiguous low-bit mask.  */
static bfd_reloc_status_type
bfd_elf_aarch64_put_addend (bfd *abfd,
			    bfd_byte *address,
			    reloc_howto_type *howto, bfd_signed_vma addend)
{
  bfd_reloc_status_type status = bfd_reloc_ok;
  /* Pre-shift value, kept for the alignment checks below.  */
  bfd_signed_vma old_addend = addend;
  bfd_vma contents;
  int size;

  /* Read the object being patched.  AArch64 instructions are always
     little-endian; data follows the bfd's byte order.  */
  size = bfd_get_reloc_size (howto);
  switch (size)
    {
    case 2:
      contents = bfd_get_16 (abfd, address);
      break;
    case 4:
      if (howto->src_mask != 0xffffffff)
	/* Must be 32-bit instruction, always little-endian.  */
	contents = bfd_getl32 (address);
      else
	/* Must be 32-bit data (endianness dependent).  */
	contents = bfd_get_32 (abfd, address);
      break;
    case 8:
      contents = bfd_get_64 (abfd, address);
      break;
    default:
      abort ();
    }

  /* Overflow is diagnosed on the unshifted value against the full
     field width (bitsize + rightshift).  */
  switch (howto->complain_on_overflow)
    {
    case complain_overflow_dont:
      break;
    case complain_overflow_signed:
      status = aarch64_signed_overflow (addend,
					howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_unsigned:
      status = aarch64_unsigned_overflow (addend,
					  howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_bitfield:
    default:
      abort ();
    }

  /* Scale the value down to the units the immediate field encodes.  */
  addend >>= howto->rightshift;

  /* Re-encode the immediate field as appropriate for the reloc type.  */
  switch (howto->type)
    {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
      contents = reencode_branch_ofs_26 (contents, addend);
      break;

    case R_AARCH64_CONDBR19:
      contents = reencode_cond_branch_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TSTBR14:
      contents = reencode_tst_branch_ofs_14 (contents, addend);
      break;

    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_GOT_LD_PREL19:
      /* The value is scaled by 4; reject misaligned values.  */
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      contents = reencode_ld_lit_ofs_19 (contents, addend);
      break;

    case R_AARCH64_TLSDESC_CALL:
      /* Marker relocation only; nothing to patch.  */
      break;

    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      contents = reencode_adr_imm (contents, addend);
      break;

    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_ADD_ABS_LO12_NC:
      /* Corresponds to: add rd, rn, #uimm12 to provide the low order
	 12 bits of the page offset following
	 R_AARCH64_ADR_PREL_PG_HI21 which computes the
	 (pc-relative) page base.  */
      contents = reencode_add_imm (contents, addend);
      break;

    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
      /* The offset is scaled by the access size; reject misaligned
	 values.  */
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
	 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
	 which computes the (pc-relative) page base.  */
      contents = reencode_ldst_pos_imm (contents, addend);
      break;

      /* Group relocations to create high bits of a 16, 32, 48 or 64
	 bit signed data or abs address inline.  Will change
	 instruction to MOVN or MOVZ depending on sign of calculated
	 value.  */

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
      /* NOTE: We can only come here with movz or movn.  */
      if (addend < 0)
	{
	  /* Force use of MOVN.  */
	  addend = ~addend;
	  contents = reencode_movzn_to_movn (contents);
	}
      else
	{
	  /* Force use of MOVZ.  */
	  contents = reencode_movzn_to_movz (contents);
	}
      /* fall through */

      /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
	 data or abs address inline.  */

    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      contents = reencode_movw_imm (contents, addend);
      break;

    default:
      /* Repack simple data.  Only handles a dst_mask that is a
	 contiguous run of low-order bits.  */
      if (howto->dst_mask & (howto->dst_mask + 1))
	return bfd_reloc_notsupported;

      contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
      break;
    }

  /* Write the patched object back, mirroring the read above.  */
  switch (size)
    {
    case 2:
      bfd_put_16 (abfd, contents, address);
      break;
    case 4:
      if (howto->dst_mask != 0xffffffff)
	/* must be 32-bit instruction, always little-endian */
	bfd_putl32 (contents, address);
      else
	/* must be 32-bit data (endianness dependent) */
	bfd_put_32 (abfd, contents, address);
      break;
    case 8:
      bfd_put_64 (abfd, contents, address);
      break;
    default:
      abort ();
    }

  return status;
}
3527
/* Return the VMA of the GOT entry for symbol H, initializing the
   entry with VALUE first when this is a static link or the symbol
   binds locally.  Bit 0 of h->got.offset records whether the entry
   has already been initialized.  *UNRESOLVED_RELOC_P is cleared when
   a dynamic relocation will fill the entry at run time.  When H is
   NULL the function returns (bfd_vma) -1 unchanged.  */
static bfd_vma
aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
				 struct elf64_aarch64_link_hash_table
				 *globals, struct bfd_link_info *info,
				 bfd_vma value, bfd *output_bfd,
				 bfd_boolean *unresolved_reloc_p)
{
  bfd_vma off = (bfd_vma) - 1;
  asection *basegot = globals->root.sgot;
  bfd_boolean dyn = globals->root.dynamic_sections_created;

  if (h != NULL)
    {
      off = h->got.offset;
      BFD_ASSERT (off != (bfd_vma) - 1);
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
	  || (info->shared
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	  || (ELF_ST_VISIBILITY (h->other)
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This is actually a static link, or it is a -Bsymbolic link
	     and the symbol is defined locally.  We must initialize this
	     entry in the global offset table.  Since the offset must
	     always be a multiple of 8, we use the least significant bit
	     to record whether we have initialized it already.
	     When doing a dynamic link, we create a .rel(a).got relocation
	     entry to initialize the value.  This is done in the
	     finish_dynamic_symbol routine.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      bfd_put_64 (output_bfd, value, basegot->contents + off);
	      h->got.offset |= 1;
	    }
	}
      else
	/* A dynamic relocation will set this entry; nothing for the
	   static linker to resolve.  */
	*unresolved_reloc_p = FALSE;

      /* Convert the section-relative offset into a VMA.  */
      off = off + basegot->output_section->vma + basegot->output_offset;
    }

  return off;
}
3573
3574/* Change R_TYPE to a more efficient access model where possible,
3575 return the new reloc type. */
3576
3577static unsigned int
3578aarch64_tls_transition_without_check (unsigned int r_type,
3579 struct elf_link_hash_entry *h)
3580{
3581 bfd_boolean is_local = h == NULL;
3582 switch (r_type)
3583 {
3584 case R_AARCH64_TLSGD_ADR_PAGE21:
3585 case R_AARCH64_TLSDESC_ADR_PAGE:
3586 return is_local
3587 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3588
3589 case R_AARCH64_TLSGD_ADD_LO12_NC:
3590 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3591 return is_local
3592 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3593 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3594
3595 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3596 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3597
3598 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3599 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3600
3601 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3602 case R_AARCH64_TLSDESC_CALL:
3603 /* Instructions with these relocations will become NOPs. */
3604 return R_AARCH64_NONE;
3605 }
3606
3607 return r_type;
3608}
3609
3610static unsigned int
3611aarch64_reloc_got_type (unsigned int r_type)
3612{
3613 switch (r_type)
3614 {
3615 case R_AARCH64_LD64_GOT_LO12_NC:
3616 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3617 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
3618 return GOT_NORMAL;
3619
3620 case R_AARCH64_TLSGD_ADR_PAGE21:
3621 case R_AARCH64_TLSGD_ADD_LO12_NC:
3622 return GOT_TLS_GD;
3623
3624 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3625 case R_AARCH64_TLSDESC_ADR_PAGE:
3626 case R_AARCH64_TLSDESC_CALL:
3627 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3628 return GOT_TLSDESC_GD;
3629
3630 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3631 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3632 return GOT_TLS_IE;
3633
3634 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3635 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3636 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3637 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3638 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3639 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3640 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3641 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3642 return GOT_UNKNOWN;
3643 }
3644 return GOT_UNKNOWN;
3645}
3646
3647static bfd_boolean
3648aarch64_can_relax_tls (bfd *input_bfd,
3649 struct bfd_link_info *info,
3650 unsigned int r_type,
3651 struct elf_link_hash_entry *h,
3652 unsigned long r_symndx)
3653{
3654 unsigned int symbol_got_type;
3655 unsigned int reloc_got_type;
3656
3657 if (! IS_AARCH64_TLS_RELOC (r_type))
3658 return FALSE;
3659
3660 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3661 reloc_got_type = aarch64_reloc_got_type (r_type);
3662
3663 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3664 return TRUE;
3665
3666 if (info->shared)
3667 return FALSE;
3668
3669 if (h && h->root.type == bfd_link_hash_undefweak)
3670 return FALSE;
3671
3672 return TRUE;
3673}
3674
3675static unsigned int
3676aarch64_tls_transition (bfd *input_bfd,
3677 struct bfd_link_info *info,
3678 unsigned int r_type,
3679 struct elf_link_hash_entry *h,
3680 unsigned long r_symndx)
3681{
3682 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3683 return r_type;
3684
3685 return aarch64_tls_transition_without_check (r_type, h);
3686}
3687
3688/* Return the base VMA address which should be subtracted from real addresses
3689 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3690
3691static bfd_vma
3692dtpoff_base (struct bfd_link_info *info)
3693{
3694 /* If tls_sec is NULL, we should have signalled an error already. */
3695 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3696 return elf_hash_table (info)->tls_sec->vma;
3697}
3698
3699
3700/* Return the base VMA address which should be subtracted from real addresses
3701 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3702
3703static bfd_vma
3704tpoff_base (struct bfd_link_info *info)
3705{
3706 struct elf_link_hash_table *htab = elf_hash_table (info);
3707
3708 /* If tls_sec is NULL, we should have signalled an error already. */
3709 if (htab->tls_sec == NULL)
3710 return 0;
3711
3712 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3713 htab->tls_sec->alignment_power);
3714 return htab->tls_sec->vma - base;
3715}
3716
3717static bfd_vma *
3718symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3719 unsigned long r_symndx)
3720{
3721 /* Calculate the address of the GOT entry for symbol
3722 referred to in h. */
3723 if (h != NULL)
3724 return &h->got.offset;
3725 else
3726 {
3727 /* local symbol */
3728 struct elf_aarch64_local_symbol *l;
3729
3730 l = elf64_aarch64_locals (input_bfd);
3731 return &l[r_symndx].got_offset;
3732 }
3733}
3734
3735static void
3736symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3737 unsigned long r_symndx)
3738{
3739 bfd_vma *p;
3740 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3741 *p |= 1;
3742}
3743
3744static int
3745symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3746 unsigned long r_symndx)
3747{
3748 bfd_vma value;
3749 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3750 return value & 1;
3751}
3752
3753static bfd_vma
3754symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3755 unsigned long r_symndx)
3756{
3757 bfd_vma value;
3758 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3759 value &= ~1;
3760 return value;
3761}
3762
3763static bfd_vma *
3764symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3765 unsigned long r_symndx)
3766{
3767 /* Calculate the address of the GOT entry for symbol
3768 referred to in h. */
3769 if (h != NULL)
3770 {
3771 struct elf64_aarch64_link_hash_entry *eh;
3772 eh = (struct elf64_aarch64_link_hash_entry *) h;
3773 return &eh->tlsdesc_got_jump_table_offset;
3774 }
3775 else
3776 {
3777 /* local symbol */
3778 struct elf_aarch64_local_symbol *l;
3779
3780 l = elf64_aarch64_locals (input_bfd);
3781 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3782 }
3783}
3784
3785static void
3786symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3787 unsigned long r_symndx)
3788{
3789 bfd_vma *p;
3790 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3791 *p |= 1;
3792}
3793
3794static int
3795symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3796 struct elf_link_hash_entry *h,
3797 unsigned long r_symndx)
3798{
3799 bfd_vma value;
3800 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3801 return value & 1;
3802}
3803
3804static bfd_vma
3805symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3806 unsigned long r_symndx)
3807{
3808 bfd_vma value;
3809 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3810 value &= ~1;
3811 return value;
3812}
3813
3814/* Perform a relocation as part of a final link. */
3815static bfd_reloc_status_type
3816elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3817 bfd *input_bfd,
3818 bfd *output_bfd,
3819 asection *input_section,
3820 bfd_byte *contents,
3821 Elf_Internal_Rela *rel,
3822 bfd_vma value,
3823 struct bfd_link_info *info,
3824 asection *sym_sec,
3825 struct elf_link_hash_entry *h,
3826 bfd_boolean *unresolved_reloc_p,
3827 bfd_boolean save_addend,
3828 bfd_vma *saved_addend)
3829{
3830 unsigned int r_type = howto->type;
3831 unsigned long r_symndx;
3832 bfd_byte *hit_data = contents + rel->r_offset;
3833 bfd_vma place;
3834 bfd_signed_vma signed_addend;
3835 struct elf64_aarch64_link_hash_table *globals;
3836 bfd_boolean weak_undef_p;
3837
3838 globals = elf64_aarch64_hash_table (info);
3839
3840 BFD_ASSERT (is_aarch64_elf (input_bfd));
3841
3842 r_symndx = ELF64_R_SYM (rel->r_info);
3843
3844 /* It is possible to have linker relaxations on some TLS access
3845 models. Update our information here. */
3846 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3847
3848 if (r_type != howto->type)
3849 howto = elf64_aarch64_howto_from_type (r_type);
3850
3851 place = input_section->output_section->vma
3852 + input_section->output_offset + rel->r_offset;
3853
3854 /* Get addend, accumulating the addend for consecutive relocs
3855 which refer to the same offset. */
3856 signed_addend = saved_addend ? *saved_addend : 0;
3857 signed_addend += rel->r_addend;
3858
3859 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3860 : bfd_is_und_section (sym_sec));
3861 switch (r_type)
3862 {
3863 case R_AARCH64_NONE:
3864 case R_AARCH64_NULL:
3865 case R_AARCH64_TLSDESC_CALL:
3866 *unresolved_reloc_p = FALSE;
3867 return bfd_reloc_ok;
3868
3869 case R_AARCH64_ABS64:
3870
3871 /* When generating a shared object or relocatable executable, these
3872 relocations are copied into the output file to be resolved at
3873 run time. */
3874 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3875 && (input_section->flags & SEC_ALLOC)
3876 && (h == NULL
3877 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3878 || h->root.type != bfd_link_hash_undefweak))
3879 {
3880 Elf_Internal_Rela outrel;
3881 bfd_byte *loc;
3882 bfd_boolean skip, relocate;
3883 asection *sreloc;
3884
3885 *unresolved_reloc_p = FALSE;
3886
3887 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3888 input_section, 1);
3889 if (sreloc == NULL)
3890 return bfd_reloc_notsupported;
3891
3892 skip = FALSE;
3893 relocate = FALSE;
3894
3895 outrel.r_addend = signed_addend;
3896 outrel.r_offset =
3897 _bfd_elf_section_offset (output_bfd, info, input_section,
3898 rel->r_offset);
3899 if (outrel.r_offset == (bfd_vma) - 1)
3900 skip = TRUE;
3901 else if (outrel.r_offset == (bfd_vma) - 2)
3902 {
3903 skip = TRUE;
3904 relocate = TRUE;
3905 }
3906
3907 outrel.r_offset += (input_section->output_section->vma
3908 + input_section->output_offset);
3909
3910 if (skip)
3911 memset (&outrel, 0, sizeof outrel);
3912 else if (h != NULL
3913 && h->dynindx != -1
3914 && (!info->shared || !info->symbolic || !h->def_regular))
3915 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3916 else
3917 {
3918 int symbol;
3919
3920 /* On SVR4-ish systems, the dynamic loader cannot
3921 relocate the text and data segments independently,
3922 so the symbol does not matter. */
3923 symbol = 0;
3924 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3925 outrel.r_addend += value;
3926 }
3927
3928 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3929 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3930
3931 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3932 {
3933 /* Sanity to check that we have previously allocated
3934 sufficient space in the relocation section for the
3935 number of relocations we actually want to emit. */
3936 abort ();
3937 }
3938
3939 /* If this reloc is against an external symbol, we do not want to
3940 fiddle with the addend. Otherwise, we need to include the symbol
3941 value so that it becomes an addend for the dynamic reloc. */
3942 if (!relocate)
3943 return bfd_reloc_ok;
3944
3945 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3946 contents, rel->r_offset, value,
3947 signed_addend);
3948 }
3949 else
3950 value += signed_addend;
3951 break;
3952
3953 case R_AARCH64_JUMP26:
3954 case R_AARCH64_CALL26:
3955 {
3956 asection *splt = globals->root.splt;
3957 bfd_boolean via_plt_p =
3958 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3959
3960 /* A call to an undefined weak symbol is converted to a jump to
3961 the next instruction unless a PLT entry will be created.
3962 The jump to the next instruction is optimized as a NOP.
3963 Do the same for local undefined symbols. */
3964 if (weak_undef_p && ! via_plt_p)
3965 {
3966 bfd_putl32 (INSN_NOP, hit_data);
3967 return bfd_reloc_ok;
3968 }
3969
3970 /* If the call goes through a PLT entry, make sure to
3971 check distance to the right destination address. */
3972 if (via_plt_p)
3973 {
3974 value = (splt->output_section->vma
3975 + splt->output_offset + h->plt.offset);
3976 *unresolved_reloc_p = FALSE;
3977 }
3978
3979 /* If the target symbol is global and marked as a function the
3980 relocation applies a function call or a tail call. In this
3981 situation we can veneer out of range branches. The veneers
3982 use IP0 and IP1 hence cannot be used arbitrary out of range
3983 branches that occur within the body of a function. */
3984 if (h && h->type == STT_FUNC)
3985 {
3986 /* Check if a stub has to be inserted because the destination
3987 is too far away. */
3988 if (! aarch64_valid_branch_p (value, place))
3989 {
3990 /* The target is out of reach, so redirect the branch to
3991 the local stub for this function. */
3992 struct elf64_aarch64_stub_hash_entry *stub_entry;
3993 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3994 sym_sec, h,
3995 rel, globals);
3996 if (stub_entry != NULL)
3997 value = (stub_entry->stub_offset
3998 + stub_entry->stub_sec->output_offset
3999 + stub_entry->stub_sec->output_section->vma);
4000 }
4001 }
4002 }
4003 value = aarch64_resolve_relocation (r_type, place, value,
4004 signed_addend, weak_undef_p);
4005 break;
4006
4007 case R_AARCH64_ABS16:
4008 case R_AARCH64_ABS32:
4009 case R_AARCH64_ADD_ABS_LO12_NC:
4010 case R_AARCH64_ADR_PREL_LO21:
4011 case R_AARCH64_ADR_PREL_PG_HI21:
4012 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4013 case R_AARCH64_CONDBR19:
4014 case R_AARCH64_LD_PREL_LO19:
4015 case R_AARCH64_LDST8_ABS_LO12_NC:
4016 case R_AARCH64_LDST16_ABS_LO12_NC:
4017 case R_AARCH64_LDST32_ABS_LO12_NC:
4018 case R_AARCH64_LDST64_ABS_LO12_NC:
4019 case R_AARCH64_LDST128_ABS_LO12_NC:
4020 case R_AARCH64_MOVW_SABS_G0:
4021 case R_AARCH64_MOVW_SABS_G1:
4022 case R_AARCH64_MOVW_SABS_G2:
4023 case R_AARCH64_MOVW_UABS_G0:
4024 case R_AARCH64_MOVW_UABS_G0_NC:
4025 case R_AARCH64_MOVW_UABS_G1:
4026 case R_AARCH64_MOVW_UABS_G1_NC:
4027 case R_AARCH64_MOVW_UABS_G2:
4028 case R_AARCH64_MOVW_UABS_G2_NC:
4029 case R_AARCH64_MOVW_UABS_G3:
4030 case R_AARCH64_PREL16:
4031 case R_AARCH64_PREL32:
4032 case R_AARCH64_PREL64:
4033 case R_AARCH64_TSTBR14:
4034 value = aarch64_resolve_relocation (r_type, place, value,
4035 signed_addend, weak_undef_p);
4036 break;
4037
4038 case R_AARCH64_LD64_GOT_LO12_NC:
4039 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 4040 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
4041 if (globals->root.sgot == NULL)
4042 BFD_ASSERT (h != NULL);
4043
4044 if (h != NULL)
4045 {
4046 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4047 output_bfd,
4048 unresolved_reloc_p);
4049 value = aarch64_resolve_relocation (r_type, place, value,
4050 0, weak_undef_p);
4051 }
4052 break;
4053
4054 case R_AARCH64_TLSGD_ADR_PAGE21:
4055 case R_AARCH64_TLSGD_ADD_LO12_NC:
4056 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4057 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4058 if (globals->root.sgot == NULL)
4059 return bfd_reloc_notsupported;
4060
4061 value = (symbol_got_offset (input_bfd, h, r_symndx)
4062 + globals->root.sgot->output_section->vma
4063 + globals->root.sgot->output_section->output_offset);
4064
4065 value = aarch64_resolve_relocation (r_type, place, value,
4066 0, weak_undef_p);
4067 *unresolved_reloc_p = FALSE;
4068 break;
4069
4070 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4071 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4072 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4073 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4074 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4075 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4076 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4077 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4078 value = aarch64_resolve_relocation (r_type, place, value,
bb3f9ed8 4079 signed_addend - tpoff_base (info), weak_undef_p);
a06ea964
NC
4080 *unresolved_reloc_p = FALSE;
4081 break;
4082
4083 case R_AARCH64_TLSDESC_ADR_PAGE:
4084 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4085 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4086 case R_AARCH64_TLSDESC_ADD:
4087 case R_AARCH64_TLSDESC_LDR:
4088 if (globals->root.sgot == NULL)
4089 return bfd_reloc_notsupported;
4090
4091 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4092 + globals->root.sgotplt->output_section->vma
4093 + globals->root.sgotplt->output_section->output_offset
4094 + globals->sgotplt_jump_table_size);
4095
4096 value = aarch64_resolve_relocation (r_type, place, value,
4097 0, weak_undef_p);
4098 *unresolved_reloc_p = FALSE;
4099 break;
4100
4101 default:
4102 return bfd_reloc_notsupported;
4103 }
4104
4105 if (saved_addend)
4106 *saved_addend = value;
4107
4108 /* Only apply the final relocation in a sequence. */
4109 if (save_addend)
4110 return bfd_reloc_continue;
4111
4112 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4113}
4114
4115/* Handle TLS relaxations. Relaxing is possible for symbols that use
4116 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4117 link.
4118
4119 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4120 is to then call final_link_relocate. Return other values in the
4121 case of error. */
4122
4123static bfd_reloc_status_type
4124elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4125 bfd *input_bfd, bfd_byte *contents,
4126 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4127{
4128 bfd_boolean is_local = h == NULL;
4129 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4130 unsigned long insn;
4131
4132 BFD_ASSERT (globals && input_bfd && contents && rel);
4133
4134 switch (r_type)
4135 {
4136 case R_AARCH64_TLSGD_ADR_PAGE21:
4137 case R_AARCH64_TLSDESC_ADR_PAGE:
4138 if (is_local)
4139 {
4140 /* GD->LE relaxation:
4141 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4142 or
4143 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4144 */
4145 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4146 return bfd_reloc_continue;
4147 }
4148 else
4149 {
4150 /* GD->IE relaxation:
4151 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4152 or
4153 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4154 */
4155 insn = bfd_getl32 (contents + rel->r_offset);
4156 return bfd_reloc_continue;
4157 }
4158
4159 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4160 if (is_local)
4161 {
4162 /* GD->LE relaxation:
4163 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4164 */
4165 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4166 return bfd_reloc_continue;
4167 }
4168 else
4169 {
4170 /* GD->IE relaxation:
4171 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4172 */
4173 insn = bfd_getl32 (contents + rel->r_offset);
4174 insn &= 0xfffffff0;
4175 bfd_putl32 (insn, contents + rel->r_offset);
4176 return bfd_reloc_continue;
4177 }
4178
4179 case R_AARCH64_TLSGD_ADD_LO12_NC:
4180 if (is_local)
4181 {
4182 /* GD->LE relaxation
4183 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4184 bl __tls_get_addr => mrs x1, tpidr_el0
4185 nop => add x0, x1, x0
4186 */
4187
4188 /* First kill the tls_get_addr reloc on the bl instruction. */
4189 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4190 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4191
4192 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4193 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4194 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4195 return bfd_reloc_continue;
4196 }
4197 else
4198 {
4199 /* GD->IE relaxation
4200 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4201 BL __tls_get_addr => mrs x1, tpidr_el0
4202 R_AARCH64_CALL26
4203 NOP => add x0, x1, x0
4204 */
4205
4206 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4207
4208 /* Remove the relocation on the BL instruction. */
4209 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4210
4211 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4212
4213 /* We choose to fixup the BL and NOP instructions using the
4214 offset from the second relocation to allow flexibility in
4215 scheduling instructions between the ADD and BL. */
4216 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4217 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4218 return bfd_reloc_continue;
4219 }
4220
4221 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4222 case R_AARCH64_TLSDESC_CALL:
4223 /* GD->IE/LE relaxation:
4224 add x0, x0, #:tlsdesc_lo12:var => nop
4225 blr xd => nop
4226 */
4227 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4228 return bfd_reloc_ok;
4229
4230 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4231 /* IE->LE relaxation:
4232 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4233 */
4234 if (is_local)
4235 {
4236 insn = bfd_getl32 (contents + rel->r_offset);
4237 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4238 }
4239 return bfd_reloc_continue;
4240
4241 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4242 /* IE->LE relaxation:
4243 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4244 */
4245 if (is_local)
4246 {
4247 insn = bfd_getl32 (contents + rel->r_offset);
4248 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4249 }
4250 return bfd_reloc_continue;
4251
4252 default:
4253 return bfd_reloc_continue;
4254 }
4255
4256 return bfd_reloc_ok;
4257}
4258
/* Relocate an AArch64 ELF section.

   Iterate over the relocations of INPUT_SECTION, applying each one to
   CONTENTS.  Handles TLS relaxation (via elf64_aarch64_tls_relax),
   multi-part relocation sequences sharing one offset (via SAVE_ADDEND /
   ADDEND), and the GOT/TLS dynamic-relocation bookkeeping that must be
   done exactly once per symbol.  Returns FALSE on a hard error.  */

static bfd_boolean
elf64_aarch64_relocate_section (bfd *output_bfd,
				struct bfd_link_info *info,
				bfd *input_bfd,
				asection *input_section,
				bfd_byte *contents,
				Elf_Internal_Rela *relocs,
				Elf_Internal_Sym *local_syms,
				asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf64_aarch64_link_hash_table *globals;
  /* Carries an intermediate value between consecutive relocations that
     share the same r_offset (see the save_addend handling below).  */
  bfd_boolean save_addend = FALSE;
  bfd_vma addend = 0;

  globals = elf64_aarch64_hash_table (info);

  symtab_hdr = &elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      unsigned int r_type;
      unsigned int relaxed_r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF64_R_SYM (rel->r_info);
      r_type = ELF64_R_TYPE (rel->r_info);

      bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF64_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  */
	  if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section, rel->r_offset, TRUE))
		return FALSE;
	    }

	  /* Reject relocation numbers outside the table we know.  */
	  if (r_type >= R_AARCH64_dyn_max)
	    {
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve via the hash table.  */
	  bfd_boolean warned;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    rel->r_addend += sec->output_offset;
	  continue;
	}

      /* Pick a symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Warn when a TLS relocation is used with a non-TLS symbol or
	 vice versa.  */
      if (r_symndx != 0
	  && r_type != R_AARCH64_NONE
	  && r_type != R_AARCH64_NULL
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section, (long) rel->r_offset, howto->name, name);
	}


      /* We relax only if we can see that there can be a valid transition
	 from a reloc type to another.
	 We call elf64_aarch64_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want.  */

      relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
					       h, r_symndx);
      if (relaxed_r_type != r_type)
	{
	  r_type = relaxed_r_type;
	  howto = elf64_aarch64_howto_from_type (r_type);

	  r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      /* There may be multiple consecutive relocations for the
	 same offset.  In that case we are supposed to treat the
	 output of each relocation as the addend for the next.  */
      if (rel + 1 < relend
	  && rel->r_offset == rel[1].r_offset
	  && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
	  && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
	save_addend = TRUE;
      else
	save_addend = FALSE;

      if (r == bfd_reloc_continue)
	r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
					       input_section, contents, rel,
					       relocation, info, sec,
					       h, &unresolved_reloc,
					       save_addend, &addend);

      /* Post-processing: fill in GOT entries and emit any dynamic
	 relocations required for TLS, exactly once per symbol (tracked
	 via the *_mark helpers).  */
      switch (r_type)
	{
	case R_AARCH64_TLSGD_ADR_PAGE21:
	case R_AARCH64_TLSGD_ADD_LO12_NC:
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);
	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      /* Dynamic relocs are needed for shared links or symbols
		 with a dynamic index, except for undefined weak symbols
		 with non-default visibility.  */
	      need_relocs =
		(info->shared || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;
		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
		  rela.r_addend = 0;
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;


		  /* NOTE(review): RELOC_SIZE is written with argument
		     `htab' here but `globals' further down; there is no
		     `htab' variable in this function, so this can only
		     compile if the macro ignores its argument — confirm
		     and unify.  */
		  loc = globals->root.srelgot->contents;
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);
		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  if (indx == 0)
		    {
		      /* Module known at link time: store the DTP-relative
			 offset directly in the second GOT slot.  */
		      bfd_put_64 (output_bfd,
				  relocation - dtpoff_base (info),
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		  else
		    {
		      /* This TLS symbol is global.  We emit a
			 relocation to fixup the tls offset at load
			 time.  */
		      rela.r_info =
			ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
		      rela.r_addend = 0;
		      rela.r_offset =
			(globals->root.sgot->output_section->vma
			 + globals->root.sgot->output_offset + off
			 + GOT_ENTRY_SIZE);

		      loc = globals->root.srelgot->contents;
		      loc += globals->root.srelgot->reloc_count++
			* RELOC_SIZE (globals);
		      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
		      bfd_put_64 (output_bfd, (bfd_vma) 0,
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		}
	      else
		{
		  /* Static link: module id is always 1; store the
		     DTP-relative offset directly.  */
		  bfd_put_64 (output_bfd, (bfd_vma) 1,
			      globals->root.sgot->contents + off);
		  bfd_put_64 (output_bfd,
			      relocation - dtpoff_base (info),
			      globals->root.sgot->contents + off
			      + GOT_ENTRY_SIZE);
		}

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
	case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);

	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      need_relocs =
		(info->shared || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);
		  else
		    rela.r_addend = 0;

		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;

		  /* NOTE(review): same RELOC_SIZE (htab) oddity as
		     above — confirm the macro ignores its argument.  */
		  loc = globals->root.srelgot->contents;
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);

		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  bfd_put_64 (output_bfd, rela.r_addend,
			      globals->root.sgot->contents + off);
		}
	      else
		/* Static link: store the TP-relative offset directly.  */
		bfd_put_64 (output_bfd, relocation - tpoff_base (info),
			    globals->root.sgot->contents + off);

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case R_AARCH64_TLSLE_ADD_TPREL_LO12:
	case R_AARCH64_TLSLE_ADD_TPREL_HI12:
	case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G2:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1:
	case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0:
	case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
	  /* Local-exec relocations need no GOT entries or dynamic
	     relocations.  */
	  break;

	case R_AARCH64_TLSDESC_ADR_PAGE:
	case R_AARCH64_TLSDESC_LD64_LO12_NC:
	case R_AARCH64_TLSDESC_ADD_LO12_NC:
	  if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bfd_boolean need_relocs = FALSE;
	      int indx = h && h->dynindx != -1 ? h->dynindx : 0;
	      bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);

	      need_relocs = (h == NULL
			     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			     || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);
	      BFD_ASSERT (globals->root.sgot != NULL);

	      if (need_relocs)
		{
		  bfd_byte *loc;
		  Elf_Internal_Rela rela;
		  rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
		  rela.r_addend = 0;
		  rela.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + off + globals->sgotplt_jump_table_size);

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);

		  /* Allocate the next available slot in the PLT reloc
		     section to hold our R_AARCH64_TLSDESC, the next
		     available slot is determined from reloc_count,
		     which we step.  But note, reloc_count was
		     artifically moved down while allocating slots for
		     real PLT relocs such that all of the PLT relocs
		     will fit above the initial reloc_count and the
		     extra stuff will fit below.  */
		  loc = globals->root.srelplt->contents;
		  loc += globals->root.srelplt->reloc_count++
		    * RELOC_SIZE (globals);

		  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);

		  /* Both descriptor slots start out zero; the dynamic
		     linker fills them in via R_AARCH64_TLSDESC.  */
		  bfd_put_64 (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size);
		  bfd_put_64 (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size +
			      GOT_ENTRY_SIZE);
		}

	      symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;
	}

      if (!save_addend)
	addend = 0;


      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      +rel->r_offset) != (bfd_vma) - 1)
	{
	  /* NOTE(review): h is dereferenced here; presumably
	     unresolved_reloc is only ever set for global symbols —
	     confirm.  */
	  (*_bfd_error_handler)
	    (_
	     ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd, input_section, (long) rel->r_offset, howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      /* Report anything other than success or continue.  */
      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((!h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
4702
4703/* Set the right machine number. */
4704
4705static bfd_boolean
4706elf64_aarch64_object_p (bfd *abfd)
4707{
4708 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4709 return TRUE;
4710}
4711
4712/* Function to keep AArch64 specific flags in the ELF header. */
4713
4714static bfd_boolean
4715elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4716{
4717 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4718 {
4719 }
4720 else
4721 {
4722 elf_elfheader (abfd)->e_flags = flags;
4723 elf_flags_init (abfd) = TRUE;
4724 }
4725
4726 return TRUE;
4727}
4728
4729/* Copy backend specific data from one object module to another. */
4730
4731static bfd_boolean
4732elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4733{
4734 flagword in_flags;
4735
4736 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4737 return TRUE;
4738
4739 in_flags = elf_elfheader (ibfd)->e_flags;
4740
4741 elf_elfheader (obfd)->e_flags = in_flags;
4742 elf_flags_init (obfd) = TRUE;
4743
4744 /* Also copy the EI_OSABI field. */
4745 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4746 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4747
4748 /* Copy object attributes. */
4749 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4750
4751 return TRUE;
4752}
4753
/* Merge backend specific data from an object file to the output
   object file when linking.

   Checks endianness compatibility, initialises the output flags from
   the first contributing input, and otherwise verifies that the input
   and output e_flags words are compatible.  Returns FALSE only on an
   endianness mismatch; no e_flags bits are currently defined for
   AArch64, so flag merging always succeeds.  */

static bfd_boolean
elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianess.  */
  if (!_bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
    return TRUE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
				  bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if ((bfd_get_section_flags (ibfd, sec)
	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	    only_data_sections = FALSE;

	  /* NOTE(review): this `break' is unconditional, so only the
	     FIRST section is ever examined — only_data_sections is
	     effectively decided by sec == ibfd->sections.  The same
	     pattern appears in the historic ARM back-end; harmless
	     here because flags_compatible is never cleared, but worth
	     confirming the break was intended inside the loop body.  */
	  null_input_bfd = FALSE;
	  break;
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  return flags_compatible;
}
4840
4841/* Display the flags field. */
4842
4843static bfd_boolean
4844elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4845{
4846 FILE *file = (FILE *) ptr;
4847 unsigned long flags;
4848
4849 BFD_ASSERT (abfd != NULL && ptr != NULL);
4850
4851 /* Print normal ELF private data. */
4852 _bfd_elf_print_private_bfd_data (abfd, ptr);
4853
4854 flags = elf_elfheader (abfd)->e_flags;
4855 /* Ignore init flag - it may not be set, despite the flags field
4856 containing valid data. */
4857
4858 /* xgettext:c-format */
4859 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4860
4861 if (flags)
4862 fprintf (file, _("<Unrecognised flag bits set>"));
4863
4864 fputc ('\n', file);
4865
4866 return TRUE;
4867}
4868
4869/* Update the got entry reference counts for the section being removed. */
4870
4871static bfd_boolean
4872elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED,
4873 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4874 asection *sec ATTRIBUTE_UNUSED,
4875 const Elf_Internal_Rela *
4876 relocs ATTRIBUTE_UNUSED)
4877{
4878 return TRUE;
4879}
4880
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.

   Decides, per symbol: whether it keeps or loses its PLT entry,
   whether it inherits its value from a weakdef alias, and whether a
   copy relocation and .dynbss space are required.  */

static bfd_boolean
elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
				     struct elf_link_hash_entry *h)
{
  struct elf64_aarch64_link_hash_table *htab;
  asection *s;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->needs_plt)
    {
      if (h->plt.refcount <= 0
	  || SYMBOL_CALLS_LOCAL (info, h)
	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This case can occur if we saw a CALL26 reloc in
	     an input file, but the symbol wasn't referred to
	     by a dynamic object or all references were
	     garbage collected.  In which case we can end up
	     resolving.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    /* It's possible that we incorrectly decided a .plt reloc was
       needed for a branch reloc to a non-function sym in
       check_relocs.  (The original comment here named R_X86_64_PC32 —
       it was inherited from the x86-64 back-end; the reasoning applies
       to the AArch64 CALL26/JUMP26 relocs.)  We can't decide
       accurately between function and non-function syms in
       check-relocs; Objects loaded later in the link may change
       h->type.  So fix it now.  */
    h->plt.offset = (bfd_vma) - 1;


  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	h->non_got_ref = h->u.weakdef->non_got_ref;
      return TRUE;
    }

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (info->shared)
    return TRUE;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return TRUE;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return TRUE;
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf64_aarch64_hash_table (info);

  /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      htab->srelbss->size += RELOC_SIZE (htab);
      h->needs_copy = 1;
    }

  s = htab->sdynbss;

  return _bfd_elf_adjust_dynamic_copy (h, s);

}
4983
4984static bfd_boolean
4985elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
4986{
4987 struct elf_aarch64_local_symbol *locals;
4988 locals = elf64_aarch64_locals (abfd);
4989 if (locals == NULL)
4990 {
4991 locals = (struct elf_aarch64_local_symbol *)
4992 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
4993 if (locals == NULL)
4994 return FALSE;
4995 elf64_aarch64_locals (abfd) = locals;
4996 }
4997 return TRUE;
4998}
4999
5000/* Look through the relocs for a section during the first phase. */
5001
5002static bfd_boolean
5003elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5004 asection *sec, const Elf_Internal_Rela *relocs)
5005{
5006 Elf_Internal_Shdr *symtab_hdr;
5007 struct elf_link_hash_entry **sym_hashes;
5008 const Elf_Internal_Rela *rel;
5009 const Elf_Internal_Rela *rel_end;
5010 asection *sreloc;
5011
5012 struct elf64_aarch64_link_hash_table *htab;
5013
5014 unsigned long nsyms;
5015
5016 if (info->relocatable)
5017 return TRUE;
5018
5019 BFD_ASSERT (is_aarch64_elf (abfd));
5020
5021 htab = elf64_aarch64_hash_table (info);
5022 sreloc = NULL;
5023
5024 symtab_hdr = &elf_symtab_hdr (abfd);
5025 sym_hashes = elf_sym_hashes (abfd);
5026 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
5027
5028 rel_end = relocs + sec->reloc_count;
5029 for (rel = relocs; rel < rel_end; rel++)
5030 {
5031 struct elf_link_hash_entry *h;
5032 unsigned long r_symndx;
5033 unsigned int r_type;
5034
5035 r_symndx = ELF64_R_SYM (rel->r_info);
5036 r_type = ELF64_R_TYPE (rel->r_info);
5037
5038 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5039 {
5040 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5041 r_symndx);
5042 return FALSE;
5043 }
5044
5045 if (r_symndx >= nsyms
5046 /* PR 9934: It is possible to have relocations that do not
5047 refer to symbols, thus it is also possible to have an
5048 object file containing relocations but no symbol table. */
5049 && (r_symndx > 0 || nsyms > 0))
5050 {
5051 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5052 r_symndx);
5053 return FALSE;
5054 }
5055
5056 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
5057 h = NULL;
5058 else
5059 {
5060 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5061 while (h->root.type == bfd_link_hash_indirect
5062 || h->root.type == bfd_link_hash_warning)
5063 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5064 }
5065
5066 /* Could be done earlier, if h were already available. */
5067 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5068
5069 switch (r_type)
5070 {
5071 case R_AARCH64_ABS64:
5072
5073 /* We don't need to handle relocs into sections not going into
5074 the "real" output. */
5075 if ((sec->flags & SEC_ALLOC) == 0)
5076 break;
5077
5078 if (h != NULL)
5079 {
5080 if (!info->shared)
5081 h->non_got_ref = 1;
5082
5083 h->plt.refcount += 1;
5084 h->pointer_equality_needed = 1;
5085 }
5086
5087 /* No need to do anything if we're not creating a shared
5088 object. */
5089 if (! info->shared)
5090 break;
5091
5092 {
5093 struct elf_dyn_relocs *p;
5094 struct elf_dyn_relocs **head;
5095
5096 /* We must copy these reloc types into the output file.
5097 Create a reloc section in dynobj and make room for
5098 this reloc. */
5099 if (sreloc == NULL)
5100 {
5101 if (htab->root.dynobj == NULL)
5102 htab->root.dynobj = abfd;
5103
5104 sreloc = _bfd_elf_make_dynamic_reloc_section
5105 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5106
5107 if (sreloc == NULL)
5108 return FALSE;
5109 }
5110
5111 /* If this is a global symbol, we count the number of
5112 relocations we need for this symbol. */
5113 if (h != NULL)
5114 {
5115 struct elf64_aarch64_link_hash_entry *eh;
5116 eh = (struct elf64_aarch64_link_hash_entry *) h;
5117 head = &eh->dyn_relocs;
5118 }
5119 else
5120 {
5121 /* Track dynamic relocs needed for local syms too.
5122 We really need local syms available to do this
5123 easily. Oh well. */
5124
5125 asection *s;
5126 void **vpp;
5127 Elf_Internal_Sym *isym;
5128
5129 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5130 abfd, r_symndx);
5131 if (isym == NULL)
5132 return FALSE;
5133
5134 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5135 if (s == NULL)
5136 s = sec;
5137
5138 /* Beware of type punned pointers vs strict aliasing
5139 rules. */
5140 vpp = &(elf_section_data (s)->local_dynrel);
5141 head = (struct elf_dyn_relocs **) vpp;
5142 }
5143
5144 p = *head;
5145 if (p == NULL || p->sec != sec)
5146 {
5147 bfd_size_type amt = sizeof *p;
5148 p = ((struct elf_dyn_relocs *)
5149 bfd_zalloc (htab->root.dynobj, amt));
5150 if (p == NULL)
5151 return FALSE;
5152 p->next = *head;
5153 *head = p;
5154 p->sec = sec;
5155 }
5156
5157 p->count += 1;
5158
5159 }
5160 break;
5161
5162 /* RR: We probably want to keep a consistency check that
5163 there are no dangling GOT_PAGE relocs. */
5164 case R_AARCH64_LD64_GOT_LO12_NC:
f41aef5f 5165 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
5166 case R_AARCH64_ADR_GOT_PAGE:
5167 case R_AARCH64_TLSGD_ADR_PAGE21:
5168 case R_AARCH64_TLSGD_ADD_LO12_NC:
5169 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5170 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5171 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5172 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5173 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5174 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5175 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5176 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5177 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5178 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5179 case R_AARCH64_TLSDESC_ADR_PAGE:
5180 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5181 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5182 {
5183 unsigned got_type;
5184 unsigned old_got_type;
5185
5186 got_type = aarch64_reloc_got_type (r_type);
5187
5188 if (h)
5189 {
5190 h->got.refcount += 1;
5191 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5192 }
5193 else
5194 {
5195 struct elf_aarch64_local_symbol *locals;
5196
5197 if (!elf64_aarch64_allocate_local_symbols
5198 (abfd, symtab_hdr->sh_info))
5199 return FALSE;
5200
5201 locals = elf64_aarch64_locals (abfd);
5202 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5203 locals[r_symndx].got_refcount += 1;
5204 old_got_type = locals[r_symndx].got_type;
5205 }
5206
5207 /* If a variable is accessed with both general dynamic TLS
5208 methods, two slots may be created. */
5209 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5210 got_type |= old_got_type;
5211
5212 /* We will already have issued an error message if there
5213 is a TLS/non-TLS mismatch, based on the symbol type.
5214 So just combine any TLS types needed. */
5215 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5216 && got_type != GOT_NORMAL)
5217 got_type |= old_got_type;
5218
5219 /* If the symbol is accessed by both IE and GD methods, we
5220 are able to relax. Turn off the GD flag, without
5221 messing up with any other kind of TLS types that may be
5222 involved. */
5223 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5224 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5225
5226 if (old_got_type != got_type)
5227 {
5228 if (h != NULL)
5229 elf64_aarch64_hash_entry (h)->got_type = got_type;
5230 else
5231 {
5232 struct elf_aarch64_local_symbol *locals;
5233 locals = elf64_aarch64_locals (abfd);
5234 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5235 locals[r_symndx].got_type = got_type;
5236 }
5237 }
5238
5239 if (htab->root.sgot == NULL)
5240 {
5241 if (htab->root.dynobj == NULL)
5242 htab->root.dynobj = abfd;
5243 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5244 return FALSE;
5245 }
5246 break;
5247 }
5248
5249 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5250 case R_AARCH64_ADR_PREL_PG_HI21:
f41aef5f 5251 case R_AARCH64_ADR_PREL_LO21:
a06ea964
NC
5252 if (h != NULL && info->executable)
5253 {
5254 /* If this reloc is in a read-only section, we might
5255 need a copy reloc. We can't check reliably at this
5256 stage whether the section is read-only, as input
5257 sections have not yet been mapped to output sections.
5258 Tentatively set the flag for now, and correct in
5259 adjust_dynamic_symbol. */
5260 h->non_got_ref = 1;
5261 h->plt.refcount += 1;
5262 h->pointer_equality_needed = 1;
5263 }
5264 /* FIXME:: RR need to handle these in shared libraries
5265 and essentially bomb out as these being non-PIC
5266 relocations in shared libraries. */
5267 break;
5268
5269 case R_AARCH64_CALL26:
5270 case R_AARCH64_JUMP26:
5271 /* If this is a local symbol then we resolve it
5272 directly without creating a PLT entry. */
5273 if (h == NULL)
5274 continue;
5275
5276 h->needs_plt = 1;
5277 h->plt.refcount += 1;
5278 break;
5279 }
5280 }
5281 return TRUE;
5282}
5283
5284/* Treat mapping symbols as special target symbols. */
5285
5286static bfd_boolean
5287elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5288 asymbol *sym)
5289{
5290 return bfd_is_aarch64_special_symbol_name (sym->name,
5291 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5292}
5293
5294/* This is a copy of elf_find_function () from elf.c except that
5295 AArch64 mapping symbols are ignored when looking for function names. */
5296
5297static bfd_boolean
5298aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5299 asection *section,
5300 asymbol **symbols,
5301 bfd_vma offset,
5302 const char **filename_ptr,
5303 const char **functionname_ptr)
5304{
5305 const char *filename = NULL;
5306 asymbol *func = NULL;
5307 bfd_vma low_func = 0;
5308 asymbol **p;
5309
5310 for (p = symbols; *p != NULL; p++)
5311 {
5312 elf_symbol_type *q;
5313
5314 q = (elf_symbol_type *) * p;
5315
5316 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5317 {
5318 default:
5319 break;
5320 case STT_FILE:
5321 filename = bfd_asymbol_name (&q->symbol);
5322 break;
5323 case STT_FUNC:
5324 case STT_NOTYPE:
5325 /* Skip mapping symbols. */
5326 if ((q->symbol.flags & BSF_LOCAL)
5327 && (bfd_is_aarch64_special_symbol_name
5328 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5329 continue;
5330 /* Fall through. */
5331 if (bfd_get_section (&q->symbol) == section
5332 && q->symbol.value >= low_func && q->symbol.value <= offset)
5333 {
5334 func = (asymbol *) q;
5335 low_func = q->symbol.value;
5336 }
5337 break;
5338 }
5339 }
5340
5341 if (func == NULL)
5342 return FALSE;
5343
5344 if (filename_ptr)
5345 *filename_ptr = filename;
5346 if (functionname_ptr)
5347 *functionname_ptr = bfd_asymbol_name (func);
5348
5349 return TRUE;
5350}
5351
5352
/* Find the nearest line to a particular section and offset, for error
   reporting.  This code is a duplicate of the code in elf.c, except
   that it uses aarch64_elf_find_function.

   Tries DWARF2 first, then stabs, then falls back to the plain symbol
   table (skipping mapping symbols).  */

static bfd_boolean
elf64_aarch64_find_nearest_line (bfd *abfd,
				 asection *section,
				 asymbol **symbols,
				 bfd_vma offset,
				 const char **filename_ptr,
				 const char **functionname_ptr,
				 unsigned int *line_ptr)
{
  bfd_boolean found = FALSE;

  /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
     toolchain uses it.  */

  if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
				     section, symbols, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, NULL, 0,
				     &elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may have supplied file/line without a function name;
	 fill that in from the symbol table, and only ask for the
	 filename too when DWARF2 did not provide one.  */
      if (!*functionname_ptr)
	aarch64_elf_find_function (abfd, section, symbols, offset,
				   *filename_ptr ? NULL : filename_ptr,
				   functionname_ptr);

      return TRUE;
    }

  /* Fall back to stabs debugging information.  */
  if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					    &found, filename_ptr,
					    functionname_ptr, line_ptr,
					    &elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* No usable debug info: report just the enclosing function from the
     symbol table, with no line number.  */
  if (!aarch64_elf_find_function (abfd, section, symbols, offset,
				  filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
5404
5405static bfd_boolean
5406elf64_aarch64_find_inliner_info (bfd *abfd,
5407 const char **filename_ptr,
5408 const char **functionname_ptr,
5409 unsigned int *line_ptr)
5410{
5411 bfd_boolean found;
5412 found = _bfd_dwarf2_find_inliner_info
5413 (abfd, filename_ptr,
5414 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5415 return found;
5416}
5417
5418
5419static void
5420elf64_aarch64_post_process_headers (bfd *abfd,
5421 struct bfd_link_info *link_info
5422 ATTRIBUTE_UNUSED)
5423{
5424 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5425
5426 i_ehdrp = elf_elfheader (abfd);
5427 i_ehdrp->e_ident[EI_OSABI] = 0;
5428 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5429}
5430
5431static enum elf_reloc_type_class
5432elf64_aarch64_reloc_type_class (const Elf_Internal_Rela *rela)
5433{
5434 switch ((int) ELF64_R_TYPE (rela->r_info))
5435 {
5436 case R_AARCH64_RELATIVE:
5437 return reloc_class_relative;
5438 case R_AARCH64_JUMP_SLOT:
5439 return reloc_class_plt;
5440 case R_AARCH64_COPY:
5441 return reloc_class_copy;
5442 default:
5443 return reloc_class_normal;
5444 }
5445}
5446
/* Set backend-specific flags on a new section: SHT_NOTE sections are
   marked SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS.  (The old
   comment here, "Set the right machine number", was a copy-and-paste
   leftover and did not describe this function.)  */

static bfd_boolean
elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
{
  if (hdr->sh_type == SHT_NOTE)
    *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;

  return TRUE;
}
5457
5458/* Handle an AArch64 specific section when reading an object file. This is
5459 called when bfd_section_from_shdr finds a section with an unknown
5460 type. */
5461
5462static bfd_boolean
5463elf64_aarch64_section_from_shdr (bfd *abfd,
5464 Elf_Internal_Shdr *hdr,
5465 const char *name, int shindex)
5466{
5467 /* There ought to be a place to keep ELF backend specific flags, but
5468 at the moment there isn't one. We just keep track of the
5469 sections by their name, instead. Fortunately, the ABI gives
5470 names for all the AArch64 specific sections, so we will probably get
5471 away with this. */
5472 switch (hdr->sh_type)
5473 {
5474 case SHT_AARCH64_ATTRIBUTES:
5475 break;
5476
5477 default:
5478 return FALSE;
5479 }
5480
5481 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5482 return FALSE;
5483
5484 return TRUE;
5485}
5486
/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection *sec;		/* The tracked section.  */
  struct section_list *next;	/* Earlier-recorded entry (list is LIFO).  */
  struct section_list *prev;	/* Later-recorded entry, NULL at head.  */
}
section_list;

/* Unfortunately we need to keep a list of sections for which
   an _aarch64_elf_section_data structure has been allocated.  This
   is because it is possible for functions like elf64_aarch64_write_section
   to be called on a section which has had an elf_data_structure
   allocated for it (and so the used_by_bfd field is valid) but
   for which the AArch64 extended version of this structure - the
   _aarch64_elf_section_data structure - has not been allocated.
   New entries are pushed on the front of this list.  */
static section_list *sections_with_aarch64_elf_section_data = NULL;
5505
5506static void
5507record_section_with_aarch64_elf_section_data (asection *sec)
5508{
5509 struct section_list *entry;
5510
5511 entry = bfd_malloc (sizeof (*entry));
5512 if (entry == NULL)
5513 return;
5514 entry->sec = sec;
5515 entry->next = sections_with_aarch64_elf_section_data;
5516 entry->prev = NULL;
5517 if (entry->next != NULL)
5518 entry->next->prev = entry;
5519 sections_with_aarch64_elf_section_data = entry;
5520}
5521
/* Return the list entry recorded for SEC, or NULL if it was never
   recorded.  Keeps a one-entry cache (LAST_ENTRY) across calls to
   speed up the common access pattern.  */

static struct section_list *
find_aarch64_elf_section_entry (asection *sec)
{
  struct section_list *entry;
  static struct section_list *last_entry = NULL;

  /* This is a short cut for the typical case where the sections are added
     to the sections_with_aarch64_elf_section_data list in forward order and
     then looked up here in backwards order.  This makes a real difference
     to the ld-srec/sec64k.exp linker test.  */
  entry = sections_with_aarch64_elf_section_data;
  if (last_entry != NULL)
    {
      if (last_entry->sec == sec)
	entry = last_entry;
      else if (last_entry->next != NULL && last_entry->next->sec == sec)
	entry = last_entry->next;
    }

  /* Linear scan from the chosen starting point.  */
  for (; entry; entry = entry->next)
    if (entry->sec == sec)
      break;

  if (entry)
    /* Record the entry prior to this one - it is the entry we are
       most likely to want to locate next time.  Also this way if we
       have been called from
       unrecord_section_with_aarch64_elf_section_data () we will not
       be caching a pointer that is about to be freed.  */
    last_entry = entry->prev;

  return entry;
}
5555
5556static void
5557unrecord_section_with_aarch64_elf_section_data (asection *sec)
5558{
5559 struct section_list *entry;
5560
5561 entry = find_aarch64_elf_section_entry (sec);
5562
5563 if (entry)
5564 {
5565 if (entry->prev != NULL)
5566 entry->prev->next = entry->next;
5567 if (entry->next != NULL)
5568 entry->next->prev = entry->prev;
5569 if (entry == sections_with_aarch64_elf_section_data)
5570 sections_with_aarch64_elf_section_data = entry->next;
5571 free (entry);
5572 }
5573}
5574
5575
/* State threaded through the mapping/stub symbol output callbacks.  */
typedef struct
{
  void *finfo;			/* Opaque argument forwarded to FUNC.  */
  struct bfd_link_info *info;
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index for SEC.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* Which mapping symbol to emit: AARCH64_MAP_INSN -> "$x",
   AARCH64_MAP_DATA -> "$d" (see names[] in
   elf64_aarch64_output_map_sym).  */
enum map_symbol_type
{
  AARCH64_MAP_INSN,
  AARCH64_MAP_DATA
};
5591
5592
5593/* Output a single mapping symbol. */
5594
5595static bfd_boolean
5596elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5597 enum map_symbol_type type, bfd_vma offset)
5598{
5599 static const char *names[2] = { "$x", "$d" };
5600 Elf_Internal_Sym sym;
5601
5602 sym.st_value = (osi->sec->output_section->vma
5603 + osi->sec->output_offset + offset);
5604 sym.st_size = 0;
5605 sym.st_other = 0;
5606 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5607 sym.st_shndx = osi->sec_shndx;
5608 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5609}
5610
5611
5612
/* Output mapping symbols for PLT entries associated with H.
   Traversal callback for elf_link_hash_traverse; INF is the
   output_arch_syminfo.  */

static bfd_boolean
elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  bfd_vma addr;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* Symbols with no PLT entry need no mapping symbol.  */
  if (h->plt.offset == (bfd_vma) - 1)
    return TRUE;

  addr = h->plt.offset;
  /* NOTE(review): only the entry at offset 32 gets a "$x" mapping
     symbol - presumably the first entry after the PLT header; confirm
     against the plt_header_size used elsewhere in this backend.  */
  if (addr == 32)
    {
      if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return FALSE;
    }
  return TRUE;
}
5641
5642
5643/* Output a single local symbol for a generated stub. */
5644
5645static bfd_boolean
5646elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5647 bfd_vma offset, bfd_vma size)
5648{
5649 Elf_Internal_Sym sym;
5650
5651 sym.st_value = (osi->sec->output_section->vma
5652 + osi->sec->output_offset + offset);
5653 sym.st_size = size;
5654 sym.st_other = 0;
5655 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5656 sym.st_shndx = osi->sec_shndx;
5657 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5658}
5659
5660static bfd_boolean
5661aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5662{
5663 struct elf64_aarch64_stub_hash_entry *stub_entry;
5664 asection *stub_sec;
5665 bfd_vma addr;
5666 char *stub_name;
5667 output_arch_syminfo *osi;
5668
5669 /* Massage our args to the form they really have. */
5670 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5671 osi = (output_arch_syminfo *) in_arg;
5672
5673 stub_sec = stub_entry->stub_sec;
5674
5675 /* Ensure this stub is attached to the current section being
5676 processed. */
5677 if (stub_sec != osi->sec)
5678 return TRUE;
5679
5680 addr = (bfd_vma) stub_entry->stub_offset;
5681
5682 stub_name = stub_entry->output_name;
5683
5684 switch (stub_entry->stub_type)
5685 {
5686 case aarch64_stub_adrp_branch:
5687 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5688 sizeof (aarch64_adrp_branch_stub)))
5689 return FALSE;
5690 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5691 return FALSE;
5692 break;
5693 case aarch64_stub_long_branch:
5694 if (!elf64_aarch64_output_stub_sym
5695 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5696 return FALSE;
5697 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5698 return FALSE;
5699 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5700 return FALSE;
5701 break;
5702 default:
5703 BFD_FAIL ();
5704 }
5705
5706 return TRUE;
5707}
5708
/* Output mapping symbols for linker generated sections.  Walks the
   stub sections first, then the PLT, handing each symbol to FUNC via
   the output_arch_syminfo callbacks above.  */

static bfd_boolean
elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
				      struct bfd_link_info *info,
				      void *finfo,
				      int (*func) (void *, const char *,
						   Elf_Internal_Sym *,
						   asection *,
						   struct elf_link_hash_entry
						   *))
{
  output_arch_syminfo osi;
  struct elf64_aarch64_link_hash_table *htab;

  htab = elf64_aarch64_hash_table (info);

  osi.finfo = finfo;
  osi.info = info;
  osi.func = func;

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection *stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL; stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* Emit symbols for every stub attached to this section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
			     &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (!htab->root.splt || htab->root.splt->size == 0)
    return TRUE;

  /* For now live without mapping symbols for the plt.  */
  osi.sec_shndx = _bfd_elf_section_from_bfd_section
    (output_bfd, htab->root.splt->output_section);
  osi.sec = htab->root.splt;

  elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
			  (void *) &osi);

  return TRUE;

}
5767
5768/* Allocate target specific section data. */
5769
5770static bfd_boolean
5771elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5772{
5773 if (!sec->used_by_bfd)
5774 {
5775 _aarch64_elf_section_data *sdata;
5776 bfd_size_type amt = sizeof (*sdata);
5777
5778 sdata = bfd_zalloc (abfd, amt);
5779 if (sdata == NULL)
5780 return FALSE;
5781 sec->used_by_bfd = sdata;
5782 }
5783
5784 record_section_with_aarch64_elf_section_data (sec);
5785
5786 return _bfd_elf_new_section_hook (abfd, sec);
5787}
5788
5789
5790static void
5791unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5792 asection *sec,
5793 void *ignore ATTRIBUTE_UNUSED)
5794{
5795 unrecord_section_with_aarch64_elf_section_data (sec);
5796}
5797
5798static bfd_boolean
5799elf64_aarch64_close_and_cleanup (bfd *abfd)
5800{
5801 if (abfd->sections)
5802 bfd_map_over_sections (abfd,
5803 unrecord_section_via_map_over_sections, NULL);
5804
5805 return _bfd_elf_close_and_cleanup (abfd);
5806}
5807
5808static bfd_boolean
5809elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5810{
5811 if (abfd->sections)
5812 bfd_map_over_sections (abfd,
5813 unrecord_section_via_map_over_sections, NULL);
5814
5815 return _bfd_free_cached_info (abfd);
5816}
5817
5818static bfd_boolean
5819elf64_aarch64_is_function_type (unsigned int type)
5820{
5821 return type == STT_FUNC;
5822}
5823
5824/* Create dynamic sections. This is different from the ARM backend in that
5825 the got, plt, gotplt and their relocation sections are all created in the
5826 standard part of the bfd elf backend. */
5827
5828static bfd_boolean
5829elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5830 struct bfd_link_info *info)
5831{
5832 struct elf64_aarch64_link_hash_table *htab;
5833 struct elf_link_hash_entry *h;
5834
5835 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5836 return FALSE;
5837
5838 htab = elf64_aarch64_hash_table (info);
5839 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5840 if (!info->shared)
5841 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5842
5843 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5844 abort ();
5845
5846 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5847 dynobj's .got section. We don't do this in the linker script
5848 because we don't want to define the symbol if we are not creating
5849 a global offset table. */
5850 h = _bfd_elf_define_linkage_sym (dynobj, info,
5851 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5852 elf_hash_table (info)->hgot = h;
5853 if (h == NULL)
5854 return FALSE;
5855
5856 return TRUE;
5857}
5858
5859
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.

   Traversal callback over the link hash table (INF is the
   bfd_link_info).  For each symbol it: sizes a PLT entry plus its
   .got.plt and .rela.plt slots; sizes the GOT slot(s) demanded by the
   recorded got_type (normal, TLS GD, TLS IE, TLSDESC) together with
   their dynamic relocs; and finally prunes and accounts the
   copied-out dynamic relocations gathered by check_relocs.  */

static bfd_boolean
elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
  struct bfd_link_info *info;
  struct elf64_aarch64_link_hash_table *htab;
  struct elf64_aarch64_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* An example of a bfd_link_hash_indirect symbol is versioned
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elf64_aarch64_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.
   */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf64_aarch64_hash_table (info);

  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
         Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  asection *s = htab->root.splt;

	  /* If this is the first .plt entry, make room for the special
	     first entry.  */
	  if (s->size == 0)
	    s->size += htab->plt_header_size;

	  h->plt.offset = s->size;

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (!info->shared && !h->def_regular)
	    {
	      h->root.u.def.section = s;
	      h->root.u.def.value = h->plt.offset;
	    }

	  /* Make room for this entry. For now we only create the
	     small model PLT entries. We later need to find a way
	     of relaxing into these from the large model PLT entries.  */
	  s->size += PLT_SMALL_ENTRY_SIZE;

	  /* We also need to make an entry in the .got.plt section, which
	     will be placed in the .got section by the linker script.  */
	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;

	  /* We also need to make an entry in the .rela.plt section.  */
	  htab->root.srelplt->size += RELOC_SIZE (htab);

	  /* We need to ensure that all GOT entries that serve the PLT
	     are consecutive with the special GOT slots [0] [1] and
	     [2]. Any addtional relocations, such as
	     R_AARCH64_TLSDESC, must be placed after the PLT related
	     entries.  We abuse the reloc_count such that during
	     sizing we adjust reloc_count to indicate the number of
	     PLT related reserved entries.  In subsequent phases when
	     filling in the contents of the reloc entries, PLT related
	     entries are placed by computing their PLT index (0
	     .. reloc_count). While other none PLT relocs are placed
	     at the slot indicated by reloc_count and reloc_count is
	     updated.  */

	  htab->root.srelplt->reloc_count++;
	}
      else
	{
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* No PLT needed for this symbol.  */
      h->plt.offset = (bfd_vma) - 1;
      h->needs_plt = 0;
    }

  eh = (struct elf64_aarch64_link_hash_entry *) h;
  eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;

  if (h->got.refcount > 0)
    {
      bfd_boolean dyn;
      unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;

      h->got.offset = (bfd_vma) - 1;

      dyn = htab->root.dynamic_sections_created;

      /* Make sure this symbol is output as a dynamic symbol.
         Undefined weak syms won't yet be marked as dynamic.  */
      if (dyn && h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (got_type == GOT_UNKNOWN)
	{
	  /* No recorded GOT access type: nothing to size.  */
	}
      else if (got_type == GOT_NORMAL)
	{
	  /* One GOT slot, plus a dynamic reloc when the value must be
	     filled in at run time.  */
	  h->got.offset = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (info->shared
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
      else
	{
	  int indx;
	  /* TLS access: got_type is a bitmask, so a symbol may need
	     several kinds of slot at once.  */
	  if (got_type & GOT_TLSDESC_GD)
	    {
	      eh->tlsdesc_got_jump_table_offset =
		(htab->root.sgotplt->size
		 - aarch64_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
	      h->got.offset = (bfd_vma) - 2;
	    }

	  if (got_type & GOT_TLS_GD)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
	    }

	  if (got_type & GOT_TLS_IE)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE;
	    }

	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (info->shared
		  || indx != 0
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      if (got_type & GOT_TLSDESC_GD)
		{
		  htab->root.srelplt->size += RELOC_SIZE (htab);
		  /* Note reloc_count not incremented here! We have
		     already adjusted reloc_count for this relocation
		     type.  */

		  /* TLSDESC PLT is now needed, but not yet determined.  */
		  htab->tlsdesc_plt = (bfd_vma) - 1;
		}

	      if (got_type & GOT_TLS_GD)
		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

	      if (got_type & GOT_TLS_IE)
		htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
    }
  else
    {
      h->got.offset = (bfd_vma) - 1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (info->shared)
    {
      /* Relocs that use pc_count are those that appear on a call
         insn, or certain REL relocs that can generated via assembly.
         We want calls to protected symbols to resolve directly to the
         function rather than going via the plt.  If people want
         function pointer comparisons to work as expected then they
         should avoid writing weird assembly.  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
         visibility.  */
      if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local
		   && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else if (ELIMINATE_COPY_RELOCS)
    {
      /* For the non-shared case, discard space for relocs against
         symbols which turn out to need copy relocs or are not
         dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local
	      && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep:;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc;

      sreloc = elf_section_data (p->sec)->sreloc;

      BFD_ASSERT (sreloc != NULL);

      sreloc->size += p->count * RELOC_SIZE (htab);
    }

  return TRUE;
}
6144
6145
6146
6147
6148/* This is the most important function of all . Innocuosly named
6149 though ! */
6150static bfd_boolean
6151elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6152 struct bfd_link_info *info)
6153{
6154 struct elf64_aarch64_link_hash_table *htab;
6155 bfd *dynobj;
6156 asection *s;
6157 bfd_boolean relocs;
6158 bfd *ibfd;
6159
6160 htab = elf64_aarch64_hash_table ((info));
6161 dynobj = htab->root.dynobj;
6162
6163 BFD_ASSERT (dynobj != NULL);
6164
6165 if (htab->root.dynamic_sections_created)
6166 {
6167 if (info->executable)
6168 {
6169 s = bfd_get_linker_section (dynobj, ".interp");
6170 if (s == NULL)
6171 abort ();
6172 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6173 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6174 }
6175 }
6176
6177 /* Set up .got offsets for local syms, and space for local dynamic
6178 relocs. */
6179 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6180 {
6181 struct elf_aarch64_local_symbol *locals = NULL;
6182 Elf_Internal_Shdr *symtab_hdr;
6183 asection *srel;
6184 unsigned int i;
6185
6186 if (!is_aarch64_elf (ibfd))
6187 continue;
6188
6189 for (s = ibfd->sections; s != NULL; s = s->next)
6190 {
6191 struct elf_dyn_relocs *p;
6192
6193 for (p = (struct elf_dyn_relocs *)
6194 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6195 {
6196 if (!bfd_is_abs_section (p->sec)
6197 && bfd_is_abs_section (p->sec->output_section))
6198 {
6199 /* Input section has been discarded, either because
6200 it is a copy of a linkonce section or due to
6201 linker script /DISCARD/, so we'll be discarding
6202 the relocs too. */
6203 }
6204 else if (p->count != 0)
6205 {
6206 srel = elf_section_data (p->sec)->sreloc;
6207 srel->size += p->count * RELOC_SIZE (htab);
6208 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6209 info->flags |= DF_TEXTREL;
6210 }
6211 }
6212 }
6213
6214 locals = elf64_aarch64_locals (ibfd);
6215 if (!locals)
6216 continue;
6217
6218 symtab_hdr = &elf_symtab_hdr (ibfd);
6219 srel = htab->root.srelgot;
6220 for (i = 0; i < symtab_hdr->sh_info; i++)
6221 {
6222 locals[i].got_offset = (bfd_vma) - 1;
6223 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6224 if (locals[i].got_refcount > 0)
6225 {
6226 unsigned got_type = locals[i].got_type;
6227 if (got_type & GOT_TLSDESC_GD)
6228 {
6229 locals[i].tlsdesc_got_jump_table_offset =
6230 (htab->root.sgotplt->size
6231 - aarch64_compute_jump_table_size (htab));
6232 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6233 locals[i].got_offset = (bfd_vma) - 2;
6234 }
6235
6236 if (got_type & GOT_TLS_GD)
6237 {
6238 locals[i].got_offset = htab->root.sgot->size;
6239 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6240 }
6241
6242 if (got_type & GOT_TLS_IE)
6243 {
6244 locals[i].got_offset = htab->root.sgot->size;
6245 htab->root.sgot->size += GOT_ENTRY_SIZE;
6246 }
6247
6248 if (got_type == GOT_UNKNOWN)
6249 {
6250 }
6251
6252 if (got_type == GOT_NORMAL)
6253 {
6254 }
6255
6256 if (info->shared)
6257 {
6258 if (got_type & GOT_TLSDESC_GD)
6259 {
6260 htab->root.srelplt->size += RELOC_SIZE (htab);
6261 /* Note RELOC_COUNT not incremented here! */
6262 htab->tlsdesc_plt = (bfd_vma) - 1;
6263 }
6264
6265 if (got_type & GOT_TLS_GD)
6266 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6267
6268 if (got_type & GOT_TLS_IE)
6269 htab->root.srelgot->size += RELOC_SIZE (htab);
6270 }
6271 }
6272 else
6273 {
6274 locals[i].got_refcount = (bfd_vma) - 1;
6275 }
6276 }
6277 }
6278
6279
6280 /* Allocate global sym .plt and .got entries, and space for global
6281 sym dynamic relocs. */
6282 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6283 info);
6284
6285
6286 /* For every jump slot reserved in the sgotplt, reloc_count is
6287 incremented. However, when we reserve space for TLS descriptors,
6288 it's not incremented, so in order to compute the space reserved
6289 for them, it suffices to multiply the reloc count by the jump
6290 slot size. */
6291
6292 if (htab->root.srelplt)
6293 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6294
6295 if (htab->tlsdesc_plt)
6296 {
6297 if (htab->root.splt->size == 0)
6298 htab->root.splt->size += PLT_ENTRY_SIZE;
6299
6300 htab->tlsdesc_plt = htab->root.splt->size;
6301 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6302
6303 /* If we're not using lazy TLS relocations, don't generate the
6304 GOT entry required. */
6305 if (!(info->flags & DF_BIND_NOW))
6306 {
6307 htab->dt_tlsdesc_got = htab->root.sgot->size;
6308 htab->root.sgot->size += GOT_ENTRY_SIZE;
6309 }
6310 }
6311
6312 /* We now have determined the sizes of the various dynamic sections.
6313 Allocate memory for them. */
6314 relocs = FALSE;
6315 for (s = dynobj->sections; s != NULL; s = s->next)
6316 {
6317 if ((s->flags & SEC_LINKER_CREATED) == 0)
6318 continue;
6319
6320 if (s == htab->root.splt
6321 || s == htab->root.sgot
6322 || s == htab->root.sgotplt
6323 || s == htab->root.iplt
6324 || s == htab->root.igotplt || s == htab->sdynbss)
6325 {
6326 /* Strip this section if we don't need it; see the
6327 comment below. */
6328 }
6329 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6330 {
6331 if (s->size != 0 && s != htab->root.srelplt)
6332 relocs = TRUE;
6333
6334 /* We use the reloc_count field as a counter if we need
6335 to copy relocs into the output file. */
6336 if (s != htab->root.srelplt)
6337 s->reloc_count = 0;
6338 }
6339 else
6340 {
6341 /* It's not one of our sections, so don't allocate space. */
6342 continue;
6343 }
6344
6345 if (s->size == 0)
6346 {
6347 /* If we don't need this section, strip it from the
6348 output file. This is mostly to handle .rela.bss and
6349 .rela.plt. We must create both sections in
6350 create_dynamic_sections, because they must be created
6351 before the linker maps input sections to output
6352 sections. The linker does that before
6353 adjust_dynamic_symbol is called, and it is that
6354 function which decides whether anything needs to go
6355 into these sections. */
6356
6357 s->flags |= SEC_EXCLUDE;
6358 continue;
6359 }
6360
6361 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6362 continue;
6363
6364 /* Allocate memory for the section contents. We use bfd_zalloc
6365 here in case unused entries are not reclaimed before the
6366 section's contents are written out. This should not happen,
6367 but this way if it does, we get a R_AARCH64_NONE reloc instead
6368 of garbage. */
6369 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6370 if (s->contents == NULL)
6371 return FALSE;
6372 }
6373
6374 if (htab->root.dynamic_sections_created)
6375 {
6376 /* Add some entries to the .dynamic section. We fill in the
6377 values later, in elf64_aarch64_finish_dynamic_sections, but we
6378 must add the entries now so that we get the correct size for
6379 the .dynamic section. The DT_DEBUG entry is filled in by the
6380 dynamic linker and used by the debugger. */
6381#define add_dynamic_entry(TAG, VAL) \
6382 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6383
6384 if (info->executable)
6385 {
6386 if (!add_dynamic_entry (DT_DEBUG, 0))
6387 return FALSE;
6388 }
6389
6390 if (htab->root.splt->size != 0)
6391 {
6392 if (!add_dynamic_entry (DT_PLTGOT, 0)
6393 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6394 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6395 || !add_dynamic_entry (DT_JMPREL, 0))
6396 return FALSE;
6397
6398 if (htab->tlsdesc_plt
6399 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6400 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6401 return FALSE;
6402 }
6403
6404 if (relocs)
6405 {
6406 if (!add_dynamic_entry (DT_RELA, 0)
6407 || !add_dynamic_entry (DT_RELASZ, 0)
6408 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6409 return FALSE;
6410
6411 /* If any dynamic relocs apply to a read-only section,
6412 then we need a DT_TEXTREL entry. */
6413 if ((info->flags & DF_TEXTREL) != 0)
6414 {
6415 if (!add_dynamic_entry (DT_TEXTREL, 0))
6416 return FALSE;
6417 }
6418 }
6419 }
6420#undef add_dynamic_entry
6421
6422 return TRUE;
6423
6424
6425}
6426
6427static inline void
6428elf64_aarch64_update_plt_entry (bfd *output_bfd,
6429 unsigned int r_type,
6430 bfd_byte *plt_entry, bfd_vma value)
6431{
6432 reloc_howto_type *howto;
6433 howto = elf64_aarch64_howto_from_type (r_type);
6434 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6435}
6436
6437static void
6438elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6439 struct elf64_aarch64_link_hash_table
6440 *htab, bfd *output_bfd)
6441{
6442 bfd_byte *plt_entry;
6443 bfd_vma plt_index;
6444 bfd_vma got_offset;
6445 bfd_vma gotplt_entry_address;
6446 bfd_vma plt_entry_address;
6447 Elf_Internal_Rela rela;
6448 bfd_byte *loc;
6449
6450 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6451
6452 /* Offset in the GOT is PLT index plus got GOT headers(3)
6453 times 8. */
6454 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6455 plt_entry = htab->root.splt->contents + h->plt.offset;
6456 plt_entry_address = htab->root.splt->output_section->vma
6457 + htab->root.splt->output_section->output_offset + h->plt.offset;
6458 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6459 htab->root.sgotplt->output_offset + got_offset;
6460
6461 /* Copy in the boiler-plate for the PLTn entry. */
6462 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6463
6464 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6465 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6466 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6467 plt_entry,
6468 PG (gotplt_entry_address) -
6469 PG (plt_entry_address));
6470
6471 /* Fill in the lo12 bits for the load from the pltgot. */
6472 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6473 plt_entry + 4,
6474 PG_OFFSET (gotplt_entry_address));
6475
6476 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6477 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6478 plt_entry + 8,
6479 PG_OFFSET (gotplt_entry_address));
6480
6481 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6482 bfd_put_64 (output_bfd,
6483 (htab->root.splt->output_section->vma
6484 + htab->root.splt->output_offset),
6485 htab->root.sgotplt->contents + got_offset);
6486
6487 /* Fill in the entry in the .rela.plt section. */
6488 rela.r_offset = gotplt_entry_address;
6489 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6490 rela.r_addend = 0;
6491
6492 /* Compute the relocation entry to used based on PLT index and do
6493 not adjust reloc_count. The reloc_count has already been adjusted
6494 to account for this entry. */
6495 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6496 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6497}
6498
6499/* Size sections even though they're not dynamic. We use it to setup
6500 _TLS_MODULE_BASE_, if needed. */
6501
6502static bfd_boolean
6503elf64_aarch64_always_size_sections (bfd *output_bfd,
6504 struct bfd_link_info *info)
6505{
6506 asection *tls_sec;
6507
6508 if (info->relocatable)
6509 return TRUE;
6510
6511 tls_sec = elf_hash_table (info)->tls_sec;
6512
6513 if (tls_sec)
6514 {
6515 struct elf_link_hash_entry *tlsbase;
6516
6517 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6518 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6519
6520 if (tlsbase)
6521 {
6522 struct bfd_link_hash_entry *h = NULL;
6523 const struct elf_backend_data *bed =
6524 get_elf_backend_data (output_bfd);
6525
6526 if (!(_bfd_generic_link_add_one_symbol
6527 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6528 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6529 return FALSE;
6530
6531 tlsbase->type = STT_TLS;
6532 tlsbase = (struct elf_link_hash_entry *) h;
6533 tlsbase->def_regular = 1;
6534 tlsbase->other = STV_HIDDEN;
6535 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6536 }
6537 }
6538
6539 return TRUE;
6540}
6541
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Emits the PLT entry, the GLOB_DAT/RELATIVE
   GOT relocation and any COPY relocation required for H, and forces
   _DYNAMIC / _GLOBAL_OFFSET_TABLE_ to be absolute.  */
static bfd_boolean
elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
				     struct bfd_link_info *info,
				     struct elf_link_hash_entry *h,
				     Elf_Internal_Sym *sym)
{
  struct elf64_aarch64_link_hash_table *htab;
  htab = elf64_aarch64_hash_table (info);

  if (h->plt.offset != (bfd_vma) - 1)
    {
      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */

      if (h->dynindx == -1
	  || htab->root.splt == NULL
	  || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
	abort ();

      elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  Leave the value alone.  This is a clue
	     for the dynamic linker, to make function pointer
	     comparisons work between an application and shared
	     library.  */
	  sym->st_shndx = SHN_UNDEF;
	}
    }

  if (h->got.offset != (bfd_vma) - 1
      && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
	abort ();

      /* The low bit of got.offset is used as a flag (see the
	 BFD_ASSERTs below); mask it off to get the real offset.  */
      rela.r_offset = (htab->root.sgot->output_section->vma
		       + htab->root.sgot->output_offset
		       + (h->got.offset & ~(bfd_vma) 1));

      if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (!h->def_regular)
	    return FALSE;

	  /* Locally-resolved symbol in a shared object: emit a
	     RELATIVE reloc carrying the resolved address as addend.  */
	  BFD_ASSERT ((h->got.offset & 1) != 0);
	  rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	}
      else
	{
	  /* Otherwise zero the GOT slot and let the dynamic linker
	     fill it via a GLOB_DAT reloc.  */
	  BFD_ASSERT ((h->got.offset & 1) == 0);
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + h->got.offset);
	  rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
	  rela.r_addend = 0;
	}

      loc = htab->root.srelgot->contents;
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol needs a copy reloc.  Set it up.  */

      if (h->dynindx == -1
	  || (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	  || htab->srelbss == NULL)
	abort ();

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
      rela.r_addend = 0;
      loc = htab->srelbss->contents;
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
     be NULL for local symbols.  */
  if (sym != NULL
      && (h == elf_hash_table (info)->hdynamic
	  || h == elf_hash_table (info)->hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
6647
6648static void
6649elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6650 struct elf64_aarch64_link_hash_table
6651 *htab)
6652{
6653 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6654 small and large plts and at the minute just generates
6655 the small PLT. */
6656
6657 /* PLT0 of the small PLT looks like this -
6658 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6659 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6660 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6661 // symbol resolver
6662 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6663 // GOTPLT entry for this.
6664 br x17
6665 */
6666 bfd_vma plt_got_base;
6667 bfd_vma plt_base;
6668
6669
6670 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6671 PLT_ENTRY_SIZE);
6672 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6673 PLT_ENTRY_SIZE;
6674
6675 plt_got_base = (htab->root.sgotplt->output_section->vma
6676 + htab->root.sgotplt->output_offset);
6677
6678 plt_base = htab->root.splt->output_section->vma +
6679 htab->root.splt->output_section->output_offset;
6680
6681 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6682 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6683 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6684 htab->root.splt->contents + 4,
6685 PG (plt_got_base + 16) - PG (plt_base + 4));
6686
6687 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6688 htab->root.splt->contents + 8,
6689 PG_OFFSET (plt_got_base + 16));
6690
6691 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6692 htab->root.splt->contents + 12,
6693 PG_OFFSET (plt_got_base + 16));
6694}
6695
6696static bfd_boolean
6697elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6698 struct bfd_link_info *info)
6699{
6700 struct elf64_aarch64_link_hash_table *htab;
6701 bfd *dynobj;
6702 asection *sdyn;
6703
6704 htab = elf64_aarch64_hash_table (info);
6705 dynobj = htab->root.dynobj;
6706 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6707
6708 if (htab->root.dynamic_sections_created)
6709 {
6710 Elf64_External_Dyn *dyncon, *dynconend;
6711
6712 if (sdyn == NULL || htab->root.sgot == NULL)
6713 abort ();
6714
6715 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6716 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6717 for (; dyncon < dynconend; dyncon++)
6718 {
6719 Elf_Internal_Dyn dyn;
6720 asection *s;
6721
6722 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6723
6724 switch (dyn.d_tag)
6725 {
6726 default:
6727 continue;
6728
6729 case DT_PLTGOT:
6730 s = htab->root.sgotplt;
6731 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6732 break;
6733
6734 case DT_JMPREL:
6735 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6736 break;
6737
6738 case DT_PLTRELSZ:
6739 s = htab->root.srelplt->output_section;
6740 dyn.d_un.d_val = s->size;
6741 break;
6742
6743 case DT_RELASZ:
6744 /* The procedure linkage table relocs (DT_JMPREL) should
6745 not be included in the overall relocs (DT_RELA).
6746 Therefore, we override the DT_RELASZ entry here to
6747 make it not include the JMPREL relocs. Since the
6748 linker script arranges for .rela.plt to follow all
6749 other relocation sections, we don't have to worry
6750 about changing the DT_RELA entry. */
6751 if (htab->root.srelplt != NULL)
6752 {
6753 s = htab->root.srelplt->output_section;
6754 dyn.d_un.d_val -= s->size;
6755 }
6756 break;
6757
6758 case DT_TLSDESC_PLT:
6759 s = htab->root.splt;
6760 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6761 + htab->tlsdesc_plt;
6762 break;
6763
6764 case DT_TLSDESC_GOT:
6765 s = htab->root.sgot;
6766 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6767 + htab->dt_tlsdesc_got;
6768 break;
6769 }
6770
6771 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6772 }
6773
6774 }
6775
6776 /* Fill in the special first entry in the procedure linkage table. */
6777 if (htab->root.splt && htab->root.splt->size > 0)
6778 {
6779 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6780
6781 elf_section_data (htab->root.splt->output_section)->
6782 this_hdr.sh_entsize = htab->plt_entry_size;
6783
6784
6785 if (htab->tlsdesc_plt)
6786 {
6787 bfd_put_64 (output_bfd, (bfd_vma) 0,
6788 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6789
6790 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6791 elf64_aarch64_tlsdesc_small_plt_entry,
6792 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6793
6794 {
6795 bfd_vma adrp1_addr =
6796 htab->root.splt->output_section->vma
6797 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6798
6799 bfd_vma adrp2_addr =
6800 htab->root.splt->output_section->vma
6801 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6802
6803 bfd_vma got_addr =
6804 htab->root.sgot->output_section->vma
6805 + htab->root.sgot->output_offset;
6806
6807 bfd_vma pltgot_addr =
6808 htab->root.sgotplt->output_section->vma
6809 + htab->root.sgotplt->output_offset;
6810
6811 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6812 bfd_vma opcode;
6813
6814 /* adrp x2, DT_TLSDESC_GOT */
6815 opcode = bfd_get_32 (output_bfd,
6816 htab->root.splt->contents
6817 + htab->tlsdesc_plt + 4);
6818 opcode = reencode_adr_imm
6819 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6820 bfd_put_32 (output_bfd, opcode,
6821 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6822
6823 /* adrp x3, 0 */
6824 opcode = bfd_get_32 (output_bfd,
6825 htab->root.splt->contents
6826 + htab->tlsdesc_plt + 8);
6827 opcode = reencode_adr_imm
6828 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6829 bfd_put_32 (output_bfd, opcode,
6830 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6831
6832 /* ldr x2, [x2, #0] */
6833 opcode = bfd_get_32 (output_bfd,
6834 htab->root.splt->contents
6835 + htab->tlsdesc_plt + 12);
6836 opcode = reencode_ldst_pos_imm (opcode,
6837 PG_OFFSET (dt_tlsdesc_got) >> 3);
6838 bfd_put_32 (output_bfd, opcode,
6839 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6840
6841 /* add x3, x3, 0 */
6842 opcode = bfd_get_32 (output_bfd,
6843 htab->root.splt->contents
6844 + htab->tlsdesc_plt + 16);
6845 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6846 bfd_put_32 (output_bfd, opcode,
6847 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6848 }
6849 }
6850 }
6851
6852 if (htab->root.sgotplt)
6853 {
6854 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6855 {
6856 (*_bfd_error_handler)
6857 (_("discarded output section: `%A'"), htab->root.sgotplt);
6858 return FALSE;
6859 }
6860
6861 /* Fill in the first three entries in the global offset table. */
6862 if (htab->root.sgotplt->size > 0)
6863 {
6864 /* Set the first entry in the global offset table to the address of
6865 the dynamic section. */
6866 if (sdyn == NULL)
6867 bfd_put_64 (output_bfd, (bfd_vma) 0,
6868 htab->root.sgotplt->contents);
6869 else
6870 bfd_put_64 (output_bfd,
6871 sdyn->output_section->vma + sdyn->output_offset,
6872 htab->root.sgotplt->contents);
6873 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6874 bfd_put_64 (output_bfd,
6875 (bfd_vma) 0,
6876 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6877 bfd_put_64 (output_bfd,
6878 (bfd_vma) 0,
6879 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6880 }
6881
6882 elf_section_data (htab->root.sgotplt->output_section)->
6883 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6884 }
6885
6886 if (htab->root.sgot && htab->root.sgot->size > 0)
6887 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6888 = GOT_ENTRY_SIZE;
6889
6890 return TRUE;
6891}
6892
6893/* Return address for Ith PLT stub in section PLT, for relocation REL
6894 or (bfd_vma) -1 if it should not be included. */
6895
6896static bfd_vma
6897elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6898 const arelent *rel ATTRIBUTE_UNUSED)
6899{
6900 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6901}
6902
6903
/* We use this so we can override certain functions
   (though currently we don't).  */

/* Positional initializer for struct elf_size_info; the field order
   must match the declaration in elf-bfd.h.  */
const struct elf_size_info elf64_aarch64_size_info =
{
  sizeof (Elf64_External_Ehdr),
  sizeof (Elf64_External_Phdr),
  sizeof (Elf64_External_Shdr),
  sizeof (Elf64_External_Rel),
  sizeof (Elf64_External_Rela),
  sizeof (Elf64_External_Sym),
  sizeof (Elf64_External_Dyn),
  sizeof (Elf_External_Note),
  4,				/* Hash table entry size.  */
  1,				/* Internal relocs per external relocs.  */
  64,				/* Arch size.  */
  3,				/* Log_file_align.  */
  ELFCLASS64, EV_CURRENT,
  bfd_elf64_write_out_phdrs,
  bfd_elf64_write_shdrs_and_ehdr,
  bfd_elf64_checksum_contents,
  bfd_elf64_write_relocs,
  bfd_elf64_swap_symbol_in,
  bfd_elf64_swap_symbol_out,
  bfd_elf64_slurp_reloc_table,
  bfd_elf64_slurp_symbol_table,
  bfd_elf64_swap_dyn_in,
  bfd_elf64_swap_dyn_out,
  bfd_elf64_swap_reloc_in,
  bfd_elf64_swap_reloc_out,
  bfd_elf64_swap_reloca_in,
  bfd_elf64_swap_reloca_out
};
6937
/* Target identification and page-size parameters consumed by
   elf64-target.h below.  */
#define ELF_ARCH bfd_arch_aarch64
#define ELF_MACHINE_CODE EM_AARCH64
#define ELF_MAXPAGESIZE 0x10000
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

/* Generic BFD target-vector entry points overridden by this
   backend.  */
#define bfd_elf64_close_and_cleanup             \
  elf64_aarch64_close_and_cleanup

#define bfd_elf64_bfd_copy_private_bfd_data	\
  elf64_aarch64_copy_private_bfd_data

#define bfd_elf64_bfd_free_cached_info          \
  elf64_aarch64_bfd_free_cached_info

#define bfd_elf64_bfd_is_target_special_symbol	\
  elf64_aarch64_is_target_special_symbol

#define bfd_elf64_bfd_link_hash_table_create    \
  elf64_aarch64_link_hash_table_create

#define bfd_elf64_bfd_link_hash_table_free      \
  elf64_aarch64_hash_table_free

#define bfd_elf64_bfd_merge_private_bfd_data	\
  elf64_aarch64_merge_private_bfd_data

#define bfd_elf64_bfd_print_private_bfd_data	\
  elf64_aarch64_print_private_bfd_data

#define bfd_elf64_bfd_reloc_type_lookup		\
  elf64_aarch64_reloc_type_lookup

#define bfd_elf64_bfd_reloc_name_lookup		\
  elf64_aarch64_reloc_name_lookup

#define bfd_elf64_bfd_set_private_flags		\
  elf64_aarch64_set_private_flags

#define bfd_elf64_find_inliner_info		\
  elf64_aarch64_find_inliner_info

#define bfd_elf64_find_nearest_line		\
  elf64_aarch64_find_nearest_line

#define bfd_elf64_mkobject			\
  elf64_aarch64_mkobject

#define bfd_elf64_new_section_hook		\
  elf64_aarch64_new_section_hook

/* ELF backend hooks (linker callbacks) provided by this file.  */
#define elf_backend_adjust_dynamic_symbol	\
  elf64_aarch64_adjust_dynamic_symbol

#define elf_backend_always_size_sections	\
  elf64_aarch64_always_size_sections

#define elf_backend_check_relocs		\
  elf64_aarch64_check_relocs

#define elf_backend_copy_indirect_symbol	\
  elf64_aarch64_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash.  */
#define elf_backend_create_dynamic_sections	\
  elf64_aarch64_create_dynamic_sections

#define elf_backend_init_index_section		\
  _bfd_elf_init_2_index_sections

#define elf_backend_is_function_type		\
  elf64_aarch64_is_function_type

#define elf_backend_finish_dynamic_sections	\
  elf64_aarch64_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol	\
  elf64_aarch64_finish_dynamic_symbol

#define elf_backend_gc_sweep_hook		\
  elf64_aarch64_gc_sweep_hook

#define elf_backend_object_p			\
  elf64_aarch64_object_p

#define elf_backend_output_arch_local_syms	\
  elf64_aarch64_output_arch_local_syms

#define elf_backend_plt_sym_val			\
  elf64_aarch64_plt_sym_val

#define elf_backend_post_process_headers	\
  elf64_aarch64_post_process_headers

#define elf_backend_relocate_section		\
  elf64_aarch64_relocate_section

#define elf_backend_reloc_type_class		\
  elf64_aarch64_reloc_type_class

#define elf_backend_section_flags		\
  elf64_aarch64_section_flags

#define elf_backend_section_from_shdr		\
  elf64_aarch64_section_from_shdr

#define elf_backend_size_dynamic_sections	\
  elf64_aarch64_size_dynamic_sections

#define elf_backend_size_info			\
  elf64_aarch64_size_info

/* Backend capability flags and RELA-only relocation policy.  */
#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    0
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      0
#define elf_backend_may_use_rela_p     1
#define elf_backend_default_use_rela_p 1
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)

/* NOTE(review): the attributes section name is ".ARM.attributes"
   here, inherited from the 32-bit ARM port — confirm this is
   intentional for AArch64 before changing; the string is load-bearing
   for object compatibility.  */
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"

#include "elf64-target.h"
This page took 0.328041 seconds and 4 git commands to generate.