1/* ELF support for AArch64.
 2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
 23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62 global and local TLS symbols. Note that this is different from
 63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
 74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
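   As a rough sketch (the type and field names below are illustrative
   only and do not appear in this file), the pair of 8-byte GOT slots
   can be pictured as:

     struct tls_index              traditional general dynamic
     {
       bfd_vma module;             R_AARCH64_TLS_DTPMOD64
       bfd_vma offset;             R_AARCH64_TLS_DTPREL64
     };

     struct tlsdesc                TLS descriptor
     {
       bfd_vma resolver;           R_AARCH64_TLSDESC, filled by the loader
       bfd_vma argument;
     };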
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
 101 spotted. The local symbol data structures are created once, when
 102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
 111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
 117 Iterate over all input BFDs, look in the local symbol data structures
 118 constructed earlier for local TLS symbols, and allocate them double
 119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
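   For example, a sketch of the convention (not a literal excerpt of the
   code below):

     got_offset |= 1;                    mark GOT relocations as emitted
     value = got_offset & ~(bfd_vma) 1;  mask the flag before use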
133
134 elf64_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return the relocation section associated with NAME. HTAB is the
 189 bfd's elf64_aarch64_link_hash_table. */
190#define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
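/* For example, since this backend uses RELA-style relocations,
   RELOC_SECTION (htab, ".got") evaluates to ".rela.got" (sketch only;
   "htab" stands for whatever hash table pointer the caller passes). */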
192
193/* Return size of a relocation entry. HTAB is the bfd's
 194 elf64_aarch64_link_hash_table. */
195#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
196
197/* Return function to swap relocations in. HTAB is the bfd's
 198 elf64_aarch64_link_hash_table. */
199#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
200
201/* Return function to swap relocations out. HTAB is the bfd's
 202 elf64_aarch64_link_hash_table. */
203#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
204
205/* GOT Entry size - 8 bytes. */
206#define GOT_ENTRY_SIZE (8)
207#define PLT_ENTRY_SIZE (32)
208#define PLT_SMALL_ENTRY_SIZE (16)
209#define PLT_TLSDESC_ENTRY_SIZE (32)
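/* These sizes match the instruction templates further below: an
   8-instruction (32 byte) PLT0 entry, 4-instruction (16 byte)
   per-symbol PLT entries and an 8-instruction (32 byte) TLSDESC
   trampoline. */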
210
211/* Take the PAGE component of an address or offset. */
212#define PG(x) ((x) & ~ 0xfff)
213#define PG_OFFSET(x) ((x) & 0xfff)
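/* For example (sketch): PG (0x12345678) == 0x12345000 and
   PG_OFFSET (0x12345678) == 0x678, i.e. a 4K page split. */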
214
215/* Encoding of the nop instruction */
216#define INSN_NOP 0xd503201f
217
218#define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
221
 222/* The first entry in a procedure linkage table looks like this.
 223 These PLT entries are used if the distance between the PLTGOT and
 224 the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
226 [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
238
 239/* A per-function entry in a procedure linkage table looks like this.
 240 These PLT entries are used if the distance between the PLTGOT and
 241 the PLT is < 4GB. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
263#define elf_info_to_howto elf64_aarch64_info_to_howto
264#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
265
266#define AARCH64_ELF_ABI_VERSION 0
267#define AARCH64_ELF_OS_ABI_VERSION 0
268
269/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270#define ALL_ONES (~ (bfd_vma) 0)
271
272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
 357 FALSE), /* pcrel_offset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
 403/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
911
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
915 2, /* rightshift */
916 2, /* size (0 = byte,1 = short,2 = long) */
917 19, /* bitsize */
918 TRUE, /* pc_relative */
919 0, /* bitpos */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
927
928 EMPTY_HOWTO (310),
929
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
933 12, /* rightshift */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
935 21, /* bitsize */
936 TRUE, /* pc_relative */
937 0, /* bitpos */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
945
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
948 3, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 12, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
960};
961
962static reloc_howto_type elf64_aarch64_tls_howto_table[] =
963{
964 EMPTY_HOWTO (512),
965
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
969 12, /* rightshift */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
971 21, /* bitsize */
972 TRUE, /* pc_relative */
973 0, /* bitpos */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
981
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
984 0, /* rightshift */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
986 12, /* bitsize */
987 FALSE, /* pc_relative */
988 0, /* bitpos */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
996
997 EMPTY_HOWTO (515),
998 EMPTY_HOWTO (516),
999 EMPTY_HOWTO (517),
1000 EMPTY_HOWTO (518),
1001 EMPTY_HOWTO (519),
1002 EMPTY_HOWTO (520),
1003 EMPTY_HOWTO (521),
1004 EMPTY_HOWTO (522),
1005 EMPTY_HOWTO (523),
1006 EMPTY_HOWTO (524),
1007 EMPTY_HOWTO (525),
1008 EMPTY_HOWTO (526),
1009 EMPTY_HOWTO (527),
1010 EMPTY_HOWTO (528),
1011 EMPTY_HOWTO (529),
1012 EMPTY_HOWTO (530),
1013 EMPTY_HOWTO (531),
1014 EMPTY_HOWTO (532),
1015 EMPTY_HOWTO (533),
1016 EMPTY_HOWTO (534),
1017 EMPTY_HOWTO (535),
1018 EMPTY_HOWTO (536),
1019 EMPTY_HOWTO (537),
1020 EMPTY_HOWTO (538),
1021
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 21, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1065 3, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
 1079 2, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 21, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
 1093 32, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 12, /* bitsize */
1096 FALSE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1105
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
 1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 12, /* bitsize */
1110 FALSE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1119
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
 1121 16, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 12, /* bitsize */
1124 FALSE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1133
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 12, /* bitsize */
1138 FALSE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1147
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
 1163 12, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 12, /* bitsize */
1166 FALSE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1175
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 12, /* bitsize */
1180 FALSE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1189
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 12, /* bitsize */
1194 FALSE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
1203};
1204
1205static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1206{
1207 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */
 1208 2, /* rightshift */
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1210 21, /* bitsize */
1211 TRUE, /* pc_relative */
1212 0, /* bitpos */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1220
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1222 0, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 21, /* bitsize */
1225 TRUE, /* pc_relative */
1226 0, /* bitpos */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1234
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 21, /* bitsize */
1241 TRUE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1246 FALSE, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE), /* pcrel_offset */
1250
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1253 3, /* rightshift */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 12, /* bitsize */
1256 FALSE, /* pc_relative */
1257 0, /* bitpos */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE), /* pcrel_offset */
1265
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1268 0, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
 1282 16, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1284 12, /* bitsize */
1285 FALSE, /* pc_relative */
1286 0, /* bitpos */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1294
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1296 0, /* rightshift */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1298 12, /* bitsize */
1299 FALSE, /* pc_relative */
1300 0, /* bitpos */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1308
1309 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1310 0, /* rightshift */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1312 12, /* bitsize */
1313 FALSE, /* pc_relative */
1314 0, /* bitpos */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE, /* partial_inplace */
1319 0x0, /* src_mask */
1320 0x0, /* dst_mask */
1321 FALSE), /* pcrel_offset */
1322
1323 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1324 0, /* rightshift */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1326 12, /* bitsize */
1327 FALSE, /* pc_relative */
1328 0, /* bitpos */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE, /* partial_inplace */
1333 0x0, /* src_mask */
1334 0x0, /* dst_mask */
1335 FALSE), /* pcrel_offset */
1336
1337 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1338 0, /* rightshift */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1340 12, /* bitsize */
1341 FALSE, /* pc_relative */
1342 0, /* bitpos */
1343 complain_overflow_dont, /* complain_on_overflow */
1344 bfd_elf_generic_reloc, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE, /* partial_inplace */
1347 0x0, /* src_mask */
1348 0x0, /* dst_mask */
1349 FALSE), /* pcrel_offset */
1350};
1351
1352static reloc_howto_type *
1353elf64_aarch64_howto_from_type (unsigned int r_type)
1354{
1355 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1356 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1357
1358 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1359 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1360
1361 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1362 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1363
1364 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1365 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1366
1367 switch (r_type)
1368 {
1369 case R_AARCH64_NONE:
1370 return &elf64_aarch64_howto_none;
1371
1372 }
1373 bfd_set_error (bfd_error_bad_value);
1374 return NULL;
1375}
1376
1377static void
1378elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1379 Elf_Internal_Rela *elf_reloc)
1380{
1381 unsigned int r_type;
1382
1383 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1384 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1385}
1386
1387struct elf64_aarch64_reloc_map
1388{
1389 bfd_reloc_code_real_type bfd_reloc_val;
1390 unsigned int elf_reloc_val;
1391};
1392
1393/* All entries in this list must also be present in
1394 elf64_aarch64_howto_table. */
1395static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1396{
1397 {BFD_RELOC_NONE, R_AARCH64_NONE},
1398
1399 /* Basic data relocations. */
1400 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1401 {BFD_RELOC_64, R_AARCH64_ABS64},
1402 {BFD_RELOC_32, R_AARCH64_ABS32},
1403 {BFD_RELOC_16, R_AARCH64_ABS16},
1404 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1405 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1406 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1407
1408 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1409 value inline. */
1410 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1411 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1412 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1413
1414 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1415 signed value inline. */
1416 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1417 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1418 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1419
1420 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1421 unsigned value inline. */
1422 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1423 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1424 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1425 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1426
1427 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1428 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1429 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1430 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1431 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1432 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1433 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1434 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1435 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1436 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1437 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1438
1439 /* Relocations for control-flow instructions. */
1440 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1441 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1442 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1443 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1444
1445 /* Relocations for PIC. */
 1446 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
1447 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1448 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1449
1450 /* Relocations for TLS. */
1451 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1452 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1453 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1454 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1455 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1456 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1457 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1458 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1459 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1460 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1461 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1462 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1463 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1464 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1465 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1466 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1467 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1468 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1469 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1470 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1471 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1472 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1473 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1474 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
1475 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1476 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
1477 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1478 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1479 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1480 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1481 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1482 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1483 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1484 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1485 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1486 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1487 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1488};
1489
1490static reloc_howto_type *
1491elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1492 bfd_reloc_code_real_type code)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1497 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1500
1501 bfd_set_error (bfd_error_bad_value);
1502 return NULL;
1503}
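/* Sketch of a typical use (hypothetical caller, not part of this file):

     reloc_howto_type *howto
       = elf64_aarch64_reloc_type_lookup (abfd, BFD_RELOC_AARCH64_CALL26);

   maps the BFD code to the R_AARCH64_CALL26 howto via the table above,
   or returns NULL and sets bfd_error_bad_value when no entry matches. */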
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 const char *r_name)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1512 if (elf64_aarch64_howto_table[i].name != NULL
1513 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1514 return &elf64_aarch64_howto_table[i];
1515
1516 return NULL;
1517}
1518
1519/* Support for core dump NOTE sections. */
1520
1521static bfd_boolean
1522elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1523{
1524 int offset;
1525 size_t size;
1526
1527 switch (note->descsz)
1528 {
1529 default:
1530 return FALSE;
1531
1532 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1533 /* pr_cursig */
 1534 elf_tdata (abfd)->core->signal
1535 = bfd_get_16 (abfd, note->descdata + 12);
1536
1537 /* pr_pid */
 1538 elf_tdata (abfd)->core->lwpid
1539 = bfd_get_32 (abfd, note->descdata + 32);
1540
1541 /* pr_reg */
1542 offset = 112;
 1543 size = 272;
1544
1545 break;
1546 }
1547
1548 /* Make a ".reg/999" section. */
1549 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1550 size, note->descpos + offset);
1551}
1552
1553#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1554#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1555#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1556#define TARGET_BIG_NAME "elf64-bigaarch64"
1557
1558#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1559
1560typedef unsigned long int insn32;
1561
1562/* The linker script knows the section names for placement.
1563 The entry_names are used to do simple name mangling on the stubs.
 1564 Given a function name and its type, the stub can be found. The
 1565 name can be changed; the only requirement is that the %s be present. */
1566#define STUB_ENTRY_NAME "__%s_veneer"
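/* For example, a stub generated for calls to a function "foo" gets the
   local symbol name "__foo_veneer" under the format string above. */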
1567
1568/* The name of the dynamic interpreter. This is put in the .interp
1569 section. */
1570#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1571
1572#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1573 (((1 << 25) - 1) << 2)
1574#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1575 (-((1 << 25) << 2))
1576
1577#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1578#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
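/* Worked limits (sketch): a B/BL branch reaches from -0x8000000 up to
   +0x7fffffc bytes relative to the branch, and an ADRP immediate spans
   -0x100000 .. +0xfffff 4K pages, i.e. roughly +/-4GB. */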
1579
1580static int
1581aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1582{
1583 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1584 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1585}
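/* Sketch of a use: with place == 0x400000 and value == 0x80400000 the
   page offset is 0x80000 pages, well within the signed 21-bit ADRP
   range, so the check succeeds; a target a full 4GB away would not. */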
1586
1587static int
1588aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1589{
1590 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1591 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1593}
1594
1595static const uint32_t aarch64_adrp_branch_stub [] =
1596{
1597 0x90000010, /* adrp ip0, X */
1598 /* R_AARCH64_ADR_HI21_PCREL(X) */
1599 0x91000210, /* add ip0, ip0, :lo12:X */
1600 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1601 0xd61f0200, /* br ip0 */
1602};
1603
1604static const uint32_t aarch64_long_branch_stub[] =
1605{
1606 0x58000090, /* ldr ip0, 1f */
1607 0x10000011, /* adr ip1, #0 */
1608 0x8b110210, /* add ip0, ip0, ip1 */
1609 0xd61f0200, /* br ip0 */
1610 0x00000000, /* 1: .xword
1611 R_AARCH64_PREL64(X) + 12
1612 */
1613 0x00000000,
1614};
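/* Illustrative note on the long-branch stub above: it is 24 bytes, four
   instructions followed by an 8-byte literal at offset 16.  The ldr loads
   the literal into ip0, "adr ip1, #0" takes the address of the adr itself,
   and the add reconstructs the absolute target for the br.  The literal
   therefore holds the target relative to the adr instruction; because the
   literal lies 12 bytes after the adr, the build code below applies
   R_AARCH64_PREL64 with the symbol value biased by +12.  */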
1615
1616/* Section name for stubs is the associated section name plus this
1617 string. */
1618#define STUB_SUFFIX ".stub"
1619
1620enum elf64_aarch64_stub_type
1621{
1622 aarch64_stub_none,
1623 aarch64_stub_adrp_branch,
1624 aarch64_stub_long_branch,
1625};
1626
1627struct elf64_aarch64_stub_hash_entry
1628{
1629 /* Base hash table entry structure. */
1630 struct bfd_hash_entry root;
1631
1632 /* The stub section. */
1633 asection *stub_sec;
1634
1635 /* Offset within stub_sec of the beginning of this stub. */
1636 bfd_vma stub_offset;
1637
1638 /* Given the symbol's value and its section we can determine its final
1639 value when building the stubs (so the stub knows where to jump). */
1640 bfd_vma target_value;
1641 asection *target_section;
1642
1643 enum elf64_aarch64_stub_type stub_type;
1644
1645 /* The symbol table entry, if any, that this was derived from. */
1646 struct elf64_aarch64_link_hash_entry *h;
1647
1648 /* Destination symbol type */
1649 unsigned char st_type;
1650
1651 /* Where this stub is being called from, or, in the case of combined
1652 stub sections, the first input section in the group. */
1653 asection *id_sec;
1654
1655 /* The name for the local symbol at the start of this stub. The
1656 stub name in the hash table has to be unique; this does not, so
1657 it can be friendlier. */
1658 char *output_name;
1659};
1660
1661/* Used to build a map of a section. This is required for mixed-endian
1662 code/data. */
1663
1664typedef struct elf64_elf_section_map
1665{
1666 bfd_vma vma;
1667 char type;
1668}
1669elf64_aarch64_section_map;
1670
1671
1672typedef struct _aarch64_elf_section_data
1673{
1674 struct bfd_elf_section_data elf;
1675 unsigned int mapcount;
1676 unsigned int mapsize;
1677 elf64_aarch64_section_map *map;
1678}
1679_aarch64_elf_section_data;
1680
1681#define elf64_aarch64_section_data(sec) \
1682 ((_aarch64_elf_section_data *) elf_section_data (sec))
1683
1684/* The size of the thread control block. */
1685#define TCB_SIZE 16
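/* Illustrative note, assuming the standard AArch64 (variant I) TLS layout:
   the thread pointer addresses a 16-byte thread control block that precedes
   the static TLS data, so thread-pointer-relative (TPREL) offsets formed
   later in this file are biased past TCB_SIZE.  */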
1686
1687struct elf_aarch64_local_symbol
1688{
1689 unsigned int got_type;
1690 bfd_signed_vma got_refcount;
1691 bfd_vma got_offset;
1692
1693 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1694 offset is from the end of the jump table and reserved entries
1695 within the PLTGOT.
1696
 1697      The magic value (bfd_vma) -1 indicates that an offset has not been
1698 allocated. */
1699 bfd_vma tlsdesc_got_jump_table_offset;
1700};
1701
1702struct elf_aarch64_obj_tdata
1703{
1704 struct elf_obj_tdata root;
1705
 1706  /* Local symbol descriptors.  */
1707 struct elf_aarch64_local_symbol *locals;
1708
1709 /* Zero to warn when linking objects with incompatible enum sizes. */
1710 int no_enum_size_warning;
1711
1712 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1713 int no_wchar_size_warning;
1714};
1715
1716#define elf_aarch64_tdata(bfd) \
1717 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1718
1719#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1720
1721#define is_aarch64_elf(bfd) \
1722 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1723 && elf_tdata (bfd) != NULL \
1724 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1725
1726static bfd_boolean
1727elf64_aarch64_mkobject (bfd *abfd)
1728{
1729 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1730 AARCH64_ELF_DATA);
1731}
1732
1733/* The AArch64 linker needs to keep track of the number of relocs that it
1734 decides to copy in check_relocs for each symbol. This is so that
1735 it can discard PC relative relocs if it doesn't need them when
1736 linking with -Bsymbolic. We store the information in a field
1737 extending the regular ELF linker hash table. */
1738
1739/* This structure keeps track of the number of relocs we have copied
1740 for a given symbol. */
1741struct elf64_aarch64_relocs_copied
1742{
1743 /* Next section. */
1744 struct elf64_aarch64_relocs_copied *next;
1745 /* A section in dynobj. */
1746 asection *section;
1747 /* Number of relocs copied in this section. */
1748 bfd_size_type count;
1749 /* Number of PC-relative relocs copied in this section. */
1750 bfd_size_type pc_count;
1751};
1752
1753#define elf64_aarch64_hash_entry(ent) \
1754 ((struct elf64_aarch64_link_hash_entry *)(ent))
1755
1756#define GOT_UNKNOWN 0
1757#define GOT_NORMAL 1
1758#define GOT_TLS_GD 2
1759#define GOT_TLS_IE 4
1760#define GOT_TLSDESC_GD 8
1761
1762#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
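/* The GOT type values above form a bit mask, so a symbol referenced both
   through a traditional general-dynamic sequence and through a TLS
   descriptor sequence can, for example, end up with
   got_type == (GOT_TLS_GD | GOT_TLSDESC_GD), for which
   GOT_TLS_GD_ANY_P (got_type) is true.  */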
1763
1764/* AArch64 ELF linker hash entry. */
1765struct elf64_aarch64_link_hash_entry
1766{
1767 struct elf_link_hash_entry root;
1768
1769 /* Track dynamic relocs copied for this symbol. */
1770 struct elf_dyn_relocs *dyn_relocs;
1771
1772 /* Number of PC relative relocs copied for this symbol. */
1773 struct elf64_aarch64_relocs_copied *relocs_copied;
1774
1775 /* Since PLT entries have variable size, we need to record the
1776 index into .got.plt instead of recomputing it from the PLT
1777 offset. */
1778 bfd_signed_vma plt_got_offset;
1779
1780 /* Bit mask representing the type of GOT entry(s) if any required by
1781 this symbol. */
1782 unsigned int got_type;
1783
1784 /* A pointer to the most recently used stub hash entry against this
1785 symbol. */
1786 struct elf64_aarch64_stub_hash_entry *stub_cache;
1787
1788 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1789 is from the end of the jump table and reserved entries within the PLTGOT.
1790
1791 The magic value (bfd_vma) -1 indicates that an offset has not
 1792      been allocated.  */
1793 bfd_vma tlsdesc_got_jump_table_offset;
1794};
1795
1796static unsigned int
1797elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1798 bfd *abfd,
1799 unsigned long r_symndx)
1800{
1801 if (h)
1802 return elf64_aarch64_hash_entry (h)->got_type;
1803
1804 if (! elf64_aarch64_locals (abfd))
1805 return GOT_UNKNOWN;
1806
1807 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1808}
1809
1810/* Traverse an AArch64 ELF linker hash table. */
1811#define elf64_aarch64_link_hash_traverse(table, func, info) \
1812 (elf_link_hash_traverse \
1813 (&(table)->root, \
1814 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1815 (info)))
1816
1817/* Get the AArch64 elf linker hash table from a link_info structure. */
1818#define elf64_aarch64_hash_table(info) \
1819 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1820
1821#define aarch64_stub_hash_lookup(table, string, create, copy) \
1822 ((struct elf64_aarch64_stub_hash_entry *) \
1823 bfd_hash_lookup ((table), (string), (create), (copy)))
1824
1825/* AArch64 ELF linker hash table. */
1826struct elf64_aarch64_link_hash_table
1827{
1828 /* The main hash table. */
1829 struct elf_link_hash_table root;
1830
1831 /* Nonzero to force PIC branch veneers. */
1832 int pic_veneer;
1833
1834 /* The number of bytes in the initial entry in the PLT. */
1835 bfd_size_type plt_header_size;
1836
 1837  /* The number of bytes in the subsequent PLT entries.  */
1838 bfd_size_type plt_entry_size;
1839
1840 /* Short-cuts to get to dynamic linker sections. */
1841 asection *sdynbss;
1842 asection *srelbss;
1843
1844 /* Small local sym cache. */
1845 struct sym_cache sym_cache;
1846
1847 /* For convenience in allocate_dynrelocs. */
1848 bfd *obfd;
1849
1850 /* The amount of space used by the reserved portion of the sgotplt
1851 section, plus whatever space is used by the jump slots. */
1852 bfd_vma sgotplt_jump_table_size;
1853
1854 /* The stub hash table. */
1855 struct bfd_hash_table stub_hash_table;
1856
1857 /* Linker stub bfd. */
1858 bfd *stub_bfd;
1859
1860 /* Linker call-backs. */
1861 asection *(*add_stub_section) (const char *, asection *);
1862 void (*layout_sections_again) (void);
1863
1864 /* Array to keep track of which stub sections have been created, and
1865 information on stub grouping. */
1866 struct map_stub
1867 {
1868 /* This is the section to which stubs in the group will be
1869 attached. */
1870 asection *link_sec;
1871 /* The stub section. */
1872 asection *stub_sec;
1873 } *stub_group;
1874
1875 /* Assorted information used by elf64_aarch64_size_stubs. */
1876 unsigned int bfd_count;
1877 int top_index;
1878 asection **input_list;
1879
1880 /* The offset into splt of the PLT entry for the TLS descriptor
1881 resolver. Special values are 0, if not necessary (or not found
1882 to be necessary yet), and -1 if needed but not determined
1883 yet. */
1884 bfd_vma tlsdesc_plt;
1885
1886 /* The GOT offset for the lazy trampoline. Communicated to the
1887 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1888 indicates an offset is not allocated. */
1889 bfd_vma dt_tlsdesc_got;
1890};
1891
1892
1893/* Return non-zero if the indicated VALUE has overflowed the maximum
 1894   range expressible by an unsigned number with the indicated number of
1895 BITS. */
1896
1897static bfd_reloc_status_type
1898aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1899{
1900 bfd_vma lim;
1901 if (bits >= sizeof (bfd_vma) * 8)
1902 return bfd_reloc_ok;
1903 lim = (bfd_vma) 1 << bits;
1904 if (value >= lim)
1905 return bfd_reloc_overflow;
1906 return bfd_reloc_ok;
1907}
1908
1909
1910/* Return non-zero if the indicated VALUE has overflowed the maximum
 1911   range expressible by a signed number with the indicated number of
1912 BITS. */
1913
1914static bfd_reloc_status_type
1915aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1916{
1917 bfd_signed_vma svalue = (bfd_signed_vma) value;
1918 bfd_signed_vma lim;
1919
1920 if (bits >= sizeof (bfd_vma) * 8)
1921 return bfd_reloc_ok;
1922 lim = (bfd_signed_vma) 1 << (bits - 1);
1923 if (svalue < -lim || svalue >= lim)
1924 return bfd_reloc_overflow;
1925 return bfd_reloc_ok;
1926}
1927
1928/* Create an entry in an AArch64 ELF linker hash table. */
1929
1930static struct bfd_hash_entry *
1931elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1932 struct bfd_hash_table *table,
1933 const char *string)
1934{
1935 struct elf64_aarch64_link_hash_entry *ret =
1936 (struct elf64_aarch64_link_hash_entry *) entry;
1937
1938 /* Allocate the structure if it has not already been allocated by a
1939 subclass. */
1940 if (ret == NULL)
1941 ret = bfd_hash_allocate (table,
1942 sizeof (struct elf64_aarch64_link_hash_entry));
1943 if (ret == NULL)
1944 return (struct bfd_hash_entry *) ret;
1945
1946 /* Call the allocation method of the superclass. */
1947 ret = ((struct elf64_aarch64_link_hash_entry *)
1948 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1949 table, string));
1950 if (ret != NULL)
1951 {
1952 ret->dyn_relocs = NULL;
1953 ret->relocs_copied = NULL;
1954 ret->got_type = GOT_UNKNOWN;
1955 ret->plt_got_offset = (bfd_vma) - 1;
1956 ret->stub_cache = NULL;
1957 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1958 }
1959
1960 return (struct bfd_hash_entry *) ret;
1961}
1962
1963/* Initialize an entry in the stub hash table. */
1964
1965static struct bfd_hash_entry *
1966stub_hash_newfunc (struct bfd_hash_entry *entry,
1967 struct bfd_hash_table *table, const char *string)
1968{
1969 /* Allocate the structure if it has not already been allocated by a
1970 subclass. */
1971 if (entry == NULL)
1972 {
1973 entry = bfd_hash_allocate (table,
1974 sizeof (struct
1975 elf64_aarch64_stub_hash_entry));
1976 if (entry == NULL)
1977 return entry;
1978 }
1979
1980 /* Call the allocation method of the superclass. */
1981 entry = bfd_hash_newfunc (entry, table, string);
1982 if (entry != NULL)
1983 {
1984 struct elf64_aarch64_stub_hash_entry *eh;
1985
1986 /* Initialize the local fields. */
1987 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1988 eh->stub_sec = NULL;
1989 eh->stub_offset = 0;
1990 eh->target_value = 0;
1991 eh->target_section = NULL;
1992 eh->stub_type = aarch64_stub_none;
1993 eh->h = NULL;
1994 eh->id_sec = NULL;
1995 }
1996
1997 return entry;
1998}
1999
2000
2001/* Copy the extra info we tack onto an elf_link_hash_entry. */
2002
2003static void
2004elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2005 struct elf_link_hash_entry *dir,
2006 struct elf_link_hash_entry *ind)
2007{
2008 struct elf64_aarch64_link_hash_entry *edir, *eind;
2009
2010 edir = (struct elf64_aarch64_link_hash_entry *) dir;
2011 eind = (struct elf64_aarch64_link_hash_entry *) ind;
2012
2013 if (eind->dyn_relocs != NULL)
2014 {
2015 if (edir->dyn_relocs != NULL)
2016 {
2017 struct elf_dyn_relocs **pp;
2018 struct elf_dyn_relocs *p;
2019
2020 /* Add reloc counts against the indirect sym to the direct sym
2021 list. Merge any entries against the same section. */
2022 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2023 {
2024 struct elf_dyn_relocs *q;
2025
2026 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2027 if (q->sec == p->sec)
2028 {
2029 q->pc_count += p->pc_count;
2030 q->count += p->count;
2031 *pp = p->next;
2032 break;
2033 }
2034 if (q == NULL)
2035 pp = &p->next;
2036 }
2037 *pp = edir->dyn_relocs;
2038 }
2039
2040 edir->dyn_relocs = eind->dyn_relocs;
2041 eind->dyn_relocs = NULL;
2042 }
2043
2044 if (eind->relocs_copied != NULL)
2045 {
2046 if (edir->relocs_copied != NULL)
2047 {
2048 struct elf64_aarch64_relocs_copied **pp;
2049 struct elf64_aarch64_relocs_copied *p;
2050
2051 /* Add reloc counts against the indirect sym to the direct sym
2052 list. Merge any entries against the same section. */
2053 for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
2054 {
2055 struct elf64_aarch64_relocs_copied *q;
2056
2057 for (q = edir->relocs_copied; q != NULL; q = q->next)
2058 if (q->section == p->section)
2059 {
2060 q->pc_count += p->pc_count;
2061 q->count += p->count;
2062 *pp = p->next;
2063 break;
2064 }
2065 if (q == NULL)
2066 pp = &p->next;
2067 }
2068 *pp = edir->relocs_copied;
2069 }
2070
2071 edir->relocs_copied = eind->relocs_copied;
2072 eind->relocs_copied = NULL;
2073 }
2074
2075 if (ind->root.type == bfd_link_hash_indirect)
2076 {
2077 /* Copy over PLT info. */
2078 if (dir->got.refcount <= 0)
2079 {
2080 edir->got_type = eind->got_type;
2081 eind->got_type = GOT_UNKNOWN;
2082 }
2083 }
2084
2085 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2086}
2087
2088/* Create an AArch64 elf linker hash table. */
2089
2090static struct bfd_link_hash_table *
2091elf64_aarch64_link_hash_table_create (bfd *abfd)
2092{
2093 struct elf64_aarch64_link_hash_table *ret;
2094 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2095
7bf52ea2 2096 ret = bfd_zmalloc (amt);
2097 if (ret == NULL)
2098 return NULL;
2099
2100 if (!_bfd_elf_link_hash_table_init
2101 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2102 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2103 {
2104 free (ret);
2105 return NULL;
2106 }
2107
2108 ret->plt_header_size = PLT_ENTRY_SIZE;
2109 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
a06ea964 2110 ret->obfd = abfd;
2111 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2112
2113 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2114 sizeof (struct elf64_aarch64_stub_hash_entry)))
2115 {
2116 free (ret);
2117 return NULL;
2118 }
2119
2120 return &ret->root.root;
2121}
2122
2123/* Free the derived linker hash table. */
2124
2125static void
2126elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2127{
2128 struct elf64_aarch64_link_hash_table *ret
2129 = (struct elf64_aarch64_link_hash_table *) hash;
2130
2131 bfd_hash_table_free (&ret->stub_hash_table);
9f7c3e5e 2132 _bfd_elf_link_hash_table_free (hash);
2133}
2134
2135static bfd_vma
2136aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2137 bfd_vma addend, bfd_boolean weak_undef_p)
2138{
2139 switch (r_type)
2140 {
2141 case R_AARCH64_TLSDESC_CALL:
2142 case R_AARCH64_NONE:
2143 case R_AARCH64_NULL:
2144 break;
2145
2146 case R_AARCH64_ADR_PREL_LO21:
2147 case R_AARCH64_CONDBR19:
2148 case R_AARCH64_LD_PREL_LO19:
2149 case R_AARCH64_PREL16:
2150 case R_AARCH64_PREL32:
2151 case R_AARCH64_PREL64:
2152 case R_AARCH64_TSTBR14:
2153 if (weak_undef_p)
2154 value = place;
2155 value = value + addend - place;
2156 break;
2157
2158 case R_AARCH64_CALL26:
2159 case R_AARCH64_JUMP26:
2160 value = value + addend - place;
2161 break;
2162
2163 case R_AARCH64_ABS16:
2164 case R_AARCH64_ABS32:
2165 case R_AARCH64_MOVW_SABS_G0:
2166 case R_AARCH64_MOVW_SABS_G1:
2167 case R_AARCH64_MOVW_SABS_G2:
2168 case R_AARCH64_MOVW_UABS_G0:
2169 case R_AARCH64_MOVW_UABS_G0_NC:
2170 case R_AARCH64_MOVW_UABS_G1:
2171 case R_AARCH64_MOVW_UABS_G1_NC:
2172 case R_AARCH64_MOVW_UABS_G2:
2173 case R_AARCH64_MOVW_UABS_G2_NC:
2174 case R_AARCH64_MOVW_UABS_G3:
2175 value = value + addend;
2176 break;
2177
2178 case R_AARCH64_ADR_PREL_PG_HI21:
2179 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2180 if (weak_undef_p)
2181 value = PG (place);
2182 value = PG (value + addend) - PG (place);
2183 break;
2184
2185 case R_AARCH64_GOT_LD_PREL19:
2186 value = value + addend - place;
2187 break;
2188
2189 case R_AARCH64_ADR_GOT_PAGE:
2190 case R_AARCH64_TLSDESC_ADR_PAGE:
2191 case R_AARCH64_TLSGD_ADR_PAGE21:
2192 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2193 value = PG (value + addend) - PG (place);
2194 break;
2195
2196 case R_AARCH64_ADD_ABS_LO12_NC:
2197 case R_AARCH64_LD64_GOT_LO12_NC:
2198 case R_AARCH64_LDST8_ABS_LO12_NC:
2199 case R_AARCH64_LDST16_ABS_LO12_NC:
2200 case R_AARCH64_LDST32_ABS_LO12_NC:
2201 case R_AARCH64_LDST64_ABS_LO12_NC:
2202 case R_AARCH64_LDST128_ABS_LO12_NC:
2203 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2204 case R_AARCH64_TLSDESC_ADD:
2205 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2206 case R_AARCH64_TLSDESC_LDR:
2207 case R_AARCH64_TLSGD_ADD_LO12_NC:
2208 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2209 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2210 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2211 value = PG_OFFSET (value + addend);
2212 break;
2213
2214 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2215 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2216 value = (value + addend) & (bfd_vma) 0xffff0000;
2217 break;
2218 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2219 value = (value + addend) & (bfd_vma) 0xfff000;
2220 break;
2221
2222 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2223 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2224 value = (value + addend) & (bfd_vma) 0xffff;
2225 break;
2226
2227 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2228 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2229 value -= place & ~(bfd_vma) 0xffffffff;
2230 break;
2231 }
2232 return value;
2233}
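/* As an illustration of the cases above: a PC-relative relocation such as
   R_AARCH64_PREL32 resolves to S + A - P (symbol plus addend minus place),
   the page-based forms resolve to PG (S + A) - PG (P), and the *_LO12
   forms keep only the low 12 bits of S + A via PG_OFFSET.  */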
2234
2235static bfd_boolean
2236aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2237 bfd_vma offset, bfd_vma value)
2238{
2239 reloc_howto_type *howto;
2240 bfd_vma place;
2241
2242 howto = elf64_aarch64_howto_from_type (r_type);
2243 place = (input_section->output_section->vma + input_section->output_offset
2244 + offset);
2245 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2246 return bfd_elf_aarch64_put_addend (input_bfd,
2247 input_section->contents + offset,
2248 howto, value);
2249}
2250
2251static enum elf64_aarch64_stub_type
2252aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2253{
2254 if (aarch64_valid_for_adrp_p (value, place))
2255 return aarch64_stub_adrp_branch;
2256 return aarch64_stub_long_branch;
2257}
2258
2259/* Determine the type of stub needed, if any, for a call. */
2260
2261static enum elf64_aarch64_stub_type
2262aarch64_type_of_stub (struct bfd_link_info *info,
2263 asection *input_sec,
2264 const Elf_Internal_Rela *rel,
2265 unsigned char st_type,
2266 struct elf64_aarch64_link_hash_entry *hash,
2267 bfd_vma destination)
2268{
2269 bfd_vma location;
2270 bfd_signed_vma branch_offset;
2271 unsigned int r_type;
2272 struct elf64_aarch64_link_hash_table *globals;
2273 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2274 bfd_boolean via_plt_p;
2275
2276 if (st_type != STT_FUNC)
2277 return stub_type;
2278
2279 globals = elf64_aarch64_hash_table (info);
2280 via_plt_p = (globals->root.splt != NULL && hash != NULL
2281 && hash->root.plt.offset != (bfd_vma) - 1);
2282
2283 if (via_plt_p)
2284 return stub_type;
2285
2286 /* Determine where the call point is. */
2287 location = (input_sec->output_offset
2288 + input_sec->output_section->vma + rel->r_offset);
2289
2290 branch_offset = (bfd_signed_vma) (destination - location);
2291
2292 r_type = ELF64_R_TYPE (rel->r_info);
2293
2294 /* We don't want to redirect any old unconditional jump in this way,
2295 only one which is being used for a sibcall, where it is
2296 acceptable for the IP0 and IP1 registers to be clobbered. */
2297 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2298 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2299 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2300 {
2301 stub_type = aarch64_stub_long_branch;
2302 }
2303
2304 return stub_type;
2305}
2306
2307/* Build a name for an entry in the stub hash table. */
2308
2309static char *
2310elf64_aarch64_stub_name (const asection *input_section,
2311 const asection *sym_sec,
2312 const struct elf64_aarch64_link_hash_entry *hash,
2313 const Elf_Internal_Rela *rel)
2314{
2315 char *stub_name;
2316 bfd_size_type len;
2317
2318 if (hash)
2319 {
2320 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2321 stub_name = bfd_malloc (len);
2322 if (stub_name != NULL)
2323 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2324 (unsigned int) input_section->id,
2325 hash->root.root.root.string,
2326 rel->r_addend);
2327 }
2328 else
2329 {
2330 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2331 stub_name = bfd_malloc (len);
2332 if (stub_name != NULL)
2333 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2334 (unsigned int) input_section->id,
2335 (unsigned int) sym_sec->id,
2336 (unsigned int) ELF64_R_SYM (rel->r_info),
2337 rel->r_addend);
2338 }
2339
2340 return stub_name;
2341}
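/* Illustrative examples of the names built above (ids and addends shown in
   hex): a stub for a call to the global symbol "printf" from a group whose
   id_sec has id 0x2a might be named "0000002a_printf+0", while a stub for
   local symbol index 5 in the section with id 0x1f from the same group
   would be "0000002a_1f:5+0".  */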
2342
2343/* Look up an entry in the stub hash. Stub entries are cached because
2344 creating the stub name takes a bit of time. */
2345
2346static struct elf64_aarch64_stub_hash_entry *
2347elf64_aarch64_get_stub_entry (const asection *input_section,
2348 const asection *sym_sec,
2349 struct elf_link_hash_entry *hash,
2350 const Elf_Internal_Rela *rel,
2351 struct elf64_aarch64_link_hash_table *htab)
2352{
2353 struct elf64_aarch64_stub_hash_entry *stub_entry;
2354 struct elf64_aarch64_link_hash_entry *h =
2355 (struct elf64_aarch64_link_hash_entry *) hash;
2356 const asection *id_sec;
2357
2358 if ((input_section->flags & SEC_CODE) == 0)
2359 return NULL;
2360
2361 /* If this input section is part of a group of sections sharing one
2362 stub section, then use the id of the first section in the group.
2363 Stub names need to include a section id, as there may well be
2364 more than one stub used to reach say, printf, and we need to
2365 distinguish between them. */
2366 id_sec = htab->stub_group[input_section->id].link_sec;
2367
2368 if (h != NULL && h->stub_cache != NULL
2369 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2370 {
2371 stub_entry = h->stub_cache;
2372 }
2373 else
2374 {
2375 char *stub_name;
2376
2377 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2378 if (stub_name == NULL)
2379 return NULL;
2380
2381 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2382 stub_name, FALSE, FALSE);
2383 if (h != NULL)
2384 h->stub_cache = stub_entry;
2385
2386 free (stub_name);
2387 }
2388
2389 return stub_entry;
2390}
2391
2392/* Add a new stub entry to the stub hash. Not all fields of the new
2393 stub entry are initialised. */
2394
2395static struct elf64_aarch64_stub_hash_entry *
2396elf64_aarch64_add_stub (const char *stub_name,
2397 asection *section,
2398 struct elf64_aarch64_link_hash_table *htab)
2399{
2400 asection *link_sec;
2401 asection *stub_sec;
2402 struct elf64_aarch64_stub_hash_entry *stub_entry;
2403
2404 link_sec = htab->stub_group[section->id].link_sec;
2405 stub_sec = htab->stub_group[section->id].stub_sec;
2406 if (stub_sec == NULL)
2407 {
2408 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2409 if (stub_sec == NULL)
2410 {
2411 size_t namelen;
2412 bfd_size_type len;
2413 char *s_name;
2414
2415 namelen = strlen (link_sec->name);
2416 len = namelen + sizeof (STUB_SUFFIX);
2417 s_name = bfd_alloc (htab->stub_bfd, len);
2418 if (s_name == NULL)
2419 return NULL;
2420
2421 memcpy (s_name, link_sec->name, namelen);
2422 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2423 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2424 if (stub_sec == NULL)
2425 return NULL;
2426 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2427 }
2428 htab->stub_group[section->id].stub_sec = stub_sec;
2429 }
2430
2431 /* Enter this entry into the linker stub hash table. */
2432 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2433 TRUE, FALSE);
2434 if (stub_entry == NULL)
2435 {
2436 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2437 section->owner, stub_name);
2438 return NULL;
2439 }
2440
2441 stub_entry->stub_sec = stub_sec;
2442 stub_entry->stub_offset = 0;
2443 stub_entry->id_sec = link_sec;
2444
2445 return stub_entry;
2446}
2447
2448static bfd_boolean
2449aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2450 void *in_arg ATTRIBUTE_UNUSED)
2451{
2452 struct elf64_aarch64_stub_hash_entry *stub_entry;
2453 asection *stub_sec;
2454 bfd *stub_bfd;
2455 bfd_byte *loc;
2456 bfd_vma sym_value;
2457 unsigned int template_size;
2458 const uint32_t *template;
2459 unsigned int i;
2460
2461 /* Massage our args to the form they really have. */
2462 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2463
2464 stub_sec = stub_entry->stub_sec;
2465
2466 /* Make a note of the offset within the stubs for this entry. */
2467 stub_entry->stub_offset = stub_sec->size;
2468 loc = stub_sec->contents + stub_entry->stub_offset;
2469
2470 stub_bfd = stub_sec->owner;
2471
2472 /* This is the address of the stub destination. */
2473 sym_value = (stub_entry->target_value
2474 + stub_entry->target_section->output_offset
2475 + stub_entry->target_section->output_section->vma);
2476
2477 if (stub_entry->stub_type == aarch64_stub_long_branch)
2478 {
2479 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2480 + stub_sec->output_offset);
2481
2482 /* See if we can relax the stub. */
2483 if (aarch64_valid_for_adrp_p (sym_value, place))
2484 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2485 }
2486
2487 switch (stub_entry->stub_type)
2488 {
2489 case aarch64_stub_adrp_branch:
2490 template = aarch64_adrp_branch_stub;
2491 template_size = sizeof (aarch64_adrp_branch_stub);
2492 break;
2493 case aarch64_stub_long_branch:
2494 template = aarch64_long_branch_stub;
2495 template_size = sizeof (aarch64_long_branch_stub);
2496 break;
2497 default:
2498 BFD_FAIL ();
2499 return FALSE;
2500 }
2501
2502 for (i = 0; i < (template_size / sizeof template[0]); i++)
2503 {
2504 bfd_putl32 (template[i], loc);
2505 loc += 4;
2506 }
2507
2508 template_size = (template_size + 7) & ~7;
2509 stub_sec->size += template_size;
2510
2511 switch (stub_entry->stub_type)
2512 {
2513 case aarch64_stub_adrp_branch:
2514 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2515 stub_entry->stub_offset, sym_value))
2516 /* The stub would not have been relaxed if the offset was out
2517 of range. */
2518 BFD_FAIL ();
2519
2520 _bfd_final_link_relocate
2521 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2522 stub_bfd,
2523 stub_sec,
2524 stub_sec->contents,
2525 stub_entry->stub_offset + 4,
2526 sym_value,
2527 0);
2528 break;
2529
2530 case aarch64_stub_long_branch:
2531 /* We want the value relative to the address 12 bytes back from the
2532 value itself. */
2533 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2534 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2535 stub_sec->contents,
2536 stub_entry->stub_offset + 16,
2537 sym_value + 12, 0);
2538 break;
2539 default:
2540 break;
2541 }
2542
2543 return TRUE;
2544}
2545
2546/* As above, but don't actually build the stub. Just bump offset so
2547 we know stub section sizes. */
2548
2549static bfd_boolean
2550aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2551 void *in_arg ATTRIBUTE_UNUSED)
2552{
2553 struct elf64_aarch64_stub_hash_entry *stub_entry;
2554 int size;
2555
2556 /* Massage our args to the form they really have. */
2557 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2558
2559 switch (stub_entry->stub_type)
2560 {
2561 case aarch64_stub_adrp_branch:
2562 size = sizeof (aarch64_adrp_branch_stub);
2563 break;
2564 case aarch64_stub_long_branch:
2565 size = sizeof (aarch64_long_branch_stub);
2566 break;
2567 default:
2568 BFD_FAIL ();
2569 return FALSE;
2570 break;
2571 }
2572
2573 size = (size + 7) & ~7;
2574 stub_entry->stub_sec->size += size;
2575 return TRUE;
2576}
2577
2578/* External entry points for sizing and building linker stubs. */
2579
2580/* Set up various things so that we can make a list of input sections
2581 for each output section included in the link. Returns -1 on error,
2582 0 when no stubs will be needed, and 1 on success. */
2583
2584int
2585elf64_aarch64_setup_section_lists (bfd *output_bfd,
2586 struct bfd_link_info *info)
2587{
2588 bfd *input_bfd;
2589 unsigned int bfd_count;
2590 int top_id, top_index;
2591 asection *section;
2592 asection **input_list, **list;
2593 bfd_size_type amt;
2594 struct elf64_aarch64_link_hash_table *htab =
2595 elf64_aarch64_hash_table (info);
2596
2597 if (!is_elf_hash_table (htab))
2598 return 0;
2599
2600 /* Count the number of input BFDs and find the top input section id. */
2601 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2602 input_bfd != NULL; input_bfd = input_bfd->link_next)
2603 {
2604 bfd_count += 1;
2605 for (section = input_bfd->sections;
2606 section != NULL; section = section->next)
2607 {
2608 if (top_id < section->id)
2609 top_id = section->id;
2610 }
2611 }
2612 htab->bfd_count = bfd_count;
2613
2614 amt = sizeof (struct map_stub) * (top_id + 1);
2615 htab->stub_group = bfd_zmalloc (amt);
2616 if (htab->stub_group == NULL)
2617 return -1;
2618
2619 /* We can't use output_bfd->section_count here to find the top output
2620 section index as some sections may have been removed, and
2621 _bfd_strip_section_from_output doesn't renumber the indices. */
2622 for (section = output_bfd->sections, top_index = 0;
2623 section != NULL; section = section->next)
2624 {
2625 if (top_index < section->index)
2626 top_index = section->index;
2627 }
2628
2629 htab->top_index = top_index;
2630 amt = sizeof (asection *) * (top_index + 1);
2631 input_list = bfd_malloc (amt);
2632 htab->input_list = input_list;
2633 if (input_list == NULL)
2634 return -1;
2635
2636 /* For sections we aren't interested in, mark their entries with a
2637 value we can check later. */
2638 list = input_list + top_index;
2639 do
2640 *list = bfd_abs_section_ptr;
2641 while (list-- != input_list);
2642
2643 for (section = output_bfd->sections;
2644 section != NULL; section = section->next)
2645 {
2646 if ((section->flags & SEC_CODE) != 0)
2647 input_list[section->index] = NULL;
2648 }
2649
2650 return 1;
2651}
2652
2653/* Used by elf64_aarch64_next_input_section and group_sections. */
2654#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2655
2656/* The linker repeatedly calls this function for each input section,
2657 in the order that input sections are linked into output sections.
2658 Build lists of input sections to determine groupings between which
2659 we may insert linker stubs. */
2660
2661void
2662elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2663{
2664 struct elf64_aarch64_link_hash_table *htab =
2665 elf64_aarch64_hash_table (info);
2666
2667 if (isec->output_section->index <= htab->top_index)
2668 {
2669 asection **list = htab->input_list + isec->output_section->index;
2670
2671 if (*list != bfd_abs_section_ptr)
2672 {
2673 /* Steal the link_sec pointer for our list. */
2674 /* This happens to make the list in reverse order,
2675 which is what we want. */
2676 PREV_SEC (isec) = *list;
2677 *list = isec;
2678 }
2679 }
2680}
2681
2682/* See whether we can group stub sections together. Grouping stub
2683 sections may result in fewer stubs. More importantly, we need to
2684 put all .init* and .fini* stubs at the beginning of the .init or
2685 .fini output sections respectively, because glibc splits the
2686 _init and _fini functions into multiple parts. Putting a stub in
2687 the middle of a function is not a good idea. */
2688
2689static void
2690group_sections (struct elf64_aarch64_link_hash_table *htab,
2691 bfd_size_type stub_group_size,
2692 bfd_boolean stubs_always_before_branch)
2693{
2694 asection **list = htab->input_list + htab->top_index;
2695
2696 do
2697 {
2698 asection *tail = *list;
2699
2700 if (tail == bfd_abs_section_ptr)
2701 continue;
2702
2703 while (tail != NULL)
2704 {
2705 asection *curr;
2706 asection *prev;
2707 bfd_size_type total;
2708
2709 curr = tail;
2710 total = tail->size;
2711 while ((prev = PREV_SEC (curr)) != NULL
2712 && ((total += curr->output_offset - prev->output_offset)
2713 < stub_group_size))
2714 curr = prev;
2715
2716 /* OK, the size from the start of CURR to the end is less
2717 than stub_group_size and thus can be handled by one stub
2718 section. (Or the tail section is itself larger than
2719 stub_group_size, in which case we may be toast.)
2720 We should really be keeping track of the total size of
2721 stubs added here, as stubs contribute to the final output
2722 section size. */
2723 do
2724 {
2725 prev = PREV_SEC (tail);
2726 /* Set up this stub group. */
2727 htab->stub_group[tail->id].link_sec = curr;
2728 }
2729 while (tail != curr && (tail = prev) != NULL);
2730
2731 /* But wait, there's more! Input sections up to stub_group_size
2732 bytes before the stub section can be handled by it too. */
2733 if (!stubs_always_before_branch)
2734 {
2735 total = 0;
2736 while (prev != NULL
2737 && ((total += tail->output_offset - prev->output_offset)
2738 < stub_group_size))
2739 {
2740 tail = prev;
2741 prev = PREV_SEC (tail);
2742 htab->stub_group[tail->id].link_sec = curr;
2743 }
2744 }
2745 tail = prev;
2746 }
2747 }
2748 while (list-- != htab->input_list);
2749
2750 free (htab->input_list);
2751}
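/* Illustrative example of the grouping above: with the default group size
   (127MB, set in elf64_aarch64_size_stubs below), consecutive input code
   sections whose combined span stays under that limit are all given the
   same link_sec, and so will later share a single ".stub" section attached
   to that group leader.  */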
2752
2753#undef PREV_SEC
2754
2755/* Determine and set the size of the stub section for a final link.
2756
2757 The basic idea here is to examine all the relocations looking for
2758 PC-relative calls to a target that is unreachable with a "bl"
2759 instruction. */
2760
2761bfd_boolean
2762elf64_aarch64_size_stubs (bfd *output_bfd,
2763 bfd *stub_bfd,
2764 struct bfd_link_info *info,
2765 bfd_signed_vma group_size,
2766 asection * (*add_stub_section) (const char *,
2767 asection *),
2768 void (*layout_sections_again) (void))
2769{
2770 bfd_size_type stub_group_size;
2771 bfd_boolean stubs_always_before_branch;
2772 bfd_boolean stub_changed = 0;
2773 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2774
2775 /* Propagate mach to stub bfd, because it may not have been
2776 finalized when we created stub_bfd. */
2777 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2778 bfd_get_mach (output_bfd));
2779
2780 /* Stash our params away. */
2781 htab->stub_bfd = stub_bfd;
2782 htab->add_stub_section = add_stub_section;
2783 htab->layout_sections_again = layout_sections_again;
2784 stubs_always_before_branch = group_size < 0;
2785 if (group_size < 0)
2786 stub_group_size = -group_size;
2787 else
2788 stub_group_size = group_size;
2789
2790 if (stub_group_size == 1)
2791 {
2792 /* Default values. */
 2793      /* AArch64 branch range is +-128MB. The value used is 1MB less. */
2794 stub_group_size = 127 * 1024 * 1024;
2795 }
2796
2797 group_sections (htab, stub_group_size, stubs_always_before_branch);
2798
2799 while (1)
2800 {
2801 bfd *input_bfd;
2802 unsigned int bfd_indx;
2803 asection *stub_sec;
2804
2805 for (input_bfd = info->input_bfds, bfd_indx = 0;
2806 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2807 {
2808 Elf_Internal_Shdr *symtab_hdr;
2809 asection *section;
2810 Elf_Internal_Sym *local_syms = NULL;
2811
2812 /* We'll need the symbol table in a second. */
2813 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2814 if (symtab_hdr->sh_info == 0)
2815 continue;
2816
2817 /* Walk over each section attached to the input bfd. */
2818 for (section = input_bfd->sections;
2819 section != NULL; section = section->next)
2820 {
2821 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2822
2823 /* If there aren't any relocs, then there's nothing more
2824 to do. */
2825 if ((section->flags & SEC_RELOC) == 0
2826 || section->reloc_count == 0
2827 || (section->flags & SEC_CODE) == 0)
2828 continue;
2829
2830 /* If this section is a link-once section that will be
2831 discarded, then don't create any stubs. */
2832 if (section->output_section == NULL
2833 || section->output_section->owner != output_bfd)
2834 continue;
2835
2836 /* Get the relocs. */
2837 internal_relocs
2838 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2839 NULL, info->keep_memory);
2840 if (internal_relocs == NULL)
2841 goto error_ret_free_local;
2842
2843 /* Now examine each relocation. */
2844 irela = internal_relocs;
2845 irelaend = irela + section->reloc_count;
2846 for (; irela < irelaend; irela++)
2847 {
2848 unsigned int r_type, r_indx;
2849 enum elf64_aarch64_stub_type stub_type;
2850 struct elf64_aarch64_stub_hash_entry *stub_entry;
2851 asection *sym_sec;
2852 bfd_vma sym_value;
2853 bfd_vma destination;
2854 struct elf64_aarch64_link_hash_entry *hash;
2855 const char *sym_name;
2856 char *stub_name;
2857 const asection *id_sec;
2858 unsigned char st_type;
2859 bfd_size_type len;
2860
2861 r_type = ELF64_R_TYPE (irela->r_info);
2862 r_indx = ELF64_R_SYM (irela->r_info);
2863
2864 if (r_type >= (unsigned int) R_AARCH64_end)
2865 {
2866 bfd_set_error (bfd_error_bad_value);
2867 error_ret_free_internal:
2868 if (elf_section_data (section)->relocs == NULL)
2869 free (internal_relocs);
2870 goto error_ret_free_local;
2871 }
2872
2873 /* Only look for stubs on unconditional branch and
2874 branch and link instructions. */
2875 if (r_type != (unsigned int) R_AARCH64_CALL26
2876 && r_type != (unsigned int) R_AARCH64_JUMP26)
2877 continue;
2878
2879 /* Now determine the call target, its name, value,
2880 section. */
2881 sym_sec = NULL;
2882 sym_value = 0;
2883 destination = 0;
2884 hash = NULL;
2885 sym_name = NULL;
2886 if (r_indx < symtab_hdr->sh_info)
2887 {
2888 /* It's a local symbol. */
2889 Elf_Internal_Sym *sym;
2890 Elf_Internal_Shdr *hdr;
2891
2892 if (local_syms == NULL)
2893 {
2894 local_syms
2895 = (Elf_Internal_Sym *) symtab_hdr->contents;
2896 if (local_syms == NULL)
2897 local_syms
2898 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2899 symtab_hdr->sh_info, 0,
2900 NULL, NULL, NULL);
2901 if (local_syms == NULL)
2902 goto error_ret_free_internal;
2903 }
2904
2905 sym = local_syms + r_indx;
2906 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2907 sym_sec = hdr->bfd_section;
2908 if (!sym_sec)
2909 /* This is an undefined symbol. It can never
2910 be resolved. */
2911 continue;
2912
2913 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2914 sym_value = sym->st_value;
2915 destination = (sym_value + irela->r_addend
2916 + sym_sec->output_offset
2917 + sym_sec->output_section->vma);
2918 st_type = ELF_ST_TYPE (sym->st_info);
2919 sym_name
2920 = bfd_elf_string_from_elf_section (input_bfd,
2921 symtab_hdr->sh_link,
2922 sym->st_name);
2923 }
2924 else
2925 {
2926 int e_indx;
2927
2928 e_indx = r_indx - symtab_hdr->sh_info;
2929 hash = ((struct elf64_aarch64_link_hash_entry *)
2930 elf_sym_hashes (input_bfd)[e_indx]);
2931
2932 while (hash->root.root.type == bfd_link_hash_indirect
2933 || hash->root.root.type == bfd_link_hash_warning)
2934 hash = ((struct elf64_aarch64_link_hash_entry *)
2935 hash->root.root.u.i.link);
2936
2937 if (hash->root.root.type == bfd_link_hash_defined
2938 || hash->root.root.type == bfd_link_hash_defweak)
2939 {
2940 struct elf64_aarch64_link_hash_table *globals =
2941 elf64_aarch64_hash_table (info);
2942 sym_sec = hash->root.root.u.def.section;
2943 sym_value = hash->root.root.u.def.value;
2944 /* For a destination in a shared library,
2945 use the PLT stub as target address to
2946 decide whether a branch stub is
2947 needed. */
2948 if (globals->root.splt != NULL && hash != NULL
2949 && hash->root.plt.offset != (bfd_vma) - 1)
2950 {
2951 sym_sec = globals->root.splt;
2952 sym_value = hash->root.plt.offset;
2953 if (sym_sec->output_section != NULL)
2954 destination = (sym_value
2955 + sym_sec->output_offset
2956 +
2957 sym_sec->output_section->vma);
2958 }
2959 else if (sym_sec->output_section != NULL)
2960 destination = (sym_value + irela->r_addend
2961 + sym_sec->output_offset
2962 + sym_sec->output_section->vma);
2963 }
2964 else if (hash->root.root.type == bfd_link_hash_undefined
2965 || (hash->root.root.type
2966 == bfd_link_hash_undefweak))
2967 {
2968 /* For a shared library, use the PLT stub as
2969 target address to decide whether a long
2970 branch stub is needed.
2971 For absolute code, they cannot be handled. */
2972 struct elf64_aarch64_link_hash_table *globals =
2973 elf64_aarch64_hash_table (info);
2974
2975 if (globals->root.splt != NULL && hash != NULL
2976 && hash->root.plt.offset != (bfd_vma) - 1)
2977 {
2978 sym_sec = globals->root.splt;
2979 sym_value = hash->root.plt.offset;
2980 if (sym_sec->output_section != NULL)
2981 destination = (sym_value
2982 + sym_sec->output_offset
2983 +
2984 sym_sec->output_section->vma);
2985 }
2986 else
2987 continue;
2988 }
2989 else
2990 {
2991 bfd_set_error (bfd_error_bad_value);
2992 goto error_ret_free_internal;
2993 }
2994 st_type = ELF_ST_TYPE (hash->root.type);
2995 sym_name = hash->root.root.root.string;
2996 }
2997
2998 /* Determine what (if any) linker stub is needed. */
2999 stub_type = aarch64_type_of_stub
3000 (info, section, irela, st_type, hash, destination);
3001 if (stub_type == aarch64_stub_none)
3002 continue;
3003
3004 /* Support for grouping stub sections. */
3005 id_sec = htab->stub_group[section->id].link_sec;
3006
3007 /* Get the name of this stub. */
3008 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
3009 irela);
3010 if (!stub_name)
3011 goto error_ret_free_internal;
3012
3013 stub_entry =
3014 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3015 stub_name, FALSE, FALSE);
3016 if (stub_entry != NULL)
3017 {
3018 /* The proper stub has already been created. */
3019 free (stub_name);
3020 continue;
3021 }
3022
3023 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3024 htab);
3025 if (stub_entry == NULL)
3026 {
3027 free (stub_name);
3028 goto error_ret_free_internal;
3029 }
3030
3031 stub_entry->target_value = sym_value;
3032 stub_entry->target_section = sym_sec;
3033 stub_entry->stub_type = stub_type;
3034 stub_entry->h = hash;
3035 stub_entry->st_type = st_type;
3036
3037 if (sym_name == NULL)
3038 sym_name = "unnamed";
3039 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3040 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3041 if (stub_entry->output_name == NULL)
3042 {
3043 free (stub_name);
3044 goto error_ret_free_internal;
3045 }
3046
3047 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3048 sym_name);
3049
3050 stub_changed = TRUE;
3051 }
3052
3053 /* We're done with the internal relocs, free them. */
3054 if (elf_section_data (section)->relocs == NULL)
3055 free (internal_relocs);
3056 }
3057 }
3058
3059 if (!stub_changed)
3060 break;
3061
3062 /* OK, we've added some stubs. Find out the new size of the
3063 stub sections. */
3064 for (stub_sec = htab->stub_bfd->sections;
3065 stub_sec != NULL; stub_sec = stub_sec->next)
3066 stub_sec->size = 0;
3067
3068 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3069
3070 /* Ask the linker to do its stuff. */
3071 (*htab->layout_sections_again) ();
3072 stub_changed = FALSE;
3073 }
3074
3075 return TRUE;
3076
3077error_ret_free_local:
3078 return FALSE;
3079}
3080
3081/* Build all the stubs associated with the current output file. The
3082 stubs are kept in a hash table attached to the main linker hash
3083 table. We also set up the .plt entries for statically linked PIC
3084 functions here. This function is called via aarch64_elf_finish in the
3085 linker. */
3086
3087bfd_boolean
3088elf64_aarch64_build_stubs (struct bfd_link_info *info)
3089{
3090 asection *stub_sec;
3091 struct bfd_hash_table *table;
3092 struct elf64_aarch64_link_hash_table *htab;
3093
3094 htab = elf64_aarch64_hash_table (info);
3095
3096 for (stub_sec = htab->stub_bfd->sections;
3097 stub_sec != NULL; stub_sec = stub_sec->next)
3098 {
3099 bfd_size_type size;
3100
3101 /* Ignore non-stub sections. */
3102 if (!strstr (stub_sec->name, STUB_SUFFIX))
3103 continue;
3104
3105 /* Allocate memory to hold the linker stubs. */
3106 size = stub_sec->size;
3107 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3108 if (stub_sec->contents == NULL && size != 0)
3109 return FALSE;
3110 stub_sec->size = 0;
3111 }
3112
3113 /* Build the stubs as directed by the stub hash table. */
3114 table = &htab->stub_hash_table;
3115 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3116
3117 return TRUE;
3118}
3119
3120
3121/* Add an entry to the code/data map for section SEC. */
3122
3123static void
3124elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3125{
3126 struct _aarch64_elf_section_data *sec_data =
3127 elf64_aarch64_section_data (sec);
3128 unsigned int newidx;
3129
3130 if (sec_data->map == NULL)
3131 {
3132 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3133 sec_data->mapcount = 0;
3134 sec_data->mapsize = 1;
3135 }
3136
3137 newidx = sec_data->mapcount++;
3138
3139 if (sec_data->mapcount > sec_data->mapsize)
3140 {
3141 sec_data->mapsize *= 2;
3142 sec_data->map = bfd_realloc_or_free
3143 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3144 }
3145
3146 if (sec_data->map)
3147 {
3148 sec_data->map[newidx].vma = vma;
3149 sec_data->map[newidx].type = type;
3150 }
3151}
3152
3153
3154/* Initialise maps of insn/data for input BFDs. */
3155void
3156bfd_elf64_aarch64_init_maps (bfd *abfd)
3157{
3158 Elf_Internal_Sym *isymbuf;
3159 Elf_Internal_Shdr *hdr;
3160 unsigned int i, localsyms;
3161
3162 /* Make sure that we are dealing with an AArch64 elf binary. */
3163 if (!is_aarch64_elf (abfd))
3164 return;
3165
3166 if ((abfd->flags & DYNAMIC) != 0)
3167 return;
3168
3169 hdr = &elf_symtab_hdr (abfd);
3170 localsyms = hdr->sh_info;
3171
3172 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3173 should contain the number of local symbols, which should come before any
3174 global symbols. Mapping symbols are always local. */
3175 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3176
3177 /* No internal symbols read? Skip this BFD. */
3178 if (isymbuf == NULL)
3179 return;
3180
3181 for (i = 0; i < localsyms; i++)
3182 {
3183 Elf_Internal_Sym *isym = &isymbuf[i];
3184 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3185 const char *name;
3186
3187 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3188 {
3189 name = bfd_elf_string_from_elf_section (abfd,
3190 hdr->sh_link,
3191 isym->st_name);
3192
3193 if (bfd_is_aarch64_special_symbol_name
3194 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3195 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3196 }
3197 }
3198}
3199
3200/* Set option values needed during linking. */
3201void
3202bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3203 struct bfd_link_info *link_info,
3204 int no_enum_warn,
3205 int no_wchar_warn, int pic_veneer)
3206{
3207 struct elf64_aarch64_link_hash_table *globals;
3208
3209 globals = elf64_aarch64_hash_table (link_info);
3210 globals->pic_veneer = pic_veneer;
3211
3212 BFD_ASSERT (is_aarch64_elf (output_bfd));
3213 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3214 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3215}
3216
3217#define MASK(n) ((1u << (n)) - 1)
3218
3219/* Decode the 26-bit offset of unconditional branch. */
3220static inline uint32_t
3221decode_branch_ofs_26 (uint32_t insn)
3222{
3223 return insn & MASK (26);
3224}
3225
3226/* Decode the 19-bit offset of conditional branch and compare & branch. */
3227static inline uint32_t
3228decode_cond_branch_ofs_19 (uint32_t insn)
3229{
3230 return (insn >> 5) & MASK (19);
3231}
3232
3233/* Decode the 19-bit offset of load literal. */
3234static inline uint32_t
3235decode_ld_lit_ofs_19 (uint32_t insn)
3236{
3237 return (insn >> 5) & MASK (19);
3238}
3239
3240/* Decode the 14-bit offset of test & branch. */
3241static inline uint32_t
3242decode_tst_branch_ofs_14 (uint32_t insn)
3243{
3244 return (insn >> 5) & MASK (14);
3245}
3246
3247/* Decode the 16-bit imm of move wide. */
3248static inline uint32_t
3249decode_movw_imm (uint32_t insn)
3250{
3251 return (insn >> 5) & MASK (16);
3252}
3253
3254/* Decode the 21-bit imm of adr. */
3255static inline uint32_t
3256decode_adr_imm (uint32_t insn)
3257{
3258 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3259}
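/* Illustrative note on the bit layout handled by decode_adr_imm and
   reencode_adr_imm below: the 21-bit ADR/ADRP immediate is split in the
   instruction into immlo (bits 30:29, the two low bits) and immhi
   (bits 23:5, the upper 19 bits), so decoding reassembles immhi:immlo and
   re-encoding scatters the bits back into those two fields.  */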
3260
3261/* Decode the 12-bit imm of add immediate. */
3262static inline uint32_t
3263decode_add_imm (uint32_t insn)
3264{
3265 return (insn >> 10) & MASK (12);
3266}
3267
3268
3269/* Encode the 26-bit offset of unconditional branch. */
3270static inline uint32_t
3271reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3272{
3273 return (insn & ~MASK (26)) | (ofs & MASK (26));
3274}
3275
3276/* Encode the 19-bit offset of conditional branch and compare & branch. */
3277static inline uint32_t
3278reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3279{
3280 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3281}
3282
3284/* Encode the 19-bit offset of load literal. */
3284static inline uint32_t
3285reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3286{
3287 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3288}
3289
3290/* Encode the 14-bit offset of test & branch. */
3291static inline uint32_t
3292reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3293{
3294 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3295}
3296
3297/* Reencode the imm field of move wide. */
3298static inline uint32_t
3299reencode_movw_imm (uint32_t insn, uint32_t imm)
3300{
3301 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3302}
3303
3304/* Reencode the imm field of adr. */
3305static inline uint32_t
3306reencode_adr_imm (uint32_t insn, uint32_t imm)
3307{
3308 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3309 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3310}
3311
3312/* Reencode the imm field of ld/st pos immediate. */
3313static inline uint32_t
3314reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3315{
3316 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3317}
3318
3319/* Reencode the imm field of add immediate. */
3320static inline uint32_t
3321reencode_add_imm (uint32_t insn, uint32_t imm)
3322{
3323 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3324}
3325
3326/* Reencode mov[zn] to movz. */
3327static inline uint32_t
3328reencode_movzn_to_movz (uint32_t opcode)
3329{
3330 return opcode | (1 << 30);
3331}
3332
3333/* Reencode mov[zn] to movn. */
3334static inline uint32_t
3335reencode_movzn_to_movn (uint32_t opcode)
3336{
3337 return opcode & ~(1 << 30);
3338}
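/* Bit 30 distinguishes MOVZ (set) from MOVN (clear) among the move-wide
   instructions that can reach the two helpers above.  They are used by
   bfd_elf_aarch64_put_addend below: when a signed MOVW group relocation
   yields a negative value the addend is inverted and the instruction
   flipped to MOVN, otherwise MOVZ is forced.  */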
3339
3340/* Insert the addend/value into the instruction or data object being
3341 relocated. */
3342static bfd_reloc_status_type
3343bfd_elf_aarch64_put_addend (bfd *abfd,
3344 bfd_byte *address,
3345 reloc_howto_type *howto, bfd_signed_vma addend)
3346{
3347 bfd_reloc_status_type status = bfd_reloc_ok;
3348 bfd_signed_vma old_addend = addend;
3349 bfd_vma contents;
3350 int size;
3351
3352 size = bfd_get_reloc_size (howto);
3353 switch (size)
3354 {
3355 case 2:
3356 contents = bfd_get_16 (abfd, address);
3357 break;
3358 case 4:
3359 if (howto->src_mask != 0xffffffff)
3360 /* Must be 32-bit instruction, always little-endian. */
3361 contents = bfd_getl32 (address);
3362 else
3363 /* Must be 32-bit data (endianness dependent). */
3364 contents = bfd_get_32 (abfd, address);
3365 break;
3366 case 8:
3367 contents = bfd_get_64 (abfd, address);
3368 break;
3369 default:
3370 abort ();
3371 }
3372
3373 switch (howto->complain_on_overflow)
3374 {
3375 case complain_overflow_dont:
3376 break;
3377 case complain_overflow_signed:
3378 status = aarch64_signed_overflow (addend,
3379 howto->bitsize + howto->rightshift);
3380 break;
3381 case complain_overflow_unsigned:
3382 status = aarch64_unsigned_overflow (addend,
3383 howto->bitsize + howto->rightshift);
3384 break;
3385 case complain_overflow_bitfield:
3386 default:
3387 abort ();
3388 }
3389
3390 addend >>= howto->rightshift;
3391
3392 switch (howto->type)
3393 {
3394 case R_AARCH64_JUMP26:
3395 case R_AARCH64_CALL26:
3396 contents = reencode_branch_ofs_26 (contents, addend);
3397 break;
3398
3399 case R_AARCH64_CONDBR19:
3400 contents = reencode_cond_branch_ofs_19 (contents, addend);
3401 break;
3402
3403 case R_AARCH64_TSTBR14:
3404 contents = reencode_tst_branch_ofs_14 (contents, addend);
3405 break;
3406
3407 case R_AARCH64_LD_PREL_LO19:
f41aef5f 3408 case R_AARCH64_GOT_LD_PREL19:
3409 if (old_addend & ((1 << howto->rightshift) - 1))
3410 return bfd_reloc_overflow;
3411 contents = reencode_ld_lit_ofs_19 (contents, addend);
3412 break;
3413
3414 case R_AARCH64_TLSDESC_CALL:
3415 break;
3416
3417 case R_AARCH64_TLSGD_ADR_PAGE21:
3418 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3419 case R_AARCH64_TLSDESC_ADR_PAGE:
3420 case R_AARCH64_ADR_GOT_PAGE:
3421 case R_AARCH64_ADR_PREL_LO21:
3422 case R_AARCH64_ADR_PREL_PG_HI21:
3423 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3424 contents = reencode_adr_imm (contents, addend);
3425 break;
3426
3427 case R_AARCH64_TLSGD_ADD_LO12_NC:
3428 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3429 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3430 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3431 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3432 case R_AARCH64_ADD_ABS_LO12_NC:
3433 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3434 12 bits of the page offset following
3435 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3436 (pc-relative) page base. */
3437 contents = reencode_add_imm (contents, addend);
3438 break;
3439
3440 case R_AARCH64_LDST8_ABS_LO12_NC:
3441 case R_AARCH64_LDST16_ABS_LO12_NC:
3442 case R_AARCH64_LDST32_ABS_LO12_NC:
3443 case R_AARCH64_LDST64_ABS_LO12_NC:
3444 case R_AARCH64_LDST128_ABS_LO12_NC:
3445 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3446 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3447 case R_AARCH64_LD64_GOT_LO12_NC:
3448 if (old_addend & ((1 << howto->rightshift) - 1))
3449 return bfd_reloc_overflow;
3450 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3451 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3452 which computes the (pc-relative) page base. */
3453 contents = reencode_ldst_pos_imm (contents, addend);
3454 break;
3455
3456 /* Group relocations to create high bits of a 16, 32, 48 or 64
3457 bit signed data or abs address inline. Will change
3458 instruction to MOVN or MOVZ depending on sign of calculated
3459 value. */
3460
3461 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3462 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3463 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3464 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3465 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3466 case R_AARCH64_MOVW_SABS_G0:
3467 case R_AARCH64_MOVW_SABS_G1:
3468 case R_AARCH64_MOVW_SABS_G2:
3469 /* NOTE: We can only come here with movz or movn. */
3470 if (addend < 0)
3471 {
3472 /* Force use of MOVN. */
3473 addend = ~addend;
3474 contents = reencode_movzn_to_movn (contents);
3475 }
3476 else
3477 {
3478 /* Force use of MOVZ. */
3479 contents = reencode_movzn_to_movz (contents);
3480 }
3481 /* fall through */
3482
3483 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3484 data or abs address inline. */
3485
3486 case R_AARCH64_MOVW_UABS_G0:
3487 case R_AARCH64_MOVW_UABS_G0_NC:
3488 case R_AARCH64_MOVW_UABS_G1:
3489 case R_AARCH64_MOVW_UABS_G1_NC:
3490 case R_AARCH64_MOVW_UABS_G2:
3491 case R_AARCH64_MOVW_UABS_G2_NC:
3492 case R_AARCH64_MOVW_UABS_G3:
3493 contents = reencode_movw_imm (contents, addend);
3494 break;
3495
3496 default:
 3497      /* Repack simple data.  */
3498 if (howto->dst_mask & (howto->dst_mask + 1))
3499 return bfd_reloc_notsupported;
3500
3501 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3502 break;
3503 }
3504
3505 switch (size)
3506 {
3507 case 2:
3508 bfd_put_16 (abfd, contents, address);
3509 break;
3510 case 4:
3511 if (howto->dst_mask != 0xffffffff)
3512 /* must be 32-bit instruction, always little-endian */
3513 bfd_putl32 (contents, address);
3514 else
3515 /* must be 32-bit data (endianness dependent) */
3516 bfd_put_32 (abfd, contents, address);
3517 break;
3518 case 8:
3519 bfd_put_64 (abfd, contents, address);
3520 break;
3521 default:
3522 abort ();
3523 }
3524
3525 return status;
3526}
3527
3528static bfd_vma
3529aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3530 struct elf64_aarch64_link_hash_table
3531 *globals, struct bfd_link_info *info,
3532 bfd_vma value, bfd *output_bfd,
3533 bfd_boolean *unresolved_reloc_p)
3534{
3535 bfd_vma off = (bfd_vma) - 1;
3536 asection *basegot = globals->root.sgot;
3537 bfd_boolean dyn = globals->root.dynamic_sections_created;
3538
3539 if (h != NULL)
3540 {
3541 off = h->got.offset;
3542 BFD_ASSERT (off != (bfd_vma) - 1);
3543 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3544 || (info->shared
3545 && SYMBOL_REFERENCES_LOCAL (info, h))
3546 || (ELF_ST_VISIBILITY (h->other)
3547 && h->root.type == bfd_link_hash_undefweak))
3548 {
3549 /* This is actually a static link, or it is a -Bsymbolic link
3550 and the symbol is defined locally. We must initialize this
3551 entry in the global offset table. Since the offset must
3552 always be a multiple of 8, we use the least significant bit
3553 to record whether we have initialized it already.
3554 When doing a dynamic link, we create a .rel(a).got relocation
3555 entry to initialize the value. This is done in the
3556 finish_dynamic_symbol routine. */
3557 if ((off & 1) != 0)
3558 off &= ~1;
3559 else
3560 {
3561 bfd_put_64 (output_bfd, value, basegot->contents + off);
3562 h->got.offset |= 1;
3563 }
3564 }
3565 else
3566 *unresolved_reloc_p = FALSE;
3567
3568 off = off + basegot->output_section->vma + basegot->output_offset;
3569 }
3570
3571 return off;
3572}
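/* Note on the idiom used above: GOT entries are 8 bytes, so GOT offsets
   are always even and bit 0 of h->got.offset is free to record whether
   the entry has already been written.  An illustrative reading:

     off = h->got.offset;     /* e.g. 0x18, or 0x19 once initialised  */
     if (off & 1)             /* low bit set: entry already written  */
       off &= ~1;             /* recover the real offset  */
     else
       h->got.offset |= 1;    /* write the entry, then mark it done  */

   The same trick is reused below for the plain and TLSDESC GOT offsets
   of both global and local symbols.  */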
3573
3574/* Change R_TYPE to a more efficient access model where possible,
3575 return the new reloc type. */
3576
3577static unsigned int
3578aarch64_tls_transition_without_check (unsigned int r_type,
3579 struct elf_link_hash_entry *h)
3580{
3581 bfd_boolean is_local = h == NULL;
3582 switch (r_type)
3583 {
3584 case R_AARCH64_TLSGD_ADR_PAGE21:
3585 case R_AARCH64_TLSDESC_ADR_PAGE:
3586 return is_local
3587 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3588
3589 case R_AARCH64_TLSGD_ADD_LO12_NC:
3590 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3591 return is_local
3592 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3593 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3594
3595 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3596 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3597
3598 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3599 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3600
3601 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3602 case R_AARCH64_TLSDESC_CALL:
3603 /* Instructions with these relocations will become NOPs. */
3604 return R_AARCH64_NONE;
3605 }
3606
3607 return r_type;
3608}
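/* Illustrative summary of the mapping above: when the symbol is local
   to the link (h == NULL), the general dynamic pair TLSGD_ADR_PAGE21 /
   TLSGD_ADD_LO12_NC (or its TLSDESC equivalent) is retyped to the local
   exec pair TLSLE_MOVW_TPREL_G1 / TLSLE_MOVW_TPREL_G0_NC; otherwise it
   is retyped to the initial exec pair TLSIE_ADR_GOTTPREL_PAGE21 /
   TLSIE_LD64_GOTTPREL_LO12_NC.  The matching instruction rewriting is
   done separately in elf64_aarch64_tls_relax.  */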
3609
3610static unsigned int
3611aarch64_reloc_got_type (unsigned int r_type)
3612{
3613 switch (r_type)
3614 {
3615 case R_AARCH64_LD64_GOT_LO12_NC:
3616 case R_AARCH64_ADR_GOT_PAGE:
 3617 case R_AARCH64_GOT_LD_PREL19:
3618 return GOT_NORMAL;
3619
3620 case R_AARCH64_TLSGD_ADR_PAGE21:
3621 case R_AARCH64_TLSGD_ADD_LO12_NC:
3622 return GOT_TLS_GD;
3623
3624 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3625 case R_AARCH64_TLSDESC_ADR_PAGE:
3626 case R_AARCH64_TLSDESC_CALL:
3627 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3628 return GOT_TLSDESC_GD;
3629
3630 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3631 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3632 return GOT_TLS_IE;
3633
3634 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3635 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3636 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3637 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3638 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3639 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3640 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3641 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3642 return GOT_UNKNOWN;
3643 }
3644 return GOT_UNKNOWN;
3645}
3646
3647static bfd_boolean
3648aarch64_can_relax_tls (bfd *input_bfd,
3649 struct bfd_link_info *info,
3650 unsigned int r_type,
3651 struct elf_link_hash_entry *h,
3652 unsigned long r_symndx)
3653{
3654 unsigned int symbol_got_type;
3655 unsigned int reloc_got_type;
3656
3657 if (! IS_AARCH64_TLS_RELOC (r_type))
3658 return FALSE;
3659
3660 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3661 reloc_got_type = aarch64_reloc_got_type (r_type);
3662
3663 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3664 return TRUE;
3665
3666 if (info->shared)
3667 return FALSE;
3668
3669 if (h && h->root.type == bfd_link_hash_undefweak)
3670 return FALSE;
3671
3672 return TRUE;
3673}
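/* In short: only TLS relocations are candidates for relaxation.  A GD or
   TLSDESC style access against a symbol whose GOT slot is already IE may
   be relaxed even in a shared link; otherwise relaxation is permitted
   only for a non-shared link against a symbol that is not an undefined
   weak.  */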
3674
3675static unsigned int
3676aarch64_tls_transition (bfd *input_bfd,
3677 struct bfd_link_info *info,
3678 unsigned int r_type,
3679 struct elf_link_hash_entry *h,
3680 unsigned long r_symndx)
3681{
3682 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3683 return r_type;
3684
3685 return aarch64_tls_transition_without_check (r_type, h);
3686}
3687
3688/* Return the base VMA address which should be subtracted from real addresses
3689 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3690
3691static bfd_vma
3692dtpoff_base (struct bfd_link_info *info)
3693{
3694 /* If tls_sec is NULL, we should have signalled an error already. */
3695 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3696 return elf_hash_table (info)->tls_sec->vma;
3697}
3698
3699
3700/* Return the base VMA address which should be subtracted from real addresses
 3701 when resolving TPREL relocations such as R_AARCH64_TLS_TPREL64.  */
3702
3703static bfd_vma
3704tpoff_base (struct bfd_link_info *info)
3705{
3706 struct elf_link_hash_table *htab = elf_hash_table (info);
3707
3708 /* If tls_sec is NULL, we should have signalled an error already. */
3709 if (htab->tls_sec == NULL)
3710 return 0;
3711
3712 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3713 htab->tls_sec->alignment_power);
3714 return htab->tls_sec->vma - base;
3715}
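/* Worked example (illustrative, assuming the 16-byte AArch64 TCB): if
   the TLS output section starts at vma 0x411000 with 16-byte alignment,
   base is 16 and tpoff_base returns 0x410ff0; a TLS variable at
   0x411008 therefore gets the thread-pointer offset
   0x411008 - 0x410ff0 = 0x18.  */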
3716
3717static bfd_vma *
3718symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3719 unsigned long r_symndx)
3720{
3721 /* Calculate the address of the GOT entry for symbol
3722 referred to in h. */
3723 if (h != NULL)
3724 return &h->got.offset;
3725 else
3726 {
3727 /* local symbol */
3728 struct elf_aarch64_local_symbol *l;
3729
3730 l = elf64_aarch64_locals (input_bfd);
3731 return &l[r_symndx].got_offset;
3732 }
3733}
3734
3735static void
3736symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3737 unsigned long r_symndx)
3738{
3739 bfd_vma *p;
3740 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3741 *p |= 1;
3742}
3743
3744static int
3745symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3746 unsigned long r_symndx)
3747{
3748 bfd_vma value;
3749 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3750 return value & 1;
3751}
3752
3753static bfd_vma
3754symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3755 unsigned long r_symndx)
3756{
3757 bfd_vma value;
3758 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3759 value &= ~1;
3760 return value;
3761}
3762
3763static bfd_vma *
3764symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3765 unsigned long r_symndx)
3766{
3767 /* Calculate the address of the GOT entry for symbol
3768 referred to in h. */
3769 if (h != NULL)
3770 {
3771 struct elf64_aarch64_link_hash_entry *eh;
3772 eh = (struct elf64_aarch64_link_hash_entry *) h;
3773 return &eh->tlsdesc_got_jump_table_offset;
3774 }
3775 else
3776 {
3777 /* local symbol */
3778 struct elf_aarch64_local_symbol *l;
3779
3780 l = elf64_aarch64_locals (input_bfd);
3781 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3782 }
3783}
3784
3785static void
3786symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3787 unsigned long r_symndx)
3788{
3789 bfd_vma *p;
3790 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3791 *p |= 1;
3792}
3793
3794static int
3795symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3796 struct elf_link_hash_entry *h,
3797 unsigned long r_symndx)
3798{
3799 bfd_vma value;
3800 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3801 return value & 1;
3802}
3803
3804static bfd_vma
3805symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3806 unsigned long r_symndx)
3807{
3808 bfd_vma value;
3809 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3810 value &= ~1;
3811 return value;
3812}
3813
3814/* Perform a relocation as part of a final link. */
3815static bfd_reloc_status_type
3816elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3817 bfd *input_bfd,
3818 bfd *output_bfd,
3819 asection *input_section,
3820 bfd_byte *contents,
3821 Elf_Internal_Rela *rel,
3822 bfd_vma value,
3823 struct bfd_link_info *info,
3824 asection *sym_sec,
3825 struct elf_link_hash_entry *h,
3826 bfd_boolean *unresolved_reloc_p,
3827 bfd_boolean save_addend,
3828 bfd_vma *saved_addend)
3829{
3830 unsigned int r_type = howto->type;
3831 unsigned long r_symndx;
3832 bfd_byte *hit_data = contents + rel->r_offset;
3833 bfd_vma place;
3834 bfd_signed_vma signed_addend;
3835 struct elf64_aarch64_link_hash_table *globals;
3836 bfd_boolean weak_undef_p;
3837
3838 globals = elf64_aarch64_hash_table (info);
3839
3840 BFD_ASSERT (is_aarch64_elf (input_bfd));
3841
3842 r_symndx = ELF64_R_SYM (rel->r_info);
3843
3844 /* It is possible to have linker relaxations on some TLS access
3845 models. Update our information here. */
3846 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3847
3848 if (r_type != howto->type)
3849 howto = elf64_aarch64_howto_from_type (r_type);
3850
3851 place = input_section->output_section->vma
3852 + input_section->output_offset + rel->r_offset;
3853
3854 /* Get addend, accumulating the addend for consecutive relocs
3855 which refer to the same offset. */
3856 signed_addend = saved_addend ? *saved_addend : 0;
3857 signed_addend += rel->r_addend;
3858
3859 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3860 : bfd_is_und_section (sym_sec));
3861 switch (r_type)
3862 {
3863 case R_AARCH64_NONE:
3864 case R_AARCH64_NULL:
3865 case R_AARCH64_TLSDESC_CALL:
3866 *unresolved_reloc_p = FALSE;
3867 return bfd_reloc_ok;
3868
3869 case R_AARCH64_ABS64:
3870
3871 /* When generating a shared object or relocatable executable, these
3872 relocations are copied into the output file to be resolved at
3873 run time. */
3874 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3875 && (input_section->flags & SEC_ALLOC)
3876 && (h == NULL
3877 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3878 || h->root.type != bfd_link_hash_undefweak))
3879 {
3880 Elf_Internal_Rela outrel;
3881 bfd_byte *loc;
3882 bfd_boolean skip, relocate;
3883 asection *sreloc;
3884
3885 *unresolved_reloc_p = FALSE;
3886
3887 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3888 input_section, 1);
3889 if (sreloc == NULL)
3890 return bfd_reloc_notsupported;
3891
3892 skip = FALSE;
3893 relocate = FALSE;
3894
3895 outrel.r_addend = signed_addend;
3896 outrel.r_offset =
3897 _bfd_elf_section_offset (output_bfd, info, input_section,
3898 rel->r_offset);
3899 if (outrel.r_offset == (bfd_vma) - 1)
3900 skip = TRUE;
3901 else if (outrel.r_offset == (bfd_vma) - 2)
3902 {
3903 skip = TRUE;
3904 relocate = TRUE;
3905 }
3906
3907 outrel.r_offset += (input_section->output_section->vma
3908 + input_section->output_offset);
3909
3910 if (skip)
3911 memset (&outrel, 0, sizeof outrel);
3912 else if (h != NULL
3913 && h->dynindx != -1
3914 && (!info->shared || !info->symbolic || !h->def_regular))
3915 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3916 else
3917 {
3918 int symbol;
3919
3920 /* On SVR4-ish systems, the dynamic loader cannot
3921 relocate the text and data segments independently,
3922 so the symbol does not matter. */
3923 symbol = 0;
3924 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3925 outrel.r_addend += value;
3926 }
3927
3928 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3929 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3930
3931 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3932 {
3933 /* Sanity to check that we have previously allocated
3934 sufficient space in the relocation section for the
3935 number of relocations we actually want to emit. */
3936 abort ();
3937 }
3938
3939 /* If this reloc is against an external symbol, we do not want to
3940 fiddle with the addend. Otherwise, we need to include the symbol
3941 value so that it becomes an addend for the dynamic reloc. */
3942 if (!relocate)
3943 return bfd_reloc_ok;
3944
3945 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3946 contents, rel->r_offset, value,
3947 signed_addend);
3948 }
3949 else
3950 value += signed_addend;
3951 break;
3952
3953 case R_AARCH64_JUMP26:
3954 case R_AARCH64_CALL26:
3955 {
3956 asection *splt = globals->root.splt;
3957 bfd_boolean via_plt_p =
3958 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3959
3960 /* A call to an undefined weak symbol is converted to a jump to
3961 the next instruction unless a PLT entry will be created.
3962 The jump to the next instruction is optimized as a NOP.
3963 Do the same for local undefined symbols. */
3964 if (weak_undef_p && ! via_plt_p)
3965 {
3966 bfd_putl32 (INSN_NOP, hit_data);
3967 return bfd_reloc_ok;
3968 }
3969
3970 /* If the call goes through a PLT entry, make sure to
3971 check distance to the right destination address. */
3972 if (via_plt_p)
3973 {
3974 value = (splt->output_section->vma
3975 + splt->output_offset + h->plt.offset);
3976 *unresolved_reloc_p = FALSE;
3977 }
3978
3979 /* If the target symbol is global and marked as a function the
3980 relocation applies a function call or a tail call. In this
3981 situation we can veneer out of range branches. The veneers
 3982 use IP0 and IP1, hence cannot be used for arbitrary out of range
3983 branches that occur within the body of a function. */
3984 if (h && h->type == STT_FUNC)
3985 {
3986 /* Check if a stub has to be inserted because the destination
3987 is too far away. */
3988 if (! aarch64_valid_branch_p (value, place))
3989 {
3990 /* The target is out of reach, so redirect the branch to
3991 the local stub for this function. */
3992 struct elf64_aarch64_stub_hash_entry *stub_entry;
3993 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3994 sym_sec, h,
3995 rel, globals);
3996 if (stub_entry != NULL)
3997 value = (stub_entry->stub_offset
3998 + stub_entry->stub_sec->output_offset
3999 + stub_entry->stub_sec->output_section->vma);
4000 }
4001 }
4002 }
4003 value = aarch64_resolve_relocation (r_type, place, value,
4004 signed_addend, weak_undef_p);
4005 break;
4006
4007 case R_AARCH64_ABS16:
4008 case R_AARCH64_ABS32:
4009 case R_AARCH64_ADD_ABS_LO12_NC:
4010 case R_AARCH64_ADR_PREL_LO21:
4011 case R_AARCH64_ADR_PREL_PG_HI21:
4012 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4013 case R_AARCH64_CONDBR19:
4014 case R_AARCH64_LD_PREL_LO19:
4015 case R_AARCH64_LDST8_ABS_LO12_NC:
4016 case R_AARCH64_LDST16_ABS_LO12_NC:
4017 case R_AARCH64_LDST32_ABS_LO12_NC:
4018 case R_AARCH64_LDST64_ABS_LO12_NC:
4019 case R_AARCH64_LDST128_ABS_LO12_NC:
4020 case R_AARCH64_MOVW_SABS_G0:
4021 case R_AARCH64_MOVW_SABS_G1:
4022 case R_AARCH64_MOVW_SABS_G2:
4023 case R_AARCH64_MOVW_UABS_G0:
4024 case R_AARCH64_MOVW_UABS_G0_NC:
4025 case R_AARCH64_MOVW_UABS_G1:
4026 case R_AARCH64_MOVW_UABS_G1_NC:
4027 case R_AARCH64_MOVW_UABS_G2:
4028 case R_AARCH64_MOVW_UABS_G2_NC:
4029 case R_AARCH64_MOVW_UABS_G3:
4030 case R_AARCH64_PREL16:
4031 case R_AARCH64_PREL32:
4032 case R_AARCH64_PREL64:
4033 case R_AARCH64_TSTBR14:
4034 value = aarch64_resolve_relocation (r_type, place, value,
4035 signed_addend, weak_undef_p);
4036 break;
4037
4038 case R_AARCH64_LD64_GOT_LO12_NC:
4039 case R_AARCH64_ADR_GOT_PAGE:
 4040 case R_AARCH64_GOT_LD_PREL19:
4041 if (globals->root.sgot == NULL)
4042 BFD_ASSERT (h != NULL);
4043
4044 if (h != NULL)
4045 {
4046 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4047 output_bfd,
4048 unresolved_reloc_p);
4049 value = aarch64_resolve_relocation (r_type, place, value,
4050 0, weak_undef_p);
4051 }
4052 break;
4053
4054 case R_AARCH64_TLSGD_ADR_PAGE21:
4055 case R_AARCH64_TLSGD_ADD_LO12_NC:
4056 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4057 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4058 if (globals->root.sgot == NULL)
4059 return bfd_reloc_notsupported;
4060
4061 value = (symbol_got_offset (input_bfd, h, r_symndx)
4062 + globals->root.sgot->output_section->vma
4063 + globals->root.sgot->output_section->output_offset);
4064
4065 value = aarch64_resolve_relocation (r_type, place, value,
4066 0, weak_undef_p);
4067 *unresolved_reloc_p = FALSE;
4068 break;
4069
4070 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4071 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4072 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4073 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4074 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4075 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4076 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4077 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4078 value = aarch64_resolve_relocation (r_type, place, value,
 4079 signed_addend - tpoff_base (info), weak_undef_p);
4080 *unresolved_reloc_p = FALSE;
4081 break;
4082
4083 case R_AARCH64_TLSDESC_ADR_PAGE:
4084 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4085 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4086 case R_AARCH64_TLSDESC_ADD:
4087 case R_AARCH64_TLSDESC_LDR:
4088 if (globals->root.sgot == NULL)
4089 return bfd_reloc_notsupported;
4090
4091 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4092 + globals->root.sgotplt->output_section->vma
4093 + globals->root.sgotplt->output_section->output_offset
4094 + globals->sgotplt_jump_table_size);
4095
4096 value = aarch64_resolve_relocation (r_type, place, value,
4097 0, weak_undef_p);
4098 *unresolved_reloc_p = FALSE;
4099 break;
4100
4101 default:
4102 return bfd_reloc_notsupported;
4103 }
4104
4105 if (saved_addend)
4106 *saved_addend = value;
4107
4108 /* Only apply the final relocation in a sequence. */
4109 if (save_addend)
4110 return bfd_reloc_continue;
4111
4112 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4113}
4114
4115/* Handle TLS relaxations. Relaxing is possible for symbols that use
4116 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4117 link.
4118
4119 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4120 is to then call final_link_relocate. Return other values in the
4121 case of error. */
4122
4123static bfd_reloc_status_type
4124elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4125 bfd *input_bfd, bfd_byte *contents,
4126 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4127{
4128 bfd_boolean is_local = h == NULL;
4129 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4130 unsigned long insn;
4131
4132 BFD_ASSERT (globals && input_bfd && contents && rel);
4133
4134 switch (r_type)
4135 {
4136 case R_AARCH64_TLSGD_ADR_PAGE21:
4137 case R_AARCH64_TLSDESC_ADR_PAGE:
4138 if (is_local)
4139 {
4140 /* GD->LE relaxation:
4141 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4142 or
4143 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4144 */
4145 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4146 return bfd_reloc_continue;
4147 }
4148 else
4149 {
4150 /* GD->IE relaxation:
4151 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4152 or
4153 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4154 */
4155 insn = bfd_getl32 (contents + rel->r_offset);
4156 return bfd_reloc_continue;
4157 }
4158
4159 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4160 if (is_local)
4161 {
4162 /* GD->LE relaxation:
4163 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4164 */
4165 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4166 return bfd_reloc_continue;
4167 }
4168 else
4169 {
4170 /* GD->IE relaxation:
4171 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4172 */
4173 insn = bfd_getl32 (contents + rel->r_offset);
4174 insn &= 0xfffffff0;
4175 bfd_putl32 (insn, contents + rel->r_offset);
4176 return bfd_reloc_continue;
4177 }
4178
4179 case R_AARCH64_TLSGD_ADD_LO12_NC:
4180 if (is_local)
4181 {
4182 /* GD->LE relaxation
4183 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4184 bl __tls_get_addr => mrs x1, tpidr_el0
4185 nop => add x0, x1, x0
4186 */
4187
4188 /* First kill the tls_get_addr reloc on the bl instruction. */
4189 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4190 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4191
4192 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4193 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4194 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4195 return bfd_reloc_continue;
4196 }
4197 else
4198 {
4199 /* GD->IE relaxation
4200 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4201 BL __tls_get_addr => mrs x1, tpidr_el0
4202 R_AARCH64_CALL26
4203 NOP => add x0, x1, x0
4204 */
4205
4206 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4207
4208 /* Remove the relocation on the BL instruction. */
4209 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4210
4211 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4212
4213 /* We choose to fixup the BL and NOP instructions using the
4214 offset from the second relocation to allow flexibility in
4215 scheduling instructions between the ADD and BL. */
4216 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4217 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4218 return bfd_reloc_continue;
4219 }
4220
4221 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4222 case R_AARCH64_TLSDESC_CALL:
4223 /* GD->IE/LE relaxation:
4224 add x0, x0, #:tlsdesc_lo12:var => nop
4225 blr xd => nop
4226 */
4227 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4228 return bfd_reloc_ok;
4229
4230 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4231 /* IE->LE relaxation:
4232 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4233 */
4234 if (is_local)
4235 {
4236 insn = bfd_getl32 (contents + rel->r_offset);
4237 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4238 }
4239 return bfd_reloc_continue;
4240
4241 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4242 /* IE->LE relaxation:
4243 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4244 */
4245 if (is_local)
4246 {
4247 insn = bfd_getl32 (contents + rel->r_offset);
4248 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4249 }
4250 return bfd_reloc_continue;
4251
4252 default:
4253 return bfd_reloc_continue;
4254 }
4255
4256 return bfd_reloc_ok;
4257}
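/* Putting the GD->LE fragments above together (illustrative): for a
   symbol local to the link, a traditional general dynamic access ends
   up, after the rewriting here plus the final relocation pass, as

     movz x0, :tprel_g1:var
     movk x0, :tprel_g0_nc:var
     mrs  x1, tpidr_el0
     add  x0, x1, x0

   i.e. the thread-pointer-relative offset is materialised directly and
   the call to __tls_get_addr disappears.  */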
4258
4259/* Relocate an AArch64 ELF section. */
4260
4261static bfd_boolean
4262elf64_aarch64_relocate_section (bfd *output_bfd,
4263 struct bfd_link_info *info,
4264 bfd *input_bfd,
4265 asection *input_section,
4266 bfd_byte *contents,
4267 Elf_Internal_Rela *relocs,
4268 Elf_Internal_Sym *local_syms,
4269 asection **local_sections)
4270{
4271 Elf_Internal_Shdr *symtab_hdr;
4272 struct elf_link_hash_entry **sym_hashes;
4273 Elf_Internal_Rela *rel;
4274 Elf_Internal_Rela *relend;
4275 const char *name;
4276 struct elf64_aarch64_link_hash_table *globals;
4277 bfd_boolean save_addend = FALSE;
4278 bfd_vma addend = 0;
4279
4280 globals = elf64_aarch64_hash_table (info);
4281
4282 symtab_hdr = &elf_symtab_hdr (input_bfd);
4283 sym_hashes = elf_sym_hashes (input_bfd);
4284
4285 rel = relocs;
4286 relend = relocs + input_section->reloc_count;
4287 for (; rel < relend; rel++)
4288 {
4289 unsigned int r_type;
4290 unsigned int relaxed_r_type;
4291 reloc_howto_type *howto;
4292 unsigned long r_symndx;
4293 Elf_Internal_Sym *sym;
4294 asection *sec;
4295 struct elf_link_hash_entry *h;
4296 bfd_vma relocation;
4297 bfd_reloc_status_type r;
4298 arelent bfd_reloc;
4299 char sym_type;
4300 bfd_boolean unresolved_reloc = FALSE;
4301 char *error_message = NULL;
4302
4303 r_symndx = ELF64_R_SYM (rel->r_info);
4304 r_type = ELF64_R_TYPE (rel->r_info);
4305
4306 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4307 howto = bfd_reloc.howto;
4308
4309 h = NULL;
4310 sym = NULL;
4311 sec = NULL;
4312
4313 if (r_symndx < symtab_hdr->sh_info)
4314 {
4315 sym = local_syms + r_symndx;
4316 sym_type = ELF64_ST_TYPE (sym->st_info);
4317 sec = local_sections[r_symndx];
4318
4319 /* An object file might have a reference to a local
4320 undefined symbol. This is a daft object file, but we
4321 should at least do something about it. */
4322 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4323 && bfd_is_und_section (sec)
4324 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4325 {
4326 if (!info->callbacks->undefined_symbol
4327 (info, bfd_elf_string_from_elf_section
4328 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4329 input_bfd, input_section, rel->r_offset, TRUE))
4330 return FALSE;
4331 }
4332
4333 if (r_type >= R_AARCH64_dyn_max)
4334 {
4335 bfd_set_error (bfd_error_bad_value);
4336 return FALSE;
4337 }
4338
4339 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4340 }
4341 else
4342 {
4343 bfd_boolean warned;
4344
4345 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4346 r_symndx, symtab_hdr, sym_hashes,
4347 h, sec, relocation,
4348 unresolved_reloc, warned);
4349
4350 sym_type = h->type;
4351 }
4352
4353 if (sec != NULL && discarded_section (sec))
4354 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4355 rel, 1, relend, howto, 0, contents);
4356
4357 if (info->relocatable)
4358 {
4359 /* This is a relocatable link. We don't have to change
4360 anything, unless the reloc is against a section symbol,
4361 in which case we have to adjust according to where the
4362 section symbol winds up in the output section. */
4363 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4364 rel->r_addend += sec->output_offset;
4365 continue;
4366 }
4367
4368 if (h != NULL)
4369 name = h->root.root.string;
4370 else
4371 {
4372 name = (bfd_elf_string_from_elf_section
4373 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4374 if (name == NULL || *name == '\0')
4375 name = bfd_section_name (input_bfd, sec);
4376 }
4377
4378 if (r_symndx != 0
4379 && r_type != R_AARCH64_NONE
4380 && r_type != R_AARCH64_NULL
4381 && (h == NULL
4382 || h->root.type == bfd_link_hash_defined
4383 || h->root.type == bfd_link_hash_defweak)
4384 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4385 {
4386 (*_bfd_error_handler)
4387 ((sym_type == STT_TLS
4388 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4389 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4390 input_bfd,
4391 input_section, (long) rel->r_offset, howto->name, name);
4392 }
4393
4394
4395 /* We relax only if we can see that there can be a valid transition
4396 from a reloc type to another.
4397 We call elf64_aarch64_final_link_relocate unless we're completely
4398 done, i.e., the relaxation produced the final output we want. */
4399
4400 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4401 h, r_symndx);
4402 if (relaxed_r_type != r_type)
4403 {
4404 r_type = relaxed_r_type;
4405 howto = elf64_aarch64_howto_from_type (r_type);
4406
4407 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4408 unresolved_reloc = 0;
4409 }
4410 else
4411 r = bfd_reloc_continue;
4412
4413 /* There may be multiple consecutive relocations for the
4414 same offset. In that case we are supposed to treat the
4415 output of each relocation as the addend for the next. */
4416 if (rel + 1 < relend
4417 && rel->r_offset == rel[1].r_offset
4418 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4419 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4420 save_addend = TRUE;
4421 else
4422 save_addend = FALSE;
4423
4424 if (r == bfd_reloc_continue)
4425 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4426 input_section, contents, rel,
4427 relocation, info, sec,
4428 h, &unresolved_reloc,
4429 save_addend, &addend);
4430
4431 switch (r_type)
4432 {
4433 case R_AARCH64_TLSGD_ADR_PAGE21:
4434 case R_AARCH64_TLSGD_ADD_LO12_NC:
4435 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4436 {
4437 bfd_boolean need_relocs = FALSE;
4438 bfd_byte *loc;
4439 int indx;
4440 bfd_vma off;
4441
4442 off = symbol_got_offset (input_bfd, h, r_symndx);
4443 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4444
4445 need_relocs =
4446 (info->shared || indx != 0) &&
4447 (h == NULL
4448 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4449 || h->root.type != bfd_link_hash_undefweak);
4450
4451 BFD_ASSERT (globals->root.srelgot != NULL);
4452
4453 if (need_relocs)
4454 {
4455 Elf_Internal_Rela rela;
4456 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4457 rela.r_addend = 0;
4458 rela.r_offset = globals->root.sgot->output_section->vma +
4459 globals->root.sgot->output_offset + off;
4460
4461
4462 loc = globals->root.srelgot->contents;
4463 loc += globals->root.srelgot->reloc_count++
4464 * RELOC_SIZE (htab);
4465 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4466
4467 if (indx == 0)
4468 {
4469 bfd_put_64 (output_bfd,
4470 relocation - dtpoff_base (info),
4471 globals->root.sgot->contents + off
4472 + GOT_ENTRY_SIZE);
4473 }
4474 else
4475 {
4476 /* This TLS symbol is global. We emit a
4477 relocation to fixup the tls offset at load
4478 time. */
4479 rela.r_info =
4480 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4481 rela.r_addend = 0;
4482 rela.r_offset =
4483 (globals->root.sgot->output_section->vma
4484 + globals->root.sgot->output_offset + off
4485 + GOT_ENTRY_SIZE);
4486
4487 loc = globals->root.srelgot->contents;
4488 loc += globals->root.srelgot->reloc_count++
4489 * RELOC_SIZE (globals);
4490 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4491 bfd_put_64 (output_bfd, (bfd_vma) 0,
4492 globals->root.sgot->contents + off
4493 + GOT_ENTRY_SIZE);
4494 }
4495 }
4496 else
4497 {
4498 bfd_put_64 (output_bfd, (bfd_vma) 1,
4499 globals->root.sgot->contents + off);
4500 bfd_put_64 (output_bfd,
4501 relocation - dtpoff_base (info),
4502 globals->root.sgot->contents + off
4503 + GOT_ENTRY_SIZE);
4504 }
4505
4506 symbol_got_offset_mark (input_bfd, h, r_symndx);
4507 }
4508 break;
4509
4510 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4511 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4512 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4513 {
4514 bfd_boolean need_relocs = FALSE;
4515 bfd_byte *loc;
4516 int indx;
4517 bfd_vma off;
4518
4519 off = symbol_got_offset (input_bfd, h, r_symndx);
4520
4521 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4522
4523 need_relocs =
4524 (info->shared || indx != 0) &&
4525 (h == NULL
4526 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4527 || h->root.type != bfd_link_hash_undefweak);
4528
4529 BFD_ASSERT (globals->root.srelgot != NULL);
4530
4531 if (need_relocs)
4532 {
4533 Elf_Internal_Rela rela;
4534
4535 if (indx == 0)
4536 rela.r_addend = relocation - dtpoff_base (info);
4537 else
4538 rela.r_addend = 0;
4539
4540 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4541 rela.r_offset = globals->root.sgot->output_section->vma +
4542 globals->root.sgot->output_offset + off;
4543
4544 loc = globals->root.srelgot->contents;
4545 loc += globals->root.srelgot->reloc_count++
4546 * RELOC_SIZE (htab);
4547
4548 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4549
4550 bfd_put_64 (output_bfd, rela.r_addend,
4551 globals->root.sgot->contents + off);
4552 }
4553 else
4554 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4555 globals->root.sgot->contents + off);
4556
4557 symbol_got_offset_mark (input_bfd, h, r_symndx);
4558 }
4559 break;
4560
4561 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4562 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4563 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4564 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4565 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4566 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4567 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4568 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4569 break;
4570
4571 case R_AARCH64_TLSDESC_ADR_PAGE:
4572 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4573 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4574 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4575 {
4576 bfd_boolean need_relocs = FALSE;
4577 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4578 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4579
4580 need_relocs = (h == NULL
4581 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4582 || h->root.type != bfd_link_hash_undefweak);
4583
4584 BFD_ASSERT (globals->root.srelgot != NULL);
4585 BFD_ASSERT (globals->root.sgot != NULL);
4586
4587 if (need_relocs)
4588 {
4589 bfd_byte *loc;
4590 Elf_Internal_Rela rela;
4591 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4592 rela.r_addend = 0;
4593 rela.r_offset = (globals->root.sgotplt->output_section->vma
4594 + globals->root.sgotplt->output_offset
4595 + off + globals->sgotplt_jump_table_size);
4596
4597 if (indx == 0)
4598 rela.r_addend = relocation - dtpoff_base (info);
4599
4600 /* Allocate the next available slot in the PLT reloc
4601 section to hold our R_AARCH64_TLSDESC, the next
4602 available slot is determined from reloc_count,
4603 which we step. But note, reloc_count was
 4604 artificially moved down while allocating slots for
4605 real PLT relocs such that all of the PLT relocs
4606 will fit above the initial reloc_count and the
4607 extra stuff will fit below. */
4608 loc = globals->root.srelplt->contents;
4609 loc += globals->root.srelplt->reloc_count++
4610 * RELOC_SIZE (globals);
4611
4612 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4613
4614 bfd_put_64 (output_bfd, (bfd_vma) 0,
4615 globals->root.sgotplt->contents + off +
4616 globals->sgotplt_jump_table_size);
4617 bfd_put_64 (output_bfd, (bfd_vma) 0,
4618 globals->root.sgotplt->contents + off +
4619 globals->sgotplt_jump_table_size +
4620 GOT_ENTRY_SIZE);
4621 }
4622
4623 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4624 }
4625 break;
4626 }
4627
4628 if (!save_addend)
4629 addend = 0;
4630
4631
4632 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4633 because such sections are not SEC_ALLOC and thus ld.so will
4634 not process them. */
4635 if (unresolved_reloc
4636 && !((input_section->flags & SEC_DEBUGGING) != 0
4637 && h->def_dynamic)
4638 && _bfd_elf_section_offset (output_bfd, info, input_section,
4639 +rel->r_offset) != (bfd_vma) - 1)
4640 {
4641 (*_bfd_error_handler)
4642 (_
4643 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4644 input_bfd, input_section, (long) rel->r_offset, howto->name,
4645 h->root.root.string);
4646 return FALSE;
4647 }
4648
4649 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4650 {
4651 switch (r)
4652 {
4653 case bfd_reloc_overflow:
4654 /* If the overflowing reloc was to an undefined symbol,
4655 we have already printed one error message and there
4656 is no point complaining again. */
4657 if ((!h ||
4658 h->root.type != bfd_link_hash_undefined)
4659 && (!((*info->callbacks->reloc_overflow)
4660 (info, (h ? &h->root : NULL), name, howto->name,
4661 (bfd_vma) 0, input_bfd, input_section,
4662 rel->r_offset))))
4663 return FALSE;
4664 break;
4665
4666 case bfd_reloc_undefined:
4667 if (!((*info->callbacks->undefined_symbol)
4668 (info, name, input_bfd, input_section,
4669 rel->r_offset, TRUE)))
4670 return FALSE;
4671 break;
4672
4673 case bfd_reloc_outofrange:
4674 error_message = _("out of range");
4675 goto common_error;
4676
4677 case bfd_reloc_notsupported:
4678 error_message = _("unsupported relocation");
4679 goto common_error;
4680
4681 case bfd_reloc_dangerous:
4682 /* error_message should already be set. */
4683 goto common_error;
4684
4685 default:
4686 error_message = _("unknown error");
4687 /* Fall through. */
4688
4689 common_error:
4690 BFD_ASSERT (error_message != NULL);
4691 if (!((*info->callbacks->reloc_dangerous)
4692 (info, error_message, input_bfd, input_section,
4693 rel->r_offset)))
4694 return FALSE;
4695 break;
4696 }
4697 }
4698 }
4699
4700 return TRUE;
4701}
4702
4703/* Set the right machine number. */
4704
4705static bfd_boolean
4706elf64_aarch64_object_p (bfd *abfd)
4707{
4708 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4709 return TRUE;
4710}
4711
4712/* Function to keep AArch64 specific flags in the ELF header. */
4713
4714static bfd_boolean
4715elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4716{
4717 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4718 {
4719 }
4720 else
4721 {
4722 elf_elfheader (abfd)->e_flags = flags;
4723 elf_flags_init (abfd) = TRUE;
4724 }
4725
4726 return TRUE;
4727}
4728
4729/* Copy backend specific data from one object module to another. */
4730
4731static bfd_boolean
4732elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4733{
4734 flagword in_flags;
4735
4736 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4737 return TRUE;
4738
4739 in_flags = elf_elfheader (ibfd)->e_flags;
4740
4741 elf_elfheader (obfd)->e_flags = in_flags;
4742 elf_flags_init (obfd) = TRUE;
4743
4744 /* Also copy the EI_OSABI field. */
4745 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4746 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4747
4748 /* Copy object attributes. */
4749 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4750
4751 return TRUE;
4752}
4753
4754/* Merge backend specific data from an object file to the output
4755 object file when linking. */
4756
4757static bfd_boolean
4758elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4759{
4760 flagword out_flags;
4761 flagword in_flags;
4762 bfd_boolean flags_compatible = TRUE;
4763 asection *sec;
4764
 4765 /* Check if we have the same endianness. */
4766 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4767 return FALSE;
4768
4769 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4770 return TRUE;
4771
4772 /* The input BFD must have had its flags initialised. */
4773 /* The following seems bogus to me -- The flags are initialized in
4774 the assembler but I don't think an elf_flags_init field is
4775 written into the object. */
4776 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4777
4778 in_flags = elf_elfheader (ibfd)->e_flags;
4779 out_flags = elf_elfheader (obfd)->e_flags;
4780
4781 if (!elf_flags_init (obfd))
4782 {
4783 /* If the input is the default architecture and had the default
4784 flags then do not bother setting the flags for the output
4785 architecture, instead allow future merges to do this. If no
4786 future merges ever set these flags then they will retain their
 4787 uninitialised values which, surprise surprise, correspond
4788 to the default values. */
4789 if (bfd_get_arch_info (ibfd)->the_default
4790 && elf_elfheader (ibfd)->e_flags == 0)
4791 return TRUE;
4792
4793 elf_flags_init (obfd) = TRUE;
4794 elf_elfheader (obfd)->e_flags = in_flags;
4795
4796 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4797 && bfd_get_arch_info (obfd)->the_default)
4798 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4799 bfd_get_mach (ibfd));
4800
4801 return TRUE;
4802 }
4803
4804 /* Identical flags must be compatible. */
4805 if (in_flags == out_flags)
4806 return TRUE;
4807
4808 /* Check to see if the input BFD actually contains any sections. If
4809 not, its flags may not have been initialised either, but it
 4810 cannot actually cause any incompatibility. Do not short-circuit
4811 dynamic objects; their section list may be emptied by
4812 elf_link_add_object_symbols.
4813
4814 Also check to see if there are no code sections in the input.
4815 In this case there is no need to check for code specific flags.
 4816 XXX - do we need to worry about floating-point format compatibility
4817 in data sections ? */
4818 if (!(ibfd->flags & DYNAMIC))
4819 {
4820 bfd_boolean null_input_bfd = TRUE;
4821 bfd_boolean only_data_sections = TRUE;
4822
4823 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4824 {
4825 if ((bfd_get_section_flags (ibfd, sec)
4826 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4827 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4828 only_data_sections = FALSE;
4829
4830 null_input_bfd = FALSE;
4831 break;
4832 }
4833
4834 if (null_input_bfd || only_data_sections)
4835 return TRUE;
4836 }
4837
4838 return flags_compatible;
4839}
4840
4841/* Display the flags field. */
4842
4843static bfd_boolean
4844elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4845{
4846 FILE *file = (FILE *) ptr;
4847 unsigned long flags;
4848
4849 BFD_ASSERT (abfd != NULL && ptr != NULL);
4850
4851 /* Print normal ELF private data. */
4852 _bfd_elf_print_private_bfd_data (abfd, ptr);
4853
4854 flags = elf_elfheader (abfd)->e_flags;
4855 /* Ignore init flag - it may not be set, despite the flags field
4856 containing valid data. */
4857
4858 /* xgettext:c-format */
4859 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4860
4861 if (flags)
4862 fprintf (file, _("<Unrecognised flag bits set>"));
4863
4864 fputc ('\n', file);
4865
4866 return TRUE;
4867}
4868
4869/* Update the got entry reference counts for the section being removed. */
4870
4871static bfd_boolean
4872elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED,
4873 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4874 asection *sec ATTRIBUTE_UNUSED,
4875 const Elf_Internal_Rela *
4876 relocs ATTRIBUTE_UNUSED)
4877{
4878 struct elf64_aarch64_link_hash_table *htab;
4879 Elf_Internal_Shdr *symtab_hdr;
4880 struct elf_link_hash_entry **sym_hashes;
4881 bfd_signed_vma *local_got_refcounts;
4882 const Elf_Internal_Rela *rel, *relend;
4883
4884 if (info->relocatable)
4885 return TRUE;
4886
4887 htab = elf64_aarch64_hash_table (info);
4888
4889 if (htab == NULL)
4890 return FALSE;
4891
4892 elf_section_data (sec)->local_dynrel = NULL;
4893
4894 symtab_hdr = &elf_symtab_hdr (abfd);
4895 sym_hashes = elf_sym_hashes (abfd);
4896
4897 local_got_refcounts = elf_local_got_refcounts (abfd);
4898
4899 relend = relocs + sec->reloc_count;
4900 for (rel = relocs; rel < relend; rel++)
4901 {
4902 unsigned long r_symndx;
4903 unsigned int r_type;
4904 struct elf_link_hash_entry *h = NULL;
4905
4906 r_symndx = ELF64_R_SYM (rel->r_info);
4907
4908 if (r_symndx >= symtab_hdr->sh_info)
4909 {
4910 struct elf64_aarch64_link_hash_entry *eh;
4911 struct elf_dyn_relocs **pp;
4912 struct elf_dyn_relocs *p;
4913
4914 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4915 while (h->root.type == bfd_link_hash_indirect
4916 || h->root.type == bfd_link_hash_warning)
4917 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4918 eh = (struct elf64_aarch64_link_hash_entry *) h;
4919
4920 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
4921 {
4922 if (p->sec == sec)
4923 {
4924 /* Everything must go for SEC. */
4925 *pp = p->next;
4926 break;
4927 }
4928 }
4929 }
4930 else
4931 {
4932 Elf_Internal_Sym *isym;
4933
4934 /* A local symbol. */
4935 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
4936 abfd, r_symndx);
4937 if (isym == NULL)
4938 return FALSE;
4939 }
4940
4941 r_type = ELF64_R_TYPE (rel->r_info);
 4942 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
4943 switch (r_type)
4944 {
4945 case R_AARCH64_LD64_GOT_LO12_NC:
4946 case R_AARCH64_GOT_LD_PREL19:
4947 case R_AARCH64_ADR_GOT_PAGE:
4948 case R_AARCH64_TLSGD_ADR_PAGE21:
4949 case R_AARCH64_TLSGD_ADD_LO12_NC:
4950 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4951 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4952 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4953 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4954 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4955 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4956 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4957 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4958 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4959 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4960 case R_AARCH64_TLSDESC_ADR_PAGE:
4961 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4962 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4963 if (h != NULL)
4964 {
4965 if (h->got.refcount > 0)
4966 h->got.refcount -= 1;
4967 }
4968 else if (local_got_refcounts != NULL)
4969 {
4970 if (local_got_refcounts[r_symndx] > 0)
4971 local_got_refcounts[r_symndx] -= 1;
4972 }
4973 break;
4974
4975 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4976 case R_AARCH64_ADR_PREL_PG_HI21:
4977 case R_AARCH64_ADR_PREL_LO21:
4978 if (h != NULL && info->executable)
4979 {
4980 if (h->plt.refcount > 0)
4981 h->plt.refcount -= 1;
4982 }
4983 break;
4984
4985 case R_AARCH64_CALL26:
4986 case R_AARCH64_JUMP26:
4987 /* If this is a local symbol then we resolve it
4988 directly without creating a PLT entry. */
4989 if (h == NULL)
4990 continue;
4991
4992 if (h->plt.refcount > 0)
4993 h->plt.refcount -= 1;
4994 break;
4995
4996 case R_AARCH64_ABS64:
4997 if (h != NULL && info->executable)
4998 {
4999 if (h->plt.refcount > 0)
5000 h->plt.refcount -= 1;
5001 }
5002 break;
5003
5004 default:
5005 break;
5006 }
5007 }
5008
5009 return TRUE;
5010}
5011
5012/* Adjust a symbol defined by a dynamic object and referenced by a
5013 regular object. The current definition is in some section of the
5014 dynamic object, but we're not including those sections. We have to
5015 change the definition to something the rest of the link can
5016 understand. */
5017
5018static bfd_boolean
5019elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5020 struct elf_link_hash_entry *h)
5021{
5022 struct elf64_aarch64_link_hash_table *htab;
5023 asection *s;
5024
5025 /* If this is a function, put it in the procedure linkage table. We
5026 will fill in the contents of the procedure linkage table later,
5027 when we know the address of the .got section. */
5028 if (h->type == STT_FUNC || h->needs_plt)
5029 {
5030 if (h->plt.refcount <= 0
5031 || SYMBOL_CALLS_LOCAL (info, h)
5032 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5033 && h->root.type == bfd_link_hash_undefweak))
5034 {
5035 /* This case can occur if we saw a CALL26 reloc in
5036 an input file, but the symbol wasn't referred to
5037 by a dynamic object or all references were
 5038 garbage collected.  In that case no PLT entry is needed
 5039 and we can resolve the branch directly. */
5040 h->plt.offset = (bfd_vma) - 1;
5041 h->needs_plt = 0;
5042 }
5043
5044 return TRUE;
5045 }
5046 else
5047 /* It's possible that we incorrectly decided a .plt reloc was
 5048 needed for a reloc to a non-function sym in
 5049 check_relocs.  We can't decide accurately between function and
 5050 non-function syms in check_relocs; objects loaded later in
5051 the link may change h->type. So fix it now. */
5052 h->plt.offset = (bfd_vma) - 1;
5053
5054
5055 /* If this is a weak symbol, and there is a real definition, the
5056 processor independent code will have arranged for us to see the
5057 real definition first, and we can just use the same value. */
5058 if (h->u.weakdef != NULL)
5059 {
5060 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5061 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5062 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5063 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5064 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5065 h->non_got_ref = h->u.weakdef->non_got_ref;
5066 return TRUE;
5067 }
5068
5069 /* If we are creating a shared library, we must presume that the
5070 only references to the symbol are via the global offset table.
5071 For such cases we need not do anything here; the relocations will
5072 be handled correctly by relocate_section. */
5073 if (info->shared)
5074 return TRUE;
5075
5076 /* If there are no references to this symbol that do not use the
5077 GOT, we don't need to generate a copy reloc. */
5078 if (!h->non_got_ref)
5079 return TRUE;
5080
5081 /* If -z nocopyreloc was given, we won't generate them either. */
5082 if (info->nocopyreloc)
5083 {
5084 h->non_got_ref = 0;
5085 return TRUE;
5086 }
5087
5088 /* We must allocate the symbol in our .dynbss section, which will
5089 become part of the .bss section of the executable. There will be
5090 an entry for this symbol in the .dynsym section. The dynamic
5091 object will contain position independent code, so all references
5092 from the dynamic object to this symbol will go through the global
5093 offset table. The dynamic linker will use the .dynsym entry to
5094 determine the address it must put in the global offset table, so
5095 both the dynamic object and the regular object will refer to the
5096 same memory location for the variable. */
5097
5098 htab = elf64_aarch64_hash_table (info);
5099
5100 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5101 to copy the initial value out of the dynamic object and into the
5102 runtime process image. */
5103 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5104 {
5105 htab->srelbss->size += RELOC_SIZE (htab);
5106 h->needs_copy = 1;
5107 }
5108
5109 s = htab->sdynbss;
5110
5111 return _bfd_elf_adjust_dynamic_copy (h, s);
5112
5113}
5114
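/* Allocate, on first use, the bookkeeping array for the local symbols of
   ABFD.  NUMBER is the count of local symbols; at the call site below it
   is symtab_hdr->sh_info.  */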
5115static bfd_boolean
5116elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5117{
5118 struct elf_aarch64_local_symbol *locals;
5119 locals = elf64_aarch64_locals (abfd);
5120 if (locals == NULL)
5121 {
5122 locals = (struct elf_aarch64_local_symbol *)
5123 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5124 if (locals == NULL)
5125 return FALSE;
5126 elf64_aarch64_locals (abfd) = locals;
5127 }
5128 return TRUE;
5129}
5130
5131/* Look through the relocs for a section during the first phase. */
5132
5133static bfd_boolean
5134elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5135 asection *sec, const Elf_Internal_Rela *relocs)
5136{
5137 Elf_Internal_Shdr *symtab_hdr;
5138 struct elf_link_hash_entry **sym_hashes;
5139 const Elf_Internal_Rela *rel;
5140 const Elf_Internal_Rela *rel_end;
5141 asection *sreloc;
5142
5143 struct elf64_aarch64_link_hash_table *htab;
5144
5145 unsigned long nsyms;
5146
5147 if (info->relocatable)
5148 return TRUE;
5149
5150 BFD_ASSERT (is_aarch64_elf (abfd));
5151
5152 htab = elf64_aarch64_hash_table (info);
5153 sreloc = NULL;
5154
5155 symtab_hdr = &elf_symtab_hdr (abfd);
5156 sym_hashes = elf_sym_hashes (abfd);
5157 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
5158
5159 rel_end = relocs + sec->reloc_count;
5160 for (rel = relocs; rel < rel_end; rel++)
5161 {
5162 struct elf_link_hash_entry *h;
5163 unsigned long r_symndx;
5164 unsigned int r_type;
5165
5166 r_symndx = ELF64_R_SYM (rel->r_info);
5167 r_type = ELF64_R_TYPE (rel->r_info);
5168
5169 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5170 {
5171 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5172 r_symndx);
5173 return FALSE;
5174 }
5175
5176 if (r_symndx >= nsyms
5177 /* PR 9934: It is possible to have relocations that do not
5178 refer to symbols, thus it is also possible to have an
5179 object file containing relocations but no symbol table. */
5180 && (r_symndx > 0 || nsyms > 0))
5181 {
5182 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5183 r_symndx);
5184 return FALSE;
5185 }
5186
5187 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
5188 h = NULL;
5189 else
5190 {
5191 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5192 while (h->root.type == bfd_link_hash_indirect
5193 || h->root.type == bfd_link_hash_warning)
5194 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5195 }
5196
5197 /* Could be done earlier, if h were already available. */
5198 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5199
5200 switch (r_type)
5201 {
5202 case R_AARCH64_ABS64:
5203
5204 /* We don't need to handle relocs into sections not going into
5205 the "real" output. */
5206 if ((sec->flags & SEC_ALLOC) == 0)
5207 break;
5208
5209 if (h != NULL)
5210 {
5211 if (!info->shared)
5212 h->non_got_ref = 1;
5213
5214 h->plt.refcount += 1;
5215 h->pointer_equality_needed = 1;
5216 }
5217
5218 /* No need to do anything if we're not creating a shared
5219 object. */
5220 if (! info->shared)
5221 break;
5222
5223 {
5224 struct elf_dyn_relocs *p;
5225 struct elf_dyn_relocs **head;
5226
5227 /* We must copy these reloc types into the output file.
5228 Create a reloc section in dynobj and make room for
5229 this reloc. */
5230 if (sreloc == NULL)
5231 {
5232 if (htab->root.dynobj == NULL)
5233 htab->root.dynobj = abfd;
5234
5235 sreloc = _bfd_elf_make_dynamic_reloc_section
5236 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5237
5238 if (sreloc == NULL)
5239 return FALSE;
5240 }
5241
5242 /* If this is a global symbol, we count the number of
5243 relocations we need for this symbol. */
5244 if (h != NULL)
5245 {
5246 struct elf64_aarch64_link_hash_entry *eh;
5247 eh = (struct elf64_aarch64_link_hash_entry *) h;
5248 head = &eh->dyn_relocs;
5249 }
5250 else
5251 {
5252 /* Track dynamic relocs needed for local syms too.
5253 We really need local syms available to do this
5254 easily. Oh well. */
5255
5256 asection *s;
5257 void **vpp;
5258 Elf_Internal_Sym *isym;
5259
5260 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5261 abfd, r_symndx);
5262 if (isym == NULL)
5263 return FALSE;
5264
5265 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5266 if (s == NULL)
5267 s = sec;
5268
5269 /* Beware of type punned pointers vs strict aliasing
5270 rules. */
5271 vpp = &(elf_section_data (s)->local_dynrel);
5272 head = (struct elf_dyn_relocs **) vpp;
5273 }
5274
5275 p = *head;
5276 if (p == NULL || p->sec != sec)
5277 {
5278 bfd_size_type amt = sizeof *p;
5279 p = ((struct elf_dyn_relocs *)
5280 bfd_zalloc (htab->root.dynobj, amt));
5281 if (p == NULL)
5282 return FALSE;
5283 p->next = *head;
5284 *head = p;
5285 p->sec = sec;
5286 }
5287
5288 p->count += 1;
5289
5290 }
5291 break;
5292
5293 /* RR: We probably want to keep a consistency check that
5294 there are no dangling GOT_PAGE relocs. */
5295 case R_AARCH64_LD64_GOT_LO12_NC:
 5296 case R_AARCH64_GOT_LD_PREL19:
5297 case R_AARCH64_ADR_GOT_PAGE:
5298 case R_AARCH64_TLSGD_ADR_PAGE21:
5299 case R_AARCH64_TLSGD_ADD_LO12_NC:
5300 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5301 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5302 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5303 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5304 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5305 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5306 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5307 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5308 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5309 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5310 case R_AARCH64_TLSDESC_ADR_PAGE:
5311 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5312 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5313 {
5314 unsigned got_type;
5315 unsigned old_got_type;
5316
5317 got_type = aarch64_reloc_got_type (r_type);
5318
5319 if (h)
5320 {
5321 h->got.refcount += 1;
5322 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5323 }
5324 else
5325 {
5326 struct elf_aarch64_local_symbol *locals;
5327
5328 if (!elf64_aarch64_allocate_local_symbols
5329 (abfd, symtab_hdr->sh_info))
5330 return FALSE;
5331
5332 locals = elf64_aarch64_locals (abfd);
5333 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5334 locals[r_symndx].got_refcount += 1;
5335 old_got_type = locals[r_symndx].got_type;
5336 }
5337
5338 /* If a variable is accessed with both general dynamic TLS
5339 methods, two slots may be created. */
5340 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5341 got_type |= old_got_type;
5342
5343 /* We will already have issued an error message if there
5344 is a TLS/non-TLS mismatch, based on the symbol type.
5345 So just combine any TLS types needed. */
5346 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5347 && got_type != GOT_NORMAL)
5348 got_type |= old_got_type;
5349
5350 /* If the symbol is accessed by both IE and GD methods, we
5351 are able to relax. Turn off the GD flag, without
5352 messing up with any other kind of TLS types that may be
5353 involved. */
5354 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5355 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5356
5357 if (old_got_type != got_type)
5358 {
5359 if (h != NULL)
5360 elf64_aarch64_hash_entry (h)->got_type = got_type;
5361 else
5362 {
5363 struct elf_aarch64_local_symbol *locals;
5364 locals = elf64_aarch64_locals (abfd);
5365 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5366 locals[r_symndx].got_type = got_type;
5367 }
5368 }
5369
5370 if (htab->root.sgot == NULL)
5371 {
5372 if (htab->root.dynobj == NULL)
5373 htab->root.dynobj = abfd;
5374 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5375 return FALSE;
5376 }
5377 break;
5378 }
5379
5380 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5381 case R_AARCH64_ADR_PREL_PG_HI21:
 5382    case R_AARCH64_ADR_PREL_LO21:
5383 if (h != NULL && info->executable)
5384 {
5385 /* If this reloc is in a read-only section, we might
5386 need a copy reloc. We can't check reliably at this
5387 stage whether the section is read-only, as input
5388 sections have not yet been mapped to output sections.
5389 Tentatively set the flag for now, and correct in
5390 adjust_dynamic_symbol. */
5391 h->non_got_ref = 1;
5392 h->plt.refcount += 1;
5393 h->pointer_equality_needed = 1;
5394 }
 5395	  /* FIXME: RR we need to handle these in shared libraries
 5396	     and essentially bomb out, as these are non-PIC
 5397	     relocations in shared libraries.  */
5398 break;
5399
5400 case R_AARCH64_CALL26:
5401 case R_AARCH64_JUMP26:
5402 /* If this is a local symbol then we resolve it
5403 directly without creating a PLT entry. */
5404 if (h == NULL)
5405 continue;
5406
5407 h->needs_plt = 1;
5408 h->plt.refcount += 1;
5409 break;
5410 }
5411 }
5412 return TRUE;
5413}
5414
5415/* Treat mapping symbols as special target symbols. */
5416
5417static bfd_boolean
5418elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5419 asymbol *sym)
5420{
5421 return bfd_is_aarch64_special_symbol_name (sym->name,
5422 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5423}
5424
5425/* This is a copy of elf_find_function () from elf.c except that
5426 AArch64 mapping symbols are ignored when looking for function names. */
5427
5428static bfd_boolean
5429aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5430 asection *section,
5431 asymbol **symbols,
5432 bfd_vma offset,
5433 const char **filename_ptr,
5434 const char **functionname_ptr)
5435{
5436 const char *filename = NULL;
5437 asymbol *func = NULL;
5438 bfd_vma low_func = 0;
5439 asymbol **p;
5440
5441 for (p = symbols; *p != NULL; p++)
5442 {
5443 elf_symbol_type *q;
5444
5445 q = (elf_symbol_type *) * p;
5446
5447 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5448 {
5449 default:
5450 break;
5451 case STT_FILE:
5452 filename = bfd_asymbol_name (&q->symbol);
5453 break;
5454 case STT_FUNC:
5455 case STT_NOTYPE:
5456 /* Skip mapping symbols. */
5457 if ((q->symbol.flags & BSF_LOCAL)
5458 && (bfd_is_aarch64_special_symbol_name
5459 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5460 continue;
5461 /* Fall through. */
5462 if (bfd_get_section (&q->symbol) == section
5463 && q->symbol.value >= low_func && q->symbol.value <= offset)
5464 {
5465 func = (asymbol *) q;
5466 low_func = q->symbol.value;
5467 }
5468 break;
5469 }
5470 }
5471
5472 if (func == NULL)
5473 return FALSE;
5474
5475 if (filename_ptr)
5476 *filename_ptr = filename;
5477 if (functionname_ptr)
5478 *functionname_ptr = bfd_asymbol_name (func);
5479
5480 return TRUE;
5481}
5482
5483
5484/* Find the nearest line to a particular section and offset, for error
5485 reporting. This code is a duplicate of the code in elf.c, except
5486 that it uses aarch64_elf_find_function. */
5487
5488static bfd_boolean
5489elf64_aarch64_find_nearest_line (bfd *abfd,
5490 asection *section,
5491 asymbol **symbols,
5492 bfd_vma offset,
5493 const char **filename_ptr,
5494 const char **functionname_ptr,
5495 unsigned int *line_ptr)
5496{
5497 bfd_boolean found = FALSE;
5498
5499 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5500 toolchain uses it. */
5501
5502 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5503 section, symbols, offset,
5504 filename_ptr, functionname_ptr,
5505 line_ptr, NULL, 0,
5506 &elf_tdata (abfd)->dwarf2_find_line_info))
5507 {
5508 if (!*functionname_ptr)
5509 aarch64_elf_find_function (abfd, section, symbols, offset,
5510 *filename_ptr ? NULL : filename_ptr,
5511 functionname_ptr);
5512
5513 return TRUE;
5514 }
5515
5516 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5517 &found, filename_ptr,
5518 functionname_ptr, line_ptr,
5519 &elf_tdata (abfd)->line_info))
5520 return FALSE;
5521
5522 if (found && (*functionname_ptr || *line_ptr))
5523 return TRUE;
5524
5525 if (symbols == NULL)
5526 return FALSE;
5527
5528 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5529 filename_ptr, functionname_ptr))
5530 return FALSE;
5531
5532 *line_ptr = 0;
5533 return TRUE;
5534}
5535
5536static bfd_boolean
5537elf64_aarch64_find_inliner_info (bfd *abfd,
5538 const char **filename_ptr,
5539 const char **functionname_ptr,
5540 unsigned int *line_ptr)
5541{
5542 bfd_boolean found;
5543 found = _bfd_dwarf2_find_inliner_info
5544 (abfd, filename_ptr,
5545 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5546 return found;
5547}
5548
5549
5550static void
5551elf64_aarch64_post_process_headers (bfd *abfd,
5552 struct bfd_link_info *link_info
5553 ATTRIBUTE_UNUSED)
5554{
5555 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5556
5557 i_ehdrp = elf_elfheader (abfd);
5558 i_ehdrp->e_ident[EI_OSABI] = 0;
5559 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5560}
5561
5562static enum elf_reloc_type_class
5563elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5564 const asection *rel_sec ATTRIBUTE_UNUSED,
5565 const Elf_Internal_Rela *rela)
5566{
5567 switch ((int) ELF64_R_TYPE (rela->r_info))
5568 {
5569 case R_AARCH64_RELATIVE:
5570 return reloc_class_relative;
5571 case R_AARCH64_JUMP_SLOT:
5572 return reloc_class_plt;
5573 case R_AARCH64_COPY:
5574 return reloc_class_copy;
5575 default:
5576 return reloc_class_normal;
5577 }
5578}
5579
 5580/* Set the flags on an AArch64 ELF section when it is read in.  */
5581
5582static bfd_boolean
5583elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5584{
5585 if (hdr->sh_type == SHT_NOTE)
5586 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5587
5588 return TRUE;
5589}
5590
5591/* Handle an AArch64 specific section when reading an object file. This is
5592 called when bfd_section_from_shdr finds a section with an unknown
5593 type. */
5594
5595static bfd_boolean
5596elf64_aarch64_section_from_shdr (bfd *abfd,
5597 Elf_Internal_Shdr *hdr,
5598 const char *name, int shindex)
5599{
5600 /* There ought to be a place to keep ELF backend specific flags, but
5601 at the moment there isn't one. We just keep track of the
5602 sections by their name, instead. Fortunately, the ABI gives
5603 names for all the AArch64 specific sections, so we will probably get
5604 away with this. */
5605 switch (hdr->sh_type)
5606 {
5607 case SHT_AARCH64_ATTRIBUTES:
5608 break;
5609
5610 default:
5611 return FALSE;
5612 }
5613
5614 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5615 return FALSE;
5616
5617 return TRUE;
5618}
5619
5620/* A structure used to record a list of sections, independently
5621 of the next and prev fields in the asection structure. */
5622typedef struct section_list
5623{
5624 asection *sec;
5625 struct section_list *next;
5626 struct section_list *prev;
5627}
5628section_list;
5629
5630/* Unfortunately we need to keep a list of sections for which
5631 an _aarch64_elf_section_data structure has been allocated. This
5632 is because it is possible for functions like elf64_aarch64_write_section
5633 to be called on a section which has had an elf_data_structure
5634 allocated for it (and so the used_by_bfd field is valid) but
5635 for which the AArch64 extended version of this structure - the
5636 _aarch64_elf_section_data structure - has not been allocated. */
5637static section_list *sections_with_aarch64_elf_section_data = NULL;
5638
5639static void
5640record_section_with_aarch64_elf_section_data (asection *sec)
5641{
5642 struct section_list *entry;
5643
5644 entry = bfd_malloc (sizeof (*entry));
5645 if (entry == NULL)
5646 return;
5647 entry->sec = sec;
5648 entry->next = sections_with_aarch64_elf_section_data;
5649 entry->prev = NULL;
5650 if (entry->next != NULL)
5651 entry->next->prev = entry;
5652 sections_with_aarch64_elf_section_data = entry;
5653}
5654
5655static struct section_list *
5656find_aarch64_elf_section_entry (asection *sec)
5657{
5658 struct section_list *entry;
5659 static struct section_list *last_entry = NULL;
5660
5661 /* This is a short cut for the typical case where the sections are added
5662 to the sections_with_aarch64_elf_section_data list in forward order and
5663 then looked up here in backwards order. This makes a real difference
5664 to the ld-srec/sec64k.exp linker test. */
5665 entry = sections_with_aarch64_elf_section_data;
5666 if (last_entry != NULL)
5667 {
5668 if (last_entry->sec == sec)
5669 entry = last_entry;
5670 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5671 entry = last_entry->next;
5672 }
5673
5674 for (; entry; entry = entry->next)
5675 if (entry->sec == sec)
5676 break;
5677
5678 if (entry)
5679 /* Record the entry prior to this one - it is the entry we are
5680 most likely to want to locate next time. Also this way if we
5681 have been called from
5682 unrecord_section_with_aarch64_elf_section_data () we will not
5683 be caching a pointer that is about to be freed. */
5684 last_entry = entry->prev;
5685
5686 return entry;
5687}
5688
5689static void
5690unrecord_section_with_aarch64_elf_section_data (asection *sec)
5691{
5692 struct section_list *entry;
5693
5694 entry = find_aarch64_elf_section_entry (sec);
5695
5696 if (entry)
5697 {
5698 if (entry->prev != NULL)
5699 entry->prev->next = entry->next;
5700 if (entry->next != NULL)
5701 entry->next->prev = entry->prev;
5702 if (entry == sections_with_aarch64_elf_section_data)
5703 sections_with_aarch64_elf_section_data = entry->next;
5704 free (entry);
5705 }
5706}
5707
5708
5709typedef struct
5710{
5711 void *finfo;
5712 struct bfd_link_info *info;
5713 asection *sec;
5714 int sec_shndx;
5715 int (*func) (void *, const char *, Elf_Internal_Sym *,
5716 asection *, struct elf_link_hash_entry *);
5717} output_arch_syminfo;
5718
5719enum map_symbol_type
5720{
5721 AARCH64_MAP_INSN,
5722 AARCH64_MAP_DATA
5723};
5724
5725
5726/* Output a single mapping symbol. */
5727
5728static bfd_boolean
5729elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5730 enum map_symbol_type type, bfd_vma offset)
5731{
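  /* Note: "$x" marks the start of a run of A64 instructions and "$d" the
     start of literal data, per the AArch64 ELF mapping symbol convention.  */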
5732 static const char *names[2] = { "$x", "$d" };
5733 Elf_Internal_Sym sym;
5734
5735 sym.st_value = (osi->sec->output_section->vma
5736 + osi->sec->output_offset + offset);
5737 sym.st_size = 0;
5738 sym.st_other = 0;
5739 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5740 sym.st_shndx = osi->sec_shndx;
5741 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5742}
5743
5744
5745
5746/* Output mapping symbols for PLT entries associated with H. */
5747
5748static bfd_boolean
5749elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5750{
5751 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5752 bfd_vma addr;
5753
5754 if (h->root.type == bfd_link_hash_indirect)
5755 return TRUE;
5756
5757 if (h->root.type == bfd_link_hash_warning)
5758 /* When warning symbols are created, they **replace** the "real"
5759 entry in the hash table, thus we never get to see the real
5760 symbol in a hash traversal. So look at it now. */
5761 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5762
5763 if (h->plt.offset == (bfd_vma) - 1)
5764 return TRUE;
5765
5766 addr = h->plt.offset;
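  /* The small-model PLT header is 32 bytes, so an offset of 32 is the very
     first PLTn entry; presumably only that entry needs a fresh "$x" mapping
     symbol, the following entries being contiguous code.  */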
5767 if (addr == 32)
5768 {
5769 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5770 return FALSE;
5771 }
5772 return TRUE;
5773}
5774
5775
5776/* Output a single local symbol for a generated stub. */
5777
5778static bfd_boolean
5779elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5780 bfd_vma offset, bfd_vma size)
5781{
5782 Elf_Internal_Sym sym;
5783
5784 sym.st_value = (osi->sec->output_section->vma
5785 + osi->sec->output_offset + offset);
5786 sym.st_size = size;
5787 sym.st_other = 0;
5788 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5789 sym.st_shndx = osi->sec_shndx;
5790 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5791}
5792
5793static bfd_boolean
5794aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5795{
5796 struct elf64_aarch64_stub_hash_entry *stub_entry;
5797 asection *stub_sec;
5798 bfd_vma addr;
5799 char *stub_name;
5800 output_arch_syminfo *osi;
5801
5802 /* Massage our args to the form they really have. */
5803 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5804 osi = (output_arch_syminfo *) in_arg;
5805
5806 stub_sec = stub_entry->stub_sec;
5807
5808 /* Ensure this stub is attached to the current section being
5809 processed. */
5810 if (stub_sec != osi->sec)
5811 return TRUE;
5812
5813 addr = (bfd_vma) stub_entry->stub_offset;
5814
5815 stub_name = stub_entry->output_name;
5816
5817 switch (stub_entry->stub_type)
5818 {
5819 case aarch64_stub_adrp_branch:
5820 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5821 sizeof (aarch64_adrp_branch_stub)))
5822 return FALSE;
5823 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5824 return FALSE;
5825 break;
5826 case aarch64_stub_long_branch:
5827 if (!elf64_aarch64_output_stub_sym
5828 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5829 return FALSE;
5830 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5831 return FALSE;
5832 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5833 return FALSE;
5834 break;
5835 default:
5836 BFD_FAIL ();
5837 }
5838
5839 return TRUE;
5840}
5841
5842/* Output mapping symbols for linker generated sections. */
5843
5844static bfd_boolean
5845elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5846 struct bfd_link_info *info,
5847 void *finfo,
5848 int (*func) (void *, const char *,
5849 Elf_Internal_Sym *,
5850 asection *,
5851 struct elf_link_hash_entry
5852 *))
5853{
5854 output_arch_syminfo osi;
5855 struct elf64_aarch64_link_hash_table *htab;
5856
5857 htab = elf64_aarch64_hash_table (info);
5858
5859 osi.finfo = finfo;
5860 osi.info = info;
5861 osi.func = func;
5862
 5863  /* Long call stubs.  */
5864 if (htab->stub_bfd && htab->stub_bfd->sections)
5865 {
5866 asection *stub_sec;
5867
5868 for (stub_sec = htab->stub_bfd->sections;
5869 stub_sec != NULL; stub_sec = stub_sec->next)
5870 {
5871 /* Ignore non-stub sections. */
5872 if (!strstr (stub_sec->name, STUB_SUFFIX))
5873 continue;
5874
5875 osi.sec = stub_sec;
5876
5877 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5878 (output_bfd, osi.sec->output_section);
5879
5880 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5881 &osi);
5882 }
5883 }
5884
5885 /* Finally, output mapping symbols for the PLT. */
5886 if (!htab->root.splt || htab->root.splt->size == 0)
5887 return TRUE;
5888
5889 /* For now live without mapping symbols for the plt. */
5890 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5891 (output_bfd, htab->root.splt->output_section);
5892 osi.sec = htab->root.splt;
5893
5894 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5895 (void *) &osi);
5896
5897 return TRUE;
5898
5899}
5900
5901/* Allocate target specific section data. */
5902
5903static bfd_boolean
5904elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5905{
5906 if (!sec->used_by_bfd)
5907 {
5908 _aarch64_elf_section_data *sdata;
5909 bfd_size_type amt = sizeof (*sdata);
5910
5911 sdata = bfd_zalloc (abfd, amt);
5912 if (sdata == NULL)
5913 return FALSE;
5914 sec->used_by_bfd = sdata;
5915 }
5916
5917 record_section_with_aarch64_elf_section_data (sec);
5918
5919 return _bfd_elf_new_section_hook (abfd, sec);
5920}
5921
5922
5923static void
5924unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5925 asection *sec,
5926 void *ignore ATTRIBUTE_UNUSED)
5927{
5928 unrecord_section_with_aarch64_elf_section_data (sec);
5929}
5930
5931static bfd_boolean
5932elf64_aarch64_close_and_cleanup (bfd *abfd)
5933{
5934 if (abfd->sections)
5935 bfd_map_over_sections (abfd,
5936 unrecord_section_via_map_over_sections, NULL);
5937
5938 return _bfd_elf_close_and_cleanup (abfd);
5939}
5940
5941static bfd_boolean
5942elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5943{
5944 if (abfd->sections)
5945 bfd_map_over_sections (abfd,
5946 unrecord_section_via_map_over_sections, NULL);
5947
5948 return _bfd_free_cached_info (abfd);
5949}
5950
5951static bfd_boolean
5952elf64_aarch64_is_function_type (unsigned int type)
5953{
5954 return type == STT_FUNC;
5955}
5956
5957/* Create dynamic sections. This is different from the ARM backend in that
5958 the got, plt, gotplt and their relocation sections are all created in the
5959 standard part of the bfd elf backend. */
5960
5961static bfd_boolean
5962elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5963 struct bfd_link_info *info)
5964{
5965 struct elf64_aarch64_link_hash_table *htab;
5966 struct elf_link_hash_entry *h;
5967
5968 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5969 return FALSE;
5970
5971 htab = elf64_aarch64_hash_table (info);
5972 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5973 if (!info->shared)
5974 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5975
5976 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5977 abort ();
5978
5979 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5980 dynobj's .got section. We don't do this in the linker script
5981 because we don't want to define the symbol if we are not creating
5982 a global offset table. */
5983 h = _bfd_elf_define_linkage_sym (dynobj, info,
5984 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5985 elf_hash_table (info)->hgot = h;
5986 if (h == NULL)
5987 return FALSE;
5988
5989 return TRUE;
5990}
5991
5992
5993/* Allocate space in .plt, .got and associated reloc sections for
5994 dynamic relocs. */
5995
5996static bfd_boolean
5997elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5998{
5999 struct bfd_link_info *info;
6000 struct elf64_aarch64_link_hash_table *htab;
6001 struct elf64_aarch64_link_hash_entry *eh;
6002 struct elf_dyn_relocs *p;
6003
 6004  /* An example of a bfd_link_hash_indirect symbol is a versioned
6005 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6006 -> __gxx_personality_v0(bfd_link_hash_defined)
6007
6008 There is no need to process bfd_link_hash_indirect symbols here
6009 because we will also be presented with the concrete instance of
6010 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
6011 called to copy all relevant data from the generic to the concrete
6012 symbol instance.
6013 */
6014 if (h->root.type == bfd_link_hash_indirect)
6015 return TRUE;
6016
6017 if (h->root.type == bfd_link_hash_warning)
6018 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6019
6020 info = (struct bfd_link_info *) inf;
6021 htab = elf64_aarch64_hash_table (info);
6022
6023 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6024 {
6025 /* Make sure this symbol is output as a dynamic symbol.
6026 Undefined weak syms won't yet be marked as dynamic. */
6027 if (h->dynindx == -1 && !h->forced_local)
6028 {
6029 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6030 return FALSE;
6031 }
6032
6033 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6034 {
6035 asection *s = htab->root.splt;
6036
6037 /* If this is the first .plt entry, make room for the special
6038 first entry. */
6039 if (s->size == 0)
6040 s->size += htab->plt_header_size;
6041
6042 h->plt.offset = s->size;
6043
6044 /* If this symbol is not defined in a regular file, and we are
6045 not generating a shared library, then set the symbol to this
6046 location in the .plt. This is required to make function
6047 pointers compare as equal between the normal executable and
6048 the shared library. */
6049 if (!info->shared && !h->def_regular)
6050 {
6051 h->root.u.def.section = s;
6052 h->root.u.def.value = h->plt.offset;
6053 }
6054
6055 /* Make room for this entry. For now we only create the
6056 small model PLT entries. We later need to find a way
6057 of relaxing into these from the large model PLT entries. */
6058 s->size += PLT_SMALL_ENTRY_SIZE;
6059
6060 /* We also need to make an entry in the .got.plt section, which
6061 will be placed in the .got section by the linker script. */
6062 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6063
6064 /* We also need to make an entry in the .rela.plt section. */
6065 htab->root.srelplt->size += RELOC_SIZE (htab);
6066
6067 /* We need to ensure that all GOT entries that serve the PLT
6068 are consecutive with the special GOT slots [0] [1] and
 6069	     [2].  Any additional relocations, such as
6070 R_AARCH64_TLSDESC, must be placed after the PLT related
6071 entries. We abuse the reloc_count such that during
6072 sizing we adjust reloc_count to indicate the number of
6073 PLT related reserved entries. In subsequent phases when
6074 filling in the contents of the reloc entries, PLT related
6075 entries are placed by computing their PLT index (0
 6076	     .. reloc_count), while other non-PLT relocs are placed
6077 at the slot indicated by reloc_count and reloc_count is
6078 updated. */
6079
6080 htab->root.srelplt->reloc_count++;
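	  /* Illustration: once two PLT entries have been sized, reloc_count
	     is 2; a later non-PLT reloc such as R_AARCH64_TLSDESC is then
	     written at slot 2 and bumps reloc_count to 3.  */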
6081 }
6082 else
6083 {
6084 h->plt.offset = (bfd_vma) - 1;
6085 h->needs_plt = 0;
6086 }
6087 }
6088 else
6089 {
6090 h->plt.offset = (bfd_vma) - 1;
6091 h->needs_plt = 0;
6092 }
6093
6094 eh = (struct elf64_aarch64_link_hash_entry *) h;
6095 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6096
6097 if (h->got.refcount > 0)
6098 {
6099 bfd_boolean dyn;
6100 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
6101
6102 h->got.offset = (bfd_vma) - 1;
6103
6104 dyn = htab->root.dynamic_sections_created;
6105
6106 /* Make sure this symbol is output as a dynamic symbol.
6107 Undefined weak syms won't yet be marked as dynamic. */
6108 if (dyn && h->dynindx == -1 && !h->forced_local)
6109 {
6110 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6111 return FALSE;
6112 }
6113
6114 if (got_type == GOT_UNKNOWN)
6115 {
6116 }
6117 else if (got_type == GOT_NORMAL)
6118 {
6119 h->got.offset = htab->root.sgot->size;
6120 htab->root.sgot->size += GOT_ENTRY_SIZE;
6121 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6122 || h->root.type != bfd_link_hash_undefweak)
6123 && (info->shared
6124 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6125 {
6126 htab->root.srelgot->size += RELOC_SIZE (htab);
6127 }
6128 }
6129 else
6130 {
6131 int indx;
6132 if (got_type & GOT_TLSDESC_GD)
6133 {
6134 eh->tlsdesc_got_jump_table_offset =
6135 (htab->root.sgotplt->size
6136 - aarch64_compute_jump_table_size (htab));
6137 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6138 h->got.offset = (bfd_vma) - 2;
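	      /* The (bfd_vma) -2 value marks a symbol whose only GOT presence
	         so far is its TLSDESC pair in .got.plt; it is overwritten just
	         below if GD or IE slots are also allocated in .got.  */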
6139 }
6140
6141 if (got_type & GOT_TLS_GD)
6142 {
6143 h->got.offset = htab->root.sgot->size;
6144 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6145 }
6146
6147 if (got_type & GOT_TLS_IE)
6148 {
6149 h->got.offset = htab->root.sgot->size;
6150 htab->root.sgot->size += GOT_ENTRY_SIZE;
6151 }
6152
6153 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6154 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6155 || h->root.type != bfd_link_hash_undefweak)
6156 && (info->shared
6157 || indx != 0
6158 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6159 {
6160 if (got_type & GOT_TLSDESC_GD)
6161 {
6162 htab->root.srelplt->size += RELOC_SIZE (htab);
6163 /* Note reloc_count not incremented here! We have
6164 already adjusted reloc_count for this relocation
6165 type. */
6166
6167 /* TLSDESC PLT is now needed, but not yet determined. */
6168 htab->tlsdesc_plt = (bfd_vma) - 1;
6169 }
6170
6171 if (got_type & GOT_TLS_GD)
6172 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6173
6174 if (got_type & GOT_TLS_IE)
6175 htab->root.srelgot->size += RELOC_SIZE (htab);
6176 }
6177 }
6178 }
6179 else
6180 {
6181 h->got.offset = (bfd_vma) - 1;
6182 }
6183
6184 if (eh->dyn_relocs == NULL)
6185 return TRUE;
6186
6187 /* In the shared -Bsymbolic case, discard space allocated for
6188 dynamic pc-relative relocs against symbols which turn out to be
6189 defined in regular objects. For the normal shared case, discard
6190 space for pc-relative relocs that have become local due to symbol
6191 visibility changes. */
6192
6193 if (info->shared)
6194 {
6195 /* Relocs that use pc_count are those that appear on a call
 6196	 insn, or certain REL relocs that can be generated via assembly.
6197 We want calls to protected symbols to resolve directly to the
6198 function rather than going via the plt. If people want
6199 function pointer comparisons to work as expected then they
6200 should avoid writing weird assembly. */
6201 if (SYMBOL_CALLS_LOCAL (info, h))
6202 {
6203 struct elf_dyn_relocs **pp;
6204
6205 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6206 {
6207 p->count -= p->pc_count;
6208 p->pc_count = 0;
6209 if (p->count == 0)
6210 *pp = p->next;
6211 else
6212 pp = &p->next;
6213 }
6214 }
6215
6216 /* Also discard relocs on undefined weak syms with non-default
6217 visibility. */
6218 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6219 {
6220 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6221 eh->dyn_relocs = NULL;
6222
6223 /* Make sure undefined weak symbols are output as a dynamic
6224 symbol in PIEs. */
6225 else if (h->dynindx == -1
6226 && !h->forced_local
6227 && !bfd_elf_link_record_dynamic_symbol (info, h))
6228 return FALSE;
6229 }
6230
6231 }
6232 else if (ELIMINATE_COPY_RELOCS)
6233 {
6234 /* For the non-shared case, discard space for relocs against
6235 symbols which turn out to need copy relocs or are not
6236 dynamic. */
6237
6238 if (!h->non_got_ref
6239 && ((h->def_dynamic
6240 && !h->def_regular)
6241 || (htab->root.dynamic_sections_created
6242 && (h->root.type == bfd_link_hash_undefweak
6243 || h->root.type == bfd_link_hash_undefined))))
6244 {
6245 /* Make sure this symbol is output as a dynamic symbol.
6246 Undefined weak syms won't yet be marked as dynamic. */
6247 if (h->dynindx == -1
6248 && !h->forced_local
6249 && !bfd_elf_link_record_dynamic_symbol (info, h))
6250 return FALSE;
6251
6252 /* If that succeeded, we know we'll be keeping all the
6253 relocs. */
6254 if (h->dynindx != -1)
6255 goto keep;
6256 }
6257
6258 eh->dyn_relocs = NULL;
6259
6260 keep:;
6261 }
6262
6263 /* Finally, allocate space. */
6264 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6265 {
6266 asection *sreloc;
6267
6268 sreloc = elf_section_data (p->sec)->sreloc;
6269
6270 BFD_ASSERT (sreloc != NULL);
6271
6272 sreloc->size += p->count * RELOC_SIZE (htab);
6273 }
6274
6275 return TRUE;
6276}
6277
6278
6279
6280
 6281/* This is the most important function of all.  Innocuously named
 6282   though!  */
6283static bfd_boolean
6284elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6285 struct bfd_link_info *info)
6286{
6287 struct elf64_aarch64_link_hash_table *htab;
6288 bfd *dynobj;
6289 asection *s;
6290 bfd_boolean relocs;
6291 bfd *ibfd;
6292
6293 htab = elf64_aarch64_hash_table ((info));
6294 dynobj = htab->root.dynobj;
6295
6296 BFD_ASSERT (dynobj != NULL);
6297
6298 if (htab->root.dynamic_sections_created)
6299 {
6300 if (info->executable)
6301 {
6302 s = bfd_get_linker_section (dynobj, ".interp");
6303 if (s == NULL)
6304 abort ();
6305 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6306 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6307 }
6308 }
6309
6310 /* Set up .got offsets for local syms, and space for local dynamic
6311 relocs. */
6312 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6313 {
6314 struct elf_aarch64_local_symbol *locals = NULL;
6315 Elf_Internal_Shdr *symtab_hdr;
6316 asection *srel;
6317 unsigned int i;
6318
6319 if (!is_aarch64_elf (ibfd))
6320 continue;
6321
6322 for (s = ibfd->sections; s != NULL; s = s->next)
6323 {
6324 struct elf_dyn_relocs *p;
6325
6326 for (p = (struct elf_dyn_relocs *)
6327 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6328 {
6329 if (!bfd_is_abs_section (p->sec)
6330 && bfd_is_abs_section (p->sec->output_section))
6331 {
6332 /* Input section has been discarded, either because
6333 it is a copy of a linkonce section or due to
6334 linker script /DISCARD/, so we'll be discarding
6335 the relocs too. */
6336 }
6337 else if (p->count != 0)
6338 {
6339 srel = elf_section_data (p->sec)->sreloc;
6340 srel->size += p->count * RELOC_SIZE (htab);
6341 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6342 info->flags |= DF_TEXTREL;
6343 }
6344 }
6345 }
6346
6347 locals = elf64_aarch64_locals (ibfd);
6348 if (!locals)
6349 continue;
6350
6351 symtab_hdr = &elf_symtab_hdr (ibfd);
6352 srel = htab->root.srelgot;
6353 for (i = 0; i < symtab_hdr->sh_info; i++)
6354 {
6355 locals[i].got_offset = (bfd_vma) - 1;
6356 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6357 if (locals[i].got_refcount > 0)
6358 {
6359 unsigned got_type = locals[i].got_type;
6360 if (got_type & GOT_TLSDESC_GD)
6361 {
6362 locals[i].tlsdesc_got_jump_table_offset =
6363 (htab->root.sgotplt->size
6364 - aarch64_compute_jump_table_size (htab));
6365 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6366 locals[i].got_offset = (bfd_vma) - 2;
6367 }
6368
6369 if (got_type & GOT_TLS_GD)
6370 {
6371 locals[i].got_offset = htab->root.sgot->size;
6372 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6373 }
6374
6375 if (got_type & GOT_TLS_IE)
6376 {
6377 locals[i].got_offset = htab->root.sgot->size;
6378 htab->root.sgot->size += GOT_ENTRY_SIZE;
6379 }
6380
6381 if (got_type == GOT_UNKNOWN)
6382 {
6383 }
6384
6385 if (got_type == GOT_NORMAL)
6386 {
6387 }
6388
6389 if (info->shared)
6390 {
6391 if (got_type & GOT_TLSDESC_GD)
6392 {
6393 htab->root.srelplt->size += RELOC_SIZE (htab);
6394 /* Note RELOC_COUNT not incremented here! */
6395 htab->tlsdesc_plt = (bfd_vma) - 1;
6396 }
6397
6398 if (got_type & GOT_TLS_GD)
6399 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6400
6401 if (got_type & GOT_TLS_IE)
6402 htab->root.srelgot->size += RELOC_SIZE (htab);
6403 }
6404 }
6405 else
6406 {
6407 locals[i].got_refcount = (bfd_vma) - 1;
6408 }
6409 }
6410 }
6411
6412
6413 /* Allocate global sym .plt and .got entries, and space for global
6414 sym dynamic relocs. */
6415 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6416 info);
6417
6418
6419 /* For every jump slot reserved in the sgotplt, reloc_count is
6420 incremented. However, when we reserve space for TLS descriptors,
6421 it's not incremented, so in order to compute the space reserved
6422 for them, it suffices to multiply the reloc count by the jump
6423 slot size. */
6424
6425 if (htab->root.srelplt)
6426 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6427
6428 if (htab->tlsdesc_plt)
6429 {
6430 if (htab->root.splt->size == 0)
6431 htab->root.splt->size += PLT_ENTRY_SIZE;
6432
6433 htab->tlsdesc_plt = htab->root.splt->size;
6434 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6435
6436 /* If we're not using lazy TLS relocations, don't generate the
6437 GOT entry required. */
6438 if (!(info->flags & DF_BIND_NOW))
6439 {
6440 htab->dt_tlsdesc_got = htab->root.sgot->size;
6441 htab->root.sgot->size += GOT_ENTRY_SIZE;
6442 }
6443 }
6444
6445 /* We now have determined the sizes of the various dynamic sections.
6446 Allocate memory for them. */
6447 relocs = FALSE;
6448 for (s = dynobj->sections; s != NULL; s = s->next)
6449 {
6450 if ((s->flags & SEC_LINKER_CREATED) == 0)
6451 continue;
6452
6453 if (s == htab->root.splt
6454 || s == htab->root.sgot
6455 || s == htab->root.sgotplt
6456 || s == htab->root.iplt
6457 || s == htab->root.igotplt || s == htab->sdynbss)
6458 {
6459 /* Strip this section if we don't need it; see the
6460 comment below. */
6461 }
6462 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6463 {
6464 if (s->size != 0 && s != htab->root.srelplt)
6465 relocs = TRUE;
6466
6467 /* We use the reloc_count field as a counter if we need
6468 to copy relocs into the output file. */
6469 if (s != htab->root.srelplt)
6470 s->reloc_count = 0;
6471 }
6472 else
6473 {
6474 /* It's not one of our sections, so don't allocate space. */
6475 continue;
6476 }
6477
6478 if (s->size == 0)
6479 {
6480 /* If we don't need this section, strip it from the
6481 output file. This is mostly to handle .rela.bss and
6482 .rela.plt. We must create both sections in
6483 create_dynamic_sections, because they must be created
6484 before the linker maps input sections to output
6485 sections. The linker does that before
6486 adjust_dynamic_symbol is called, and it is that
6487 function which decides whether anything needs to go
6488 into these sections. */
6489
6490 s->flags |= SEC_EXCLUDE;
6491 continue;
6492 }
6493
6494 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6495 continue;
6496
6497 /* Allocate memory for the section contents. We use bfd_zalloc
6498 here in case unused entries are not reclaimed before the
6499 section's contents are written out. This should not happen,
6500 but this way if it does, we get a R_AARCH64_NONE reloc instead
6501 of garbage. */
6502 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6503 if (s->contents == NULL)
6504 return FALSE;
6505 }
6506
6507 if (htab->root.dynamic_sections_created)
6508 {
6509 /* Add some entries to the .dynamic section. We fill in the
6510 values later, in elf64_aarch64_finish_dynamic_sections, but we
6511 must add the entries now so that we get the correct size for
6512 the .dynamic section. The DT_DEBUG entry is filled in by the
6513 dynamic linker and used by the debugger. */
6514#define add_dynamic_entry(TAG, VAL) \
6515 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6516
6517 if (info->executable)
6518 {
6519 if (!add_dynamic_entry (DT_DEBUG, 0))
6520 return FALSE;
6521 }
6522
6523 if (htab->root.splt->size != 0)
6524 {
6525 if (!add_dynamic_entry (DT_PLTGOT, 0)
6526 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6527 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6528 || !add_dynamic_entry (DT_JMPREL, 0))
6529 return FALSE;
6530
6531 if (htab->tlsdesc_plt
6532 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6533 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6534 return FALSE;
6535 }
6536
6537 if (relocs)
6538 {
6539 if (!add_dynamic_entry (DT_RELA, 0)
6540 || !add_dynamic_entry (DT_RELASZ, 0)
6541 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6542 return FALSE;
6543
6544 /* If any dynamic relocs apply to a read-only section,
6545 then we need a DT_TEXTREL entry. */
6546 if ((info->flags & DF_TEXTREL) != 0)
6547 {
6548 if (!add_dynamic_entry (DT_TEXTREL, 0))
6549 return FALSE;
6550 }
6551 }
6552 }
6553#undef add_dynamic_entry
6554
6555 return TRUE;
6556
6557
6558}
6559
6560static inline void
6561elf64_aarch64_update_plt_entry (bfd *output_bfd,
6562 unsigned int r_type,
6563 bfd_byte *plt_entry, bfd_vma value)
6564{
6565 reloc_howto_type *howto;
6566 howto = elf64_aarch64_howto_from_type (r_type);
6567 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6568}
6569
6570static void
6571elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6572 struct elf64_aarch64_link_hash_table
6573 *htab, bfd *output_bfd)
6574{
6575 bfd_byte *plt_entry;
6576 bfd_vma plt_index;
6577 bfd_vma got_offset;
6578 bfd_vma gotplt_entry_address;
6579 bfd_vma plt_entry_address;
6580 Elf_Internal_Rela rela;
6581 bfd_byte *loc;
6582
6583 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6584
 6585  /* The offset into .got.plt is the PLT index plus the three
 6586     reserved GOT header entries, times GOT_ENTRY_SIZE (8 bytes).  */
6587 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
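  /* For instance, the first PLTn entry (plt_index 0) uses the fourth
     .got.plt slot, at byte offset 3 * GOT_ENTRY_SIZE == 24, just past the
     three reserved header slots.  */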
6588 plt_entry = htab->root.splt->contents + h->plt.offset;
6589 plt_entry_address = htab->root.splt->output_section->vma
6590 + htab->root.splt->output_section->output_offset + h->plt.offset;
6591 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6592 htab->root.sgotplt->output_offset + got_offset;
6593
6594 /* Copy in the boiler-plate for the PLTn entry. */
6595 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6596
6597 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6598 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6599 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6600 plt_entry,
6601 PG (gotplt_entry_address) -
6602 PG (plt_entry_address));
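  /* PG masks an address down to its 4 KiB page base and PG_OFFSET keeps the
     low 12 bits, matching the ADRP/:lo12: split used by these entries.  */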
6603
6604 /* Fill in the lo12 bits for the load from the pltgot. */
6605 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6606 plt_entry + 4,
6607 PG_OFFSET (gotplt_entry_address));
6608
 6609  /* Fill in the lo12 bits for the add from the pltgot entry.  */
6610 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6611 plt_entry + 8,
6612 PG_OFFSET (gotplt_entry_address));
6613
6614 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6615 bfd_put_64 (output_bfd,
6616 (htab->root.splt->output_section->vma
6617 + htab->root.splt->output_offset),
6618 htab->root.sgotplt->contents + got_offset);
6619
6620 /* Fill in the entry in the .rela.plt section. */
6621 rela.r_offset = gotplt_entry_address;
6622 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6623 rela.r_addend = 0;
6624
 6625  /* Compute the relocation entry to use based on the PLT index and do
6626 not adjust reloc_count. The reloc_count has already been adjusted
6627 to account for this entry. */
6628 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6629 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6630}
6631
 6632/* Size sections even though they're not dynamic.  We use this to set up
6633 _TLS_MODULE_BASE_, if needed. */
6634
6635static bfd_boolean
6636elf64_aarch64_always_size_sections (bfd *output_bfd,
6637 struct bfd_link_info *info)
6638{
6639 asection *tls_sec;
6640
6641 if (info->relocatable)
6642 return TRUE;
6643
6644 tls_sec = elf_hash_table (info)->tls_sec;
6645
6646 if (tls_sec)
6647 {
6648 struct elf_link_hash_entry *tlsbase;
6649
6650 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6651 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6652
6653 if (tlsbase)
6654 {
6655 struct bfd_link_hash_entry *h = NULL;
6656 const struct elf_backend_data *bed =
6657 get_elf_backend_data (output_bfd);
6658
6659 if (!(_bfd_generic_link_add_one_symbol
6660 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6661 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6662 return FALSE;
6663
6664 tlsbase->type = STT_TLS;
6665 tlsbase = (struct elf_link_hash_entry *) h;
6666 tlsbase->def_regular = 1;
6667 tlsbase->other = STV_HIDDEN;
6668 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6669 }
6670 }
6671
6672 return TRUE;
6673}
6674
6675/* Finish up dynamic symbol handling. We set the contents of various
6676 dynamic sections here. */
6677static bfd_boolean
6678elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6679 struct bfd_link_info *info,
6680 struct elf_link_hash_entry *h,
6681 Elf_Internal_Sym *sym)
6682{
6683 struct elf64_aarch64_link_hash_table *htab;
6684 htab = elf64_aarch64_hash_table (info);
6685
6686 if (h->plt.offset != (bfd_vma) - 1)
6687 {
6688 /* This symbol has an entry in the procedure linkage table. Set
6689 it up. */
6690
6691 if (h->dynindx == -1
6692 || htab->root.splt == NULL
6693 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6694 abort ();
6695
6696 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6697 if (!h->def_regular)
6698 {
6699 /* Mark the symbol as undefined, rather than as defined in
6700 the .plt section. Leave the value alone. This is a clue
6701 for the dynamic linker, to make function pointer
6702 comparisons work between an application and shared
6703 library. */
6704 sym->st_shndx = SHN_UNDEF;
6705 }
6706 }
6707
6708 if (h->got.offset != (bfd_vma) - 1
6709 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6710 {
6711 Elf_Internal_Rela rela;
6712 bfd_byte *loc;
6713
6714 /* This symbol has an entry in the global offset table. Set it
6715 up. */
6716 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6717 abort ();
6718
6719 rela.r_offset = (htab->root.sgot->output_section->vma
6720 + htab->root.sgot->output_offset
6721 + (h->got.offset & ~(bfd_vma) 1));
6722
6723 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6724 {
6725 if (!h->def_regular)
6726 return FALSE;
6727
6728 BFD_ASSERT ((h->got.offset & 1) != 0);
6729 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6730 rela.r_addend = (h->root.u.def.value
6731 + h->root.u.def.section->output_section->vma
6732 + h->root.u.def.section->output_offset);
6733 }
6734 else
6735 {
6736 BFD_ASSERT ((h->got.offset & 1) == 0);
6737 bfd_put_64 (output_bfd, (bfd_vma) 0,
6738 htab->root.sgot->contents + h->got.offset);
6739 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6740 rela.r_addend = 0;
6741 }
6742
6743 loc = htab->root.srelgot->contents;
6744 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6745 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6746 }
6747
6748 if (h->needs_copy)
6749 {
6750 Elf_Internal_Rela rela;
6751 bfd_byte *loc;
6752
6753 /* This symbol needs a copy reloc. Set it up. */
6754
6755 if (h->dynindx == -1
6756 || (h->root.type != bfd_link_hash_defined
6757 && h->root.type != bfd_link_hash_defweak)
6758 || htab->srelbss == NULL)
6759 abort ();
6760
6761 rela.r_offset = (h->root.u.def.value
6762 + h->root.u.def.section->output_section->vma
6763 + h->root.u.def.section->output_offset);
6764 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6765 rela.r_addend = 0;
6766 loc = htab->srelbss->contents;
6767 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6768 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6769 }
6770
6771 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6772 be NULL for local symbols. */
6773 if (sym != NULL
9637f6ef 6774 && (h == elf_hash_table (info)->hdynamic
a06ea964
NC
6775 || h == elf_hash_table (info)->hgot))
6776 sym->st_shndx = SHN_ABS;
6777
6778 return TRUE;
6779}
6780
6781static void
6782elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6783 struct elf64_aarch64_link_hash_table
6784 *htab)
6785{
 6786  /* Fill in PLT0.  FIXME: RR Note this doesn't distinguish between
 6787     small and large PLTs and at the moment just generates
 6788     the small PLT.  */
6789
6790 /* PLT0 of the small PLT looks like this -
6791 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6792 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6793 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6794 // symbol resolver
6795 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6796 // GOTPLT entry for this.
6797 br x17
6798 */
6799 bfd_vma plt_got_base;
6800 bfd_vma plt_base;
6801
6802
6803 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6804 PLT_ENTRY_SIZE);
6805 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6806 PLT_ENTRY_SIZE;
6807
6808 plt_got_base = (htab->root.sgotplt->output_section->vma
6809 + htab->root.sgotplt->output_offset);
6810
6811 plt_base = htab->root.splt->output_section->vma +
6812 htab->root.splt->output_section->output_offset;
6813
6814 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6815 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6816 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6817 htab->root.splt->contents + 4,
6818 PG (plt_got_base + 16) - PG (plt_base + 4));
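  /* The "+ 16" selects the third reserved .got.plt slot (GOT[2], which the
     dynamic linker fills with the resolver address), and the "+ 4" accounts
     for the ADRP being the second instruction of PLT0.  */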
6819
6820 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6821 htab->root.splt->contents + 8,
6822 PG_OFFSET (plt_got_base + 16));
6823
6824 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6825 htab->root.splt->contents + 12,
6826 PG_OFFSET (plt_got_base + 16));
6827}
6828
6829static bfd_boolean
6830elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6831 struct bfd_link_info *info)
6832{
6833 struct elf64_aarch64_link_hash_table *htab;
6834 bfd *dynobj;
6835 asection *sdyn;
6836
6837 htab = elf64_aarch64_hash_table (info);
6838 dynobj = htab->root.dynobj;
6839 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6840
6841 if (htab->root.dynamic_sections_created)
6842 {
6843 Elf64_External_Dyn *dyncon, *dynconend;
6844
6845 if (sdyn == NULL || htab->root.sgot == NULL)
6846 abort ();
6847
6848 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6849 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6850 for (; dyncon < dynconend; dyncon++)
6851 {
6852 Elf_Internal_Dyn dyn;
6853 asection *s;
6854
6855 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6856
6857 switch (dyn.d_tag)
6858 {
6859 default:
6860 continue;
6861
6862 case DT_PLTGOT:
6863 s = htab->root.sgotplt;
6864 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6865 break;
6866
6867 case DT_JMPREL:
6868 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6869 break;
6870
6871 case DT_PLTRELSZ:
6872 s = htab->root.srelplt->output_section;
6873 dyn.d_un.d_val = s->size;
6874 break;
6875
6876 case DT_RELASZ:
6877 /* The procedure linkage table relocs (DT_JMPREL) should
6878 not be included in the overall relocs (DT_RELA).
6879 Therefore, we override the DT_RELASZ entry here to
6880 make it not include the JMPREL relocs. Since the
6881 linker script arranges for .rela.plt to follow all
6882 other relocation sections, we don't have to worry
6883 about changing the DT_RELA entry. */
6884 if (htab->root.srelplt != NULL)
6885 {
6886 s = htab->root.srelplt->output_section;
6887 dyn.d_un.d_val -= s->size;
6888 }
6889 break;
6890
6891 case DT_TLSDESC_PLT:
6892 s = htab->root.splt;
6893 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6894 + htab->tlsdesc_plt;
6895 break;
6896
6897 case DT_TLSDESC_GOT:
6898 s = htab->root.sgot;
6899 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6900 + htab->dt_tlsdesc_got;
6901 break;
6902 }
6903
6904 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6905 }
6906
6907 }
6908
6909 /* Fill in the special first entry in the procedure linkage table. */
6910 if (htab->root.splt && htab->root.splt->size > 0)
6911 {
6912 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6913
6914 elf_section_data (htab->root.splt->output_section)->
6915 this_hdr.sh_entsize = htab->plt_entry_size;
6916
6917
6918 if (htab->tlsdesc_plt)
6919 {
6920 bfd_put_64 (output_bfd, (bfd_vma) 0,
6921 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6922
6923 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6924 elf64_aarch64_tlsdesc_small_plt_entry,
6925 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6926
6927 {
6928 bfd_vma adrp1_addr =
6929 htab->root.splt->output_section->vma
6930 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6931
6932 bfd_vma adrp2_addr =
6933 htab->root.splt->output_section->vma
6934 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6935
6936 bfd_vma got_addr =
6937 htab->root.sgot->output_section->vma
6938 + htab->root.sgot->output_offset;
6939
6940 bfd_vma pltgot_addr =
6941 htab->root.sgotplt->output_section->vma
6942 + htab->root.sgotplt->output_offset;
6943
6944 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6945 bfd_vma opcode;
6946
6947 /* adrp x2, DT_TLSDESC_GOT */
6948 opcode = bfd_get_32 (output_bfd,
6949 htab->root.splt->contents
6950 + htab->tlsdesc_plt + 4);
6951 opcode = reencode_adr_imm
6952 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6953 bfd_put_32 (output_bfd, opcode,
6954 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6955
6956 /* adrp x3, 0 */
6957 opcode = bfd_get_32 (output_bfd,
6958 htab->root.splt->contents
6959 + htab->tlsdesc_plt + 8);
6960 opcode = reencode_adr_imm
6961 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6962 bfd_put_32 (output_bfd, opcode,
6963 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6964
6965 /* ldr x2, [x2, #0] */
6966 opcode = bfd_get_32 (output_bfd,
6967 htab->root.splt->contents
6968 + htab->tlsdesc_plt + 12);
6969 opcode = reencode_ldst_pos_imm (opcode,
6970 PG_OFFSET (dt_tlsdesc_got) >> 3);
6971 bfd_put_32 (output_bfd, opcode,
6972 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6973
6974 /* add x3, x3, 0 */
6975 opcode = bfd_get_32 (output_bfd,
6976 htab->root.splt->contents
6977 + htab->tlsdesc_plt + 16);
6978 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6979 bfd_put_32 (output_bfd, opcode,
6980 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6981 }
6982 }
6983 }
6984
6985 if (htab->root.sgotplt)
6986 {
6987 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6988 {
6989 (*_bfd_error_handler)
6990 (_("discarded output section: `%A'"), htab->root.sgotplt);
6991 return FALSE;
6992 }
6993
6994 /* Fill in the first three entries in the global offset table. */
6995 if (htab->root.sgotplt->size > 0)
6996 {
6997 /* Set the first entry in the global offset table to the address of
6998 the dynamic section. */
6999 if (sdyn == NULL)
7000 bfd_put_64 (output_bfd, (bfd_vma) 0,
7001 htab->root.sgotplt->contents);
7002 else
7003 bfd_put_64 (output_bfd,
7004 sdyn->output_section->vma + sdyn->output_offset,
7005 htab->root.sgotplt->contents);
7006 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7007 bfd_put_64 (output_bfd,
7008 (bfd_vma) 0,
7009 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7010 bfd_put_64 (output_bfd,
7011 (bfd_vma) 0,
7012 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7013 }
7014
7015 elf_section_data (htab->root.sgotplt->output_section)->
7016 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7017 }
7018
7019 if (htab->root.sgot && htab->root.sgot->size > 0)
7020 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7021 = GOT_ENTRY_SIZE;
7022
7023 return TRUE;
7024}
7025
7026/* Return address for Ith PLT stub in section PLT, for relocation REL
7027 or (bfd_vma) -1 if it should not be included. */
7028
7029static bfd_vma
7030elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7031 const arelent *rel ATTRIBUTE_UNUSED)
7032{
7033 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7034}
7035
7036
7037/* We use this so we can override certain functions
7038 (though currently we don't). */
7039
7040const struct elf_size_info elf64_aarch64_size_info =
7041{
7042 sizeof (Elf64_External_Ehdr),
7043 sizeof (Elf64_External_Phdr),
7044 sizeof (Elf64_External_Shdr),
7045 sizeof (Elf64_External_Rel),
7046 sizeof (Elf64_External_Rela),
7047 sizeof (Elf64_External_Sym),
7048 sizeof (Elf64_External_Dyn),
7049 sizeof (Elf_External_Note),
7050 4, /* Hash table entry size. */
7051 1, /* Internal relocs per external relocs. */
7052 64, /* Arch size. */
7053 3, /* Log_file_align. */
7054 ELFCLASS64, EV_CURRENT,
7055 bfd_elf64_write_out_phdrs,
7056 bfd_elf64_write_shdrs_and_ehdr,
7057 bfd_elf64_checksum_contents,
7058 bfd_elf64_write_relocs,
7059 bfd_elf64_swap_symbol_in,
7060 bfd_elf64_swap_symbol_out,
7061 bfd_elf64_slurp_reloc_table,
7062 bfd_elf64_slurp_symbol_table,
7063 bfd_elf64_swap_dyn_in,
7064 bfd_elf64_swap_dyn_out,
7065 bfd_elf64_swap_reloc_in,
7066 bfd_elf64_swap_reloc_out,
7067 bfd_elf64_swap_reloca_in,
7068 bfd_elf64_swap_reloca_out
7069};
7070
7071#define ELF_ARCH bfd_arch_aarch64
7072#define ELF_MACHINE_CODE EM_AARCH64
7073#define ELF_MAXPAGESIZE 0x10000
7074#define ELF_MINPAGESIZE 0x1000
7075#define ELF_COMMONPAGESIZE 0x1000
7076
7077#define bfd_elf64_close_and_cleanup \
7078 elf64_aarch64_close_and_cleanup
7079
7080#define bfd_elf64_bfd_copy_private_bfd_data \
7081 elf64_aarch64_copy_private_bfd_data
7082
7083#define bfd_elf64_bfd_free_cached_info \
7084 elf64_aarch64_bfd_free_cached_info
7085
7086#define bfd_elf64_bfd_is_target_special_symbol \
7087 elf64_aarch64_is_target_special_symbol
7088
7089#define bfd_elf64_bfd_link_hash_table_create \
7090 elf64_aarch64_link_hash_table_create
7091
7092#define bfd_elf64_bfd_link_hash_table_free \
7093 elf64_aarch64_hash_table_free
7094
7095#define bfd_elf64_bfd_merge_private_bfd_data \
7096 elf64_aarch64_merge_private_bfd_data
7097
7098#define bfd_elf64_bfd_print_private_bfd_data \
7099 elf64_aarch64_print_private_bfd_data
7100
7101#define bfd_elf64_bfd_reloc_type_lookup \
7102 elf64_aarch64_reloc_type_lookup
7103
7104#define bfd_elf64_bfd_reloc_name_lookup \
7105 elf64_aarch64_reloc_name_lookup
7106
7107#define bfd_elf64_bfd_set_private_flags \
7108 elf64_aarch64_set_private_flags
7109
7110#define bfd_elf64_find_inliner_info \
7111 elf64_aarch64_find_inliner_info
7112
7113#define bfd_elf64_find_nearest_line \
7114 elf64_aarch64_find_nearest_line
7115
7116#define bfd_elf64_mkobject \
7117 elf64_aarch64_mkobject
7118
7119#define bfd_elf64_new_section_hook \
7120 elf64_aarch64_new_section_hook
7121
7122#define elf_backend_adjust_dynamic_symbol \
7123 elf64_aarch64_adjust_dynamic_symbol
7124
7125#define elf_backend_always_size_sections \
7126 elf64_aarch64_always_size_sections
7127
7128#define elf_backend_check_relocs \
7129 elf64_aarch64_check_relocs
7130
7131#define elf_backend_copy_indirect_symbol \
7132 elf64_aarch64_copy_indirect_symbol
7133
7134/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7135 to them in our hash. */
7136#define elf_backend_create_dynamic_sections \
7137 elf64_aarch64_create_dynamic_sections
7138
7139#define elf_backend_init_index_section \
7140 _bfd_elf_init_2_index_sections
7141
7142#define elf_backend_is_function_type \
7143 elf64_aarch64_is_function_type
7144
7145#define elf_backend_finish_dynamic_sections \
7146 elf64_aarch64_finish_dynamic_sections
7147
7148#define elf_backend_finish_dynamic_symbol \
7149 elf64_aarch64_finish_dynamic_symbol
7150
7151#define elf_backend_gc_sweep_hook \
7152 elf64_aarch64_gc_sweep_hook
7153
7154#define elf_backend_object_p \
7155 elf64_aarch64_object_p
7156
7157#define elf_backend_output_arch_local_syms \
7158 elf64_aarch64_output_arch_local_syms
7159
7160#define elf_backend_plt_sym_val \
7161 elf64_aarch64_plt_sym_val
7162
7163#define elf_backend_post_process_headers \
7164 elf64_aarch64_post_process_headers
7165
7166#define elf_backend_relocate_section \
7167 elf64_aarch64_relocate_section
7168
7169#define elf_backend_reloc_type_class \
7170 elf64_aarch64_reloc_type_class
7171
7172#define elf_backend_section_flags \
7173 elf64_aarch64_section_flags
7174
7175#define elf_backend_section_from_shdr \
7176 elf64_aarch64_section_from_shdr
7177
7178#define elf_backend_size_dynamic_sections \
7179 elf64_aarch64_size_dynamic_sections
7180
7181#define elf_backend_size_info \
7182 elf64_aarch64_size_info
7183
7184#define elf_backend_can_refcount 1
 7185#define elf_backend_can_gc_sections 1
7186#define elf_backend_plt_readonly 1
7187#define elf_backend_want_got_plt 1
7188#define elf_backend_want_plt_sym 0
7189#define elf_backend_may_use_rel_p 0
7190#define elf_backend_may_use_rela_p 1
7191#define elf_backend_default_use_rela_p 1
7192#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7193
7194#undef elf_backend_obj_attrs_section
7195#define elf_backend_obj_attrs_section ".ARM.attributes"
7196
7197#include "elf64-target.h"