Add support for 64-bit ARM architecture: AArch64
[deliverable/binutils-gdb.git] / bfd / elf64-aarch64.c
1/* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
 23   Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
 49   The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51   traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
 58   perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62   global and local TLS symbols. Note that this is different from
 63   non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
 74   linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
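
   Schematically (a sketch of the standard AArch64 TLS ABI layout, not
   structures defined in this file) the two GOT slots hold:

     traditional GD:   GOT[n]   module id    <- R_AARCH64_TLS_DTPMOD64
                       GOT[n+1] DTV offset   <- R_AARCH64_TLS_DTPREL64

     TLS descriptor:   GOT[n]   resolver function pointer
                       GOT[n+1] argument passed to the resolver
                       (one R_AARCH64_TLSDESC reloc, placed on the first
                       slot, covers the pair)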
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
 99   The TLS relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} and
 100   R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
 101   spotted. The local symbol data structures are created once, when
 102   the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
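
   Roughly, in pseudo-code (a sketch, not the exact code):

     if the reloc is one of R_AARCH64_TLSGD_*      got_type |= GOT_TLS_GD
     if the reloc is one of R_AARCH64_TLSDESC_*    got_type |= GOT_TLSDESC_GD
     increment the symbol's GOT reference count

   where GOT_TLS_GD and GOT_TLSDESC_GD are the got_type bits defined
   further down in this file.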
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elf64_aarch64_size_dynamic_sections ()
116
 117   Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128   relocations are emitted once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
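
   As a sketch (not the exact code), the LSB trick looks like:

     off = GOT offset recorded for the symbol;
     if (off & 1)
       off &= ~1;                    <- GOT relocations already emitted
     else
       {
         emit the GOT relocation(s) for the symbol;
         record (off | 1) as the symbol's GOT offset;
       }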
133
134 elf64_aarch64_final_link_relocate ()
135
 136   Fix up the R_AARCH64_TLSGD_{ADR_PAGE21, ADD_LO12_NC} relocations. */
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "elf/aarch64.h"
146
147static bfd_reloc_status_type
148bfd_elf_aarch64_put_addend (bfd *abfd,
149 bfd_byte *address,
150 reloc_howto_type *howto, bfd_signed_vma addend);
151
152#define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
172
173#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
185
186#define ELIMINATE_COPY_RELOCS 0
187
188/* Return the relocation section associated with NAME. HTAB is the
 189   bfd's elf64_aarch64_link_hash_table. */
190#define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
192
193/* Return size of a relocation entry. HTAB is the bfd's
 194   elf64_aarch64_link_hash_table. */
195#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
196
197/* Return function to swap relocations in. HTAB is the bfd's
 198   elf64_aarch64_link_hash_table. */
199#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
200
201/* Return function to swap relocations out. HTAB is the bfd's
 202   elf64_aarch64_link_hash_table. */
203#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
204
205/* GOT Entry size - 8 bytes. */
206#define GOT_ENTRY_SIZE (8)
207#define PLT_ENTRY_SIZE (32)
208#define PLT_SMALL_ENTRY_SIZE (16)
209#define PLT_TLSDESC_ENTRY_SIZE (32)
210
211/* Take the PAGE component of an address or offset. */
212#define PG(x) ((x) & ~ 0xfff)
213#define PG_OFFSET(x) ((x) & 0xfff)
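/* For example, with x == 0x40321, PG (x) == 0x40000 and
   PG_OFFSET (x) == 0x321; the ADRP immediate used by the relocations
   below is then (PG (S+A) - PG (P)) >> 12.  */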
214
215/* Encoding of the nop instruction.  */
216#define INSN_NOP 0xd503201f
217
218#define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
221
222/* The first entry in a procedure linkage table looks like this.
 223   These PLT entries are used if the distance between the PLTGOT
 224   and the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
 225   in x16 and needs to work out PLTGOT[1] by using an address of
 226   [x16,#-8]. */
227static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
228{
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237};
238
239/* A per-function entry in a procedure linkage table looks like this.
 240   These PLT entries are used if the distance between the PLTGOT
 241   and the PLT is < 4GB. */
242static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
243{
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
248};
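
/* A minimal sketch (not the backend's actual routine) of how the
   immediates in the template above might be filled in for PLT entry n.
   The variables output_bfd, plt_entry, gotplt_entry_address and
   plt_entry_address are hypothetical locals used only for
   illustration.  */
#if 0
static void
example_fill_small_plt_entry (bfd *output_bfd, bfd_byte *plt_entry,
                              bfd_vma gotplt_entry_address,
                              bfd_vma plt_entry_address)
{
  /* adrp x16, PLTGOT + n * 8: page delta measured from the adrp.  */
  bfd_elf_aarch64_put_addend (output_bfd, plt_entry,
                              elf64_aarch64_howto_from_type
                              (R_AARCH64_ADR_PREL_PG_HI21),
                              PG (gotplt_entry_address)
                              - PG (plt_entry_address));

  /* ldr x17, [x16, #:lo12:PLTGOT + n * 8].  */
  bfd_elf_aarch64_put_addend (output_bfd, plt_entry + 4,
                              elf64_aarch64_howto_from_type
                              (R_AARCH64_LDST64_ABS_LO12_NC),
                              PG_OFFSET (gotplt_entry_address));

  /* add x16, x16, #:lo12:PLTGOT + n * 8.  */
  bfd_elf_aarch64_put_addend (output_bfd, plt_entry + 8,
                              elf64_aarch64_howto_from_type
                              (R_AARCH64_ADD_ABS_LO12_NC),
                              PG_OFFSET (gotplt_entry_address));
}
#endif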
249
250static const bfd_byte
251elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
252{
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261};
262
263#define elf_info_to_howto elf64_aarch64_info_to_howto
264#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
265
266#define AARCH64_ELF_ABI_VERSION 0
267#define AARCH64_ELF_OS_ABI_VERSION 0
268
269/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270#define ALL_ONES (~ (bfd_vma) 0)
271
272static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
274 0, /* rightshift */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
276 0, /* bitsize */
277 FALSE, /* pc_relative */
278 0, /* bitpos */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
283 0, /* src_mask */
284 0, /* dst_mask */
285 FALSE); /* pcrel_offset */
286
287static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
288{
289 HOWTO (R_AARCH64_COPY, /* type */
290 0, /* rightshift */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
292 64, /* bitsize */
293 FALSE, /* pc_relative */
294 0, /* bitpos */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
302
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
304 0, /* rightshift */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
306 64, /* bitsize */
307 FALSE, /* pc_relative */
308 0, /* bitpos */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
316
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
318 0, /* rightshift */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
320 64, /* bitsize */
321 FALSE, /* pc_relative */
322 0, /* bitpos */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
330
331 HOWTO (R_AARCH64_RELATIVE, /* type */
332 0, /* rightshift */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
334 64, /* bitsize */
335 FALSE, /* pc_relative */
336 0, /* bitpos */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
344
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 64, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
355 0, /* src_mask */
356 ALL_ONES, /* dst_mask */
 357	 FALSE),		/* pcrel_offset */
358
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
360 0, /* rightshift */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
362 64, /* bitsize */
363 FALSE, /* pc_relative */
364 0, /* bitpos */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
369 0, /* src_mask */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
372
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
383 0, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_AARCH64_TLSDESC, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 64, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
397 0, /* src_mask */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401};
402
403/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
405 in that slot. */
406
407static reloc_howto_type elf64_aarch64_howto_table[] =
408{
409 /* Basic data relocations. */
410
411 HOWTO (R_AARCH64_NULL, /* type */
412 0, /* rightshift */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
414 0, /* bitsize */
415 FALSE, /* pc_relative */
416 0, /* bitpos */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
421 0, /* src_mask */
422 0, /* dst_mask */
423 FALSE), /* pcrel_offset */
424
425 /* .xword: (S+A) */
426 HOWTO (R_AARCH64_ABS64, /* type */
427 0, /* rightshift */
428 4, /* size (4 = long long) */
429 64, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .word: (S+A) */
441 HOWTO (R_AARCH64_ABS32, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .half: (S+A) */
456 HOWTO (R_AARCH64_ABS16, /* type */
457 0, /* rightshift */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
459 16, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
472 0, /* rightshift */
473 4, /* size (4 = long long) */
474 64, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 /* .word: (S+A-P) */
486 HOWTO (R_AARCH64_PREL32, /* type */
487 0, /* rightshift */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
489 32, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .half: (S+A-P) */
501 HOWTO (R_AARCH64_PREL16, /* type */
502 0, /* rightshift */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
504 16, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
517
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
520 0, /* rightshift */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
522 16, /* bitsize */
523 FALSE, /* pc_relative */
524 0, /* bitpos */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
532
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
580 32, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
610 48, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
626
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
629 0, /* rightshift */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
631 16, /* bitsize */
632 FALSE, /* pc_relative */
633 0, /* bitpos */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
641
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
644 16, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
659 32, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
674
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
677 2, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 19, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
692 0, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 21, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
707 12, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 12, /* bitsize */
740 FALSE, /* pc_relative */
741 10, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
749
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* Relocations for control-flow instructions. */
766
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
769 2, /* rightshift */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
771 14, /* bitsize */
772 TRUE, /* pc_relative */
773 0, /* bitpos */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
781
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 19, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 EMPTY_HOWTO (281),
798
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
801 2, /* rightshift */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
803 26, /* bitsize */
804 TRUE, /* pc_relative */
805 0, /* bitpos */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
813
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
831 1, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 12, /* bitsize */
834 FALSE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
843
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
846 2, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
861 3, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 EMPTY_HOWTO (287),
875 EMPTY_HOWTO (288),
876 EMPTY_HOWTO (289),
877 EMPTY_HOWTO (290),
878 EMPTY_HOWTO (291),
879 EMPTY_HOWTO (292),
880 EMPTY_HOWTO (293),
881 EMPTY_HOWTO (294),
882 EMPTY_HOWTO (295),
883 EMPTY_HOWTO (296),
884 EMPTY_HOWTO (297),
885 EMPTY_HOWTO (298),
886
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
889 4, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 EMPTY_HOWTO (300),
903 EMPTY_HOWTO (301),
904 EMPTY_HOWTO (302),
905 EMPTY_HOWTO (303),
906 EMPTY_HOWTO (304),
907 EMPTY_HOWTO (305),
908 EMPTY_HOWTO (306),
909 EMPTY_HOWTO (307),
910 EMPTY_HOWTO (308),
911 EMPTY_HOWTO (309),
912 EMPTY_HOWTO (310),
913
914 /* Get to the page for the GOT entry for the symbol
915 (G(S) - P) using an ADRP instruction. */
916 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
917 12, /* rightshift */
918 2, /* size (0 = byte, 1 = short, 2 = long) */
919 21, /* bitsize */
920 TRUE, /* pc_relative */
921 0, /* bitpos */
922 complain_overflow_dont, /* complain_on_overflow */
923 bfd_elf_generic_reloc, /* special_function */
924 "R_AARCH64_ADR_GOT_PAGE", /* name */
925 FALSE, /* partial_inplace */
926 0x1fffff, /* src_mask */
927 0x1fffff, /* dst_mask */
928 TRUE), /* pcrel_offset */
929
930 /* LD64: GOT offset G(S) & 0xff8 */
931 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
932 3, /* rightshift */
933 2, /* size (0 = byte, 1 = short, 2 = long) */
934 12, /* bitsize */
935 FALSE, /* pc_relative */
936 0, /* bitpos */
937 complain_overflow_dont, /* complain_on_overflow */
938 bfd_elf_generic_reloc, /* special_function */
939 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
940 FALSE, /* partial_inplace */
941 0xff8, /* src_mask */
942 0xff8, /* dst_mask */
943 FALSE) /* pcrel_offset */
944};
945
946static reloc_howto_type elf64_aarch64_tls_howto_table[] =
947{
948 EMPTY_HOWTO (512),
949
950 /* Get to the page for the GOT entry for the symbol
951 (G(S) - P) using an ADRP instruction. */
952 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
953 12, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 21, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
961 FALSE, /* partial_inplace */
962 0x1fffff, /* src_mask */
963 0x1fffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
 966  /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
967 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
968 0, /* rightshift */
969 2, /* size (0 = byte, 1 = short, 2 = long) */
970 12, /* bitsize */
971 FALSE, /* pc_relative */
972 0, /* bitpos */
973 complain_overflow_dont, /* complain_on_overflow */
974 bfd_elf_generic_reloc, /* special_function */
975 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
976 FALSE, /* partial_inplace */
977 0xfff, /* src_mask */
978 0xfff, /* dst_mask */
979 FALSE), /* pcrel_offset */
980
981 EMPTY_HOWTO (515),
982 EMPTY_HOWTO (516),
983 EMPTY_HOWTO (517),
984 EMPTY_HOWTO (518),
985 EMPTY_HOWTO (519),
986 EMPTY_HOWTO (520),
987 EMPTY_HOWTO (521),
988 EMPTY_HOWTO (522),
989 EMPTY_HOWTO (523),
990 EMPTY_HOWTO (524),
991 EMPTY_HOWTO (525),
992 EMPTY_HOWTO (526),
993 EMPTY_HOWTO (527),
994 EMPTY_HOWTO (528),
995 EMPTY_HOWTO (529),
996 EMPTY_HOWTO (530),
997 EMPTY_HOWTO (531),
998 EMPTY_HOWTO (532),
999 EMPTY_HOWTO (533),
1000 EMPTY_HOWTO (534),
1001 EMPTY_HOWTO (535),
1002 EMPTY_HOWTO (536),
1003 EMPTY_HOWTO (537),
1004 EMPTY_HOWTO (538),
1005
1006 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1007 16, /* rightshift */
1008 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 16, /* bitsize */
1010 FALSE, /* pc_relative */
1011 0, /* bitpos */
1012 complain_overflow_dont, /* complain_on_overflow */
1013 bfd_elf_generic_reloc, /* special_function */
1014 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1015 FALSE, /* partial_inplace */
1016 0xffff, /* src_mask */
1017 0xffff, /* dst_mask */
1018 FALSE), /* pcrel_offset */
1019
1020 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1021 0, /* rightshift */
1022 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 32, /* bitsize */
1024 FALSE, /* pc_relative */
1025 0, /* bitpos */
1026 complain_overflow_dont, /* complain_on_overflow */
1027 bfd_elf_generic_reloc, /* special_function */
1028 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1029 FALSE, /* partial_inplace */
1030 0xffff, /* src_mask */
1031 0xffff, /* dst_mask */
1032 FALSE), /* pcrel_offset */
1033
1034 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1035 12, /* rightshift */
1036 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 21, /* bitsize */
1038 FALSE, /* pc_relative */
1039 0, /* bitpos */
1040 complain_overflow_dont, /* complain_on_overflow */
1041 bfd_elf_generic_reloc, /* special_function */
1042 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1043 FALSE, /* partial_inplace */
1044 0x1fffff, /* src_mask */
1045 0x1fffff, /* dst_mask */
1046 FALSE), /* pcrel_offset */
1047
1048 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1049 3, /* rightshift */
1050 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 12, /* bitsize */
1052 FALSE, /* pc_relative */
1053 0, /* bitpos */
1054 complain_overflow_dont, /* complain_on_overflow */
1055 bfd_elf_generic_reloc, /* special_function */
1056 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1057 FALSE, /* partial_inplace */
1058 0xff8, /* src_mask */
1059 0xff8, /* dst_mask */
1060 FALSE), /* pcrel_offset */
1061
1062 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
1063 0, /* rightshift */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 21, /* bitsize */
1066 FALSE, /* pc_relative */
1067 0, /* bitpos */
1068 complain_overflow_dont, /* complain_on_overflow */
1069 bfd_elf_generic_reloc, /* special_function */
1070 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1071 FALSE, /* partial_inplace */
1072 0x1ffffc, /* src_mask */
1073 0x1ffffc, /* dst_mask */
1074 FALSE), /* pcrel_offset */
1075
1076 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
1077 8, /* rightshift */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 12, /* bitsize */
1080 FALSE, /* pc_relative */
1081 0, /* bitpos */
1082 complain_overflow_dont, /* complain_on_overflow */
1083 bfd_elf_generic_reloc, /* special_function */
1084 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1085 FALSE, /* partial_inplace */
1086 0xffff, /* src_mask */
1087 0xffff, /* dst_mask */
1088 FALSE), /* pcrel_offset */
1089
1090 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
1091 4, /* rightshift */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 12, /* bitsize */
1094 FALSE, /* pc_relative */
1095 0, /* bitpos */
1096 complain_overflow_dont, /* complain_on_overflow */
1097 bfd_elf_generic_reloc, /* special_function */
1098 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1099 FALSE, /* partial_inplace */
1100 0xffff, /* src_mask */
1101 0xffff, /* dst_mask */
1102 FALSE), /* pcrel_offset */
1103
1104 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
1105 4, /* rightshift */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 12, /* bitsize */
1108 FALSE, /* pc_relative */
1109 0, /* bitpos */
1110 complain_overflow_dont, /* complain_on_overflow */
1111 bfd_elf_generic_reloc, /* special_function */
1112 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1113 FALSE, /* partial_inplace */
1114 0xffff, /* src_mask */
1115 0xffff, /* dst_mask */
1116 FALSE), /* pcrel_offset */
1117
1118 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1119 0, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 12, /* bitsize */
1122 FALSE, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_dont, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1127 FALSE, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 FALSE), /* pcrel_offset */
1131
1132 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1133 0, /* rightshift */
1134 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 12, /* bitsize */
1136 FALSE, /* pc_relative */
1137 0, /* bitpos */
1138 complain_overflow_dont, /* complain_on_overflow */
1139 bfd_elf_generic_reloc, /* special_function */
1140 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1141 FALSE, /* partial_inplace */
1142 0xffff, /* src_mask */
1143 0xffff, /* dst_mask */
1144 FALSE), /* pcrel_offset */
1145
1146 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
1147 3, /* rightshift */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 12, /* bitsize */
1150 FALSE, /* pc_relative */
1151 0, /* bitpos */
1152 complain_overflow_dont, /* complain_on_overflow */
1153 bfd_elf_generic_reloc, /* special_function */
1154 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1155 FALSE, /* partial_inplace */
1156 0xfff, /* src_mask */
1157 0xfff, /* dst_mask */
1158 FALSE), /* pcrel_offset */
1159
1160 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1161 0, /* rightshift */
1162 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 12, /* bitsize */
1164 FALSE, /* pc_relative */
1165 0, /* bitpos */
1166 complain_overflow_dont, /* complain_on_overflow */
1167 bfd_elf_generic_reloc, /* special_function */
1168 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1169 FALSE, /* partial_inplace */
1170 0xfff, /* src_mask */
1171 0xfff, /* dst_mask */
1172 FALSE), /* pcrel_offset */
1173
1174 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1175 0, /* rightshift */
1176 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 12, /* bitsize */
1178 FALSE, /* pc_relative */
1179 0, /* bitpos */
1180 complain_overflow_dont, /* complain_on_overflow */
1181 bfd_elf_generic_reloc, /* special_function */
1182 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1183 FALSE, /* partial_inplace */
1184 0xfff, /* src_mask */
1185 0xfff, /* dst_mask */
1186 FALSE), /* pcrel_offset */
1187};
1188
1189static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1190{
1191 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */
1192 0, /* rightshift */
1193 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 21, /* bitsize */
1195 TRUE, /* pc_relative */
1196 0, /* bitpos */
1197 complain_overflow_dont, /* complain_on_overflow */
1198 bfd_elf_generic_reloc, /* special_function */
1199 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1200 FALSE, /* partial_inplace */
1201 0x1ffffc, /* src_mask */
1202 0x1ffffc, /* dst_mask */
1203 TRUE), /* pcrel_offset */
1204
1205 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1206 0, /* rightshift */
1207 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 21, /* bitsize */
1209 TRUE, /* pc_relative */
1210 0, /* bitpos */
1211 complain_overflow_dont, /* complain_on_overflow */
1212 bfd_elf_generic_reloc, /* special_function */
1213 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1214 FALSE, /* partial_inplace */
1215 0x1fffff, /* src_mask */
1216 0x1fffff, /* dst_mask */
1217 TRUE), /* pcrel_offset */
1218
1219 /* Get to the page for the GOT entry for the symbol
1220 (G(S) - P) using an ADRP instruction. */
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */
1222 12, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1224 21, /* bitsize */
1225 TRUE, /* pc_relative */
1226 0, /* bitpos */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1234
1235 /* LD64: GOT offset G(S) & 0xfff. */
1236 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1237 3, /* rightshift */
1238 2, /* size (0 = byte, 1 = short, 2 = long) */
1239 12, /* bitsize */
1240 FALSE, /* pc_relative */
1241 0, /* bitpos */
1242 complain_overflow_dont, /* complain_on_overflow */
1243 bfd_elf_generic_reloc, /* special_function */
1244 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1245 FALSE, /* partial_inplace */
1246 0xfff, /* src_mask */
1247 0xfff, /* dst_mask */
1248 FALSE), /* pcrel_offset */
1249
1250 /* ADD: GOT offset G(S) & 0xfff. */
1251 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 12, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_dont, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1260 FALSE, /* partial_inplace */
1261 0xfff, /* src_mask */
1262 0xfff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
1266 4, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 12, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_dont, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1274 FALSE, /* partial_inplace */
1275 0xffff, /* src_mask */
1276 0xffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 12, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_dont, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1288 FALSE, /* partial_inplace */
1289 0xffff, /* src_mask */
1290 0xffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 12, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_dont, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 "R_AARCH64_TLSDESC_LDR", /* name */
1302 FALSE, /* partial_inplace */
1303 0x0, /* src_mask */
1304 0x0, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 12, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 "R_AARCH64_TLSDESC_ADD", /* name */
1316 FALSE, /* partial_inplace */
1317 0x0, /* src_mask */
1318 0x0, /* dst_mask */
1319 FALSE), /* pcrel_offset */
1320
1321 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1322 0, /* rightshift */
1323 2, /* size (0 = byte, 1 = short, 2 = long) */
1324 12, /* bitsize */
1325 FALSE, /* pc_relative */
1326 0, /* bitpos */
1327 complain_overflow_dont, /* complain_on_overflow */
1328 bfd_elf_generic_reloc, /* special_function */
1329 "R_AARCH64_TLSDESC_CALL", /* name */
1330 FALSE, /* partial_inplace */
1331 0x0, /* src_mask */
1332 0x0, /* dst_mask */
1333 FALSE), /* pcrel_offset */
1334};
1335
1336static reloc_howto_type *
1337elf64_aarch64_howto_from_type (unsigned int r_type)
1338{
1339 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1340 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1341
1342 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1343 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1344
1345 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1346 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1347
1348 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1349 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1350
1351 switch (r_type)
1352 {
1353 case R_AARCH64_NONE:
1354 return &elf64_aarch64_howto_none;
1355
1356 }
1357 bfd_set_error (bfd_error_bad_value);
1358 return NULL;
1359}
1360
1361static void
1362elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1363 Elf_Internal_Rela *elf_reloc)
1364{
1365 unsigned int r_type;
1366
1367 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1368 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1369}
1370
1371struct elf64_aarch64_reloc_map
1372{
1373 bfd_reloc_code_real_type bfd_reloc_val;
1374 unsigned int elf_reloc_val;
1375};
1376
1377/* All entries in this list must also be present in
1378 elf64_aarch64_howto_table. */
1379static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1380{
1381 {BFD_RELOC_NONE, R_AARCH64_NONE},
1382
1383 /* Basic data relocations. */
1384 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1385 {BFD_RELOC_64, R_AARCH64_ABS64},
1386 {BFD_RELOC_32, R_AARCH64_ABS32},
1387 {BFD_RELOC_16, R_AARCH64_ABS16},
1388 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1389 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1390 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1391
1392 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1393 value inline. */
1394 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1395 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1396 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1397
1398 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1399 signed value inline. */
1400 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1401 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1402 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1403
1404 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1405 unsigned value inline. */
1406 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1407 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1408 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1409 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1410
1411 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1412 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1413 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1414 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1415 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1416 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1417 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1418 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1419 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1420 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1421 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1422
1423 /* Relocations for control-flow instructions. */
1424 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1425 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1426 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1427 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1428
1429 /* Relocations for PIC. */
1430 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1431 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1432
1433 /* Relocations for TLS. */
1434 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1435 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1436 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1437 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1438 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1439 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1440 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1441 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1442 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1443 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1444 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1445 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1446 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1447 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1448 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1449 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1450 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1451 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1452 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1453 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1454 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1455 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1456 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1457 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
1458 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1459 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
1460 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1461 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1462 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1463 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1464 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1465 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1466 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1467 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1468 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1469 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1470 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1471};
1472
1473static reloc_howto_type *
1474elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1475 bfd_reloc_code_real_type code)
1476{
1477 unsigned int i;
1478
1479 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1480 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1481 return elf64_aarch64_howto_from_type
1482 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1483
1484 bfd_set_error (bfd_error_bad_value);
1485 return NULL;
1486}
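
/* Usage note (illustration): a request for BFD_RELOC_AARCH64_CALL26
   made through the generic bfd_reloc_type_lookup () is dispatched
   (via the target vector set up elsewhere in this file) to the
   function above, which returns the R_AARCH64_CALL26 howto from
   elf64_aarch64_howto_table.  */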
1487
1488static reloc_howto_type *
1489elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1490 const char *r_name)
1491{
1492 unsigned int i;
1493
1494 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1495 if (elf64_aarch64_howto_table[i].name != NULL
1496 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1497 return &elf64_aarch64_howto_table[i];
1498
1499 return NULL;
1500}
1501
1502#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1503#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1504#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1505#define TARGET_BIG_NAME "elf64-bigaarch64"
1506
1507typedef unsigned long int insn32;
1508
1509/* The linker script knows the section names for placement.
1510 The entry_names are used to do simple name mangling on the stubs.
1511 Given a function name, and its type, the stub can be found. The
 1512   name can be changed. The only requirement is that the %s be present. */
1513#define STUB_ENTRY_NAME "__%s_veneer"
1514
1515/* The name of the dynamic interpreter. This is put in the .interp
1516 section. */
1517#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1518
1519#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1520 (((1 << 25) - 1) << 2)
1521#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1522 (-((1 << 25) << 2))
1523
1524#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1525#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
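
/* With the values above, an unconditional B or BL (26-bit signed
   immediate, scaled by 4) can reach offsets in [-0x8000000, 0x7fffffc],
   i.e. roughly +/-128MiB, while an ADRP (21-bit signed page immediate)
   can reach roughly +/-4GiB from the page containing the
   instruction.  */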
1526
1527static int
1528aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1529{
1530 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1531 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1532}
1533
1534static int
1535aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1536{
1537 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1538 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1539 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1540}
1541
1542static const uint32_t aarch64_adrp_branch_stub [] =
1543{
1544 0x90000010, /* adrp ip0, X */
1545 /* R_AARCH64_ADR_HI21_PCREL(X) */
1546 0x91000210, /* add ip0, ip0, :lo12:X */
1547 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1548 0xd61f0200, /* br ip0 */
1549};
1550
1551static const uint32_t aarch64_long_branch_stub[] =
1552{
1553 0x58000090, /* ldr ip0, 1f */
1554 0x10000011, /* adr ip1, #0 */
1555 0x8b110210, /* add ip0, ip0, ip1 */
1556 0xd61f0200, /* br ip0 */
1557 0x00000000, /* 1: .xword
1558 R_AARCH64_PREL64(X) + 12
1559 */
1560 0x00000000,
1561};
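
/* How the long-branch stub reaches X: the .xword at 1f is relocated
   with R_AARCH64_PREL64 (X) + 12, so it holds X + 12 minus the address
   of the .xword itself.  The .xword sits 12 bytes after the
   "adr ip1, #0", hence the loaded value is exactly X minus the address
   of the adr; "add ip0, ip0, ip1" therefore reconstructs the absolute
   address X before "br ip0".  */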
1562
1563/* Section name for stubs is the associated section name plus this
1564 string. */
1565#define STUB_SUFFIX ".stub"
1566
1567enum elf64_aarch64_stub_type
1568{
1569 aarch64_stub_none,
1570 aarch64_stub_adrp_branch,
1571 aarch64_stub_long_branch,
1572};
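
/* A rough sketch (not the backend's actual selection code) of how a
   stub type might be chosen; destination, location and stub_address
   are hypothetical values for the branch target, the branch
   instruction and the prospective stub.  */
#if 0
  if (aarch64_valid_branch_p (destination, location))
    stub_type = aarch64_stub_none;           /* Branch reaches directly.  */
  else if (aarch64_valid_for_adrp_p (destination, stub_address))
    stub_type = aarch64_stub_adrp_branch;    /* adrp/add/br, +/-4GiB.  */
  else
    stub_type = aarch64_stub_long_branch;    /* ldr/adr/add/br, any address.  */
#endif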
1573
1574struct elf64_aarch64_stub_hash_entry
1575{
1576 /* Base hash table entry structure. */
1577 struct bfd_hash_entry root;
1578
1579 /* The stub section. */
1580 asection *stub_sec;
1581
1582 /* Offset within stub_sec of the beginning of this stub. */
1583 bfd_vma stub_offset;
1584
1585 /* Given the symbol's value and its section we can determine its final
1586 value when building the stubs (so the stub knows where to jump). */
1587 bfd_vma target_value;
1588 asection *target_section;
1589
1590 enum elf64_aarch64_stub_type stub_type;
1591
1592 /* The symbol table entry, if any, that this was derived from. */
1593 struct elf64_aarch64_link_hash_entry *h;
1594
1595 /* Destination symbol type */
1596 unsigned char st_type;
1597
1598 /* Where this stub is being called from, or, in the case of combined
1599 stub sections, the first input section in the group. */
1600 asection *id_sec;
1601
1602 /* The name for the local symbol at the start of this stub. The
1603 stub name in the hash table has to be unique; this does not, so
1604 it can be friendlier. */
1605 char *output_name;
1606};
1607
1608/* Used to build a map of a section. This is required for mixed-endian
1609 code/data. */
1610
1611typedef struct elf64_elf_section_map
1612{
1613 bfd_vma vma;
1614 char type;
1615}
1616elf64_aarch64_section_map;
1617
1618
1619typedef struct _aarch64_elf_section_data
1620{
1621 struct bfd_elf_section_data elf;
1622 unsigned int mapcount;
1623 unsigned int mapsize;
1624 elf64_aarch64_section_map *map;
1625}
1626_aarch64_elf_section_data;
1627
1628#define elf64_aarch64_section_data(sec) \
1629 ((_aarch64_elf_section_data *) elf_section_data (sec))
1630
1631/* The size of the thread control block. */
1632#define TCB_SIZE 16
1633
1634struct elf_aarch64_local_symbol
1635{
1636 unsigned int got_type;
1637 bfd_signed_vma got_refcount;
1638 bfd_vma got_offset;
1639
1640 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1641 offset is from the end of the jump table and reserved entries
1642 within the PLTGOT.
1643
1644 The magic value (bfd_vma) -1 indicates that an offset has not been
1645 allocated. */
1646 bfd_vma tlsdesc_got_jump_table_offset;
1647};
1648
1649struct elf_aarch64_obj_tdata
1650{
1651 struct elf_obj_tdata root;
1652
1653 /* local symbol descriptors */
1654 struct elf_aarch64_local_symbol *locals;
1655
1656 /* Zero to warn when linking objects with incompatible enum sizes. */
1657 int no_enum_size_warning;
1658
1659 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1660 int no_wchar_size_warning;
1661};
1662
1663#define elf_aarch64_tdata(bfd) \
1664 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1665
1666#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1667
1668#define is_aarch64_elf(bfd) \
1669 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1670 && elf_tdata (bfd) != NULL \
1671 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1672
1673static bfd_boolean
1674elf64_aarch64_mkobject (bfd *abfd)
1675{
1676 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1677 AARCH64_ELF_DATA);
1678}
1679
1680/* The AArch64 linker needs to keep track of the number of relocs that it
1681 decides to copy in check_relocs for each symbol. This is so that
1682 it can discard PC relative relocs if it doesn't need them when
1683 linking with -Bsymbolic. We store the information in a field
1684 extending the regular ELF linker hash table. */
1685
1686/* This structure keeps track of the number of relocs we have copied
1687 for a given symbol. */
1688struct elf64_aarch64_relocs_copied
1689{
1690 /* Next section. */
1691 struct elf64_aarch64_relocs_copied *next;
1692 /* A section in dynobj. */
1693 asection *section;
1694 /* Number of relocs copied in this section. */
1695 bfd_size_type count;
1696 /* Number of PC-relative relocs copied in this section. */
1697 bfd_size_type pc_count;
1698};
1699
1700#define elf64_aarch64_hash_entry(ent) \
1701 ((struct elf64_aarch64_link_hash_entry *)(ent))
1702
1703#define GOT_UNKNOWN 0
1704#define GOT_NORMAL 1
1705#define GOT_TLS_GD 2
1706#define GOT_TLS_IE 4
1707#define GOT_TLSDESC_GD 8
1708
1709#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1710
1711/* AArch64 ELF linker hash entry. */
1712struct elf64_aarch64_link_hash_entry
1713{
1714 struct elf_link_hash_entry root;
1715
1716 /* Track dynamic relocs copied for this symbol. */
1717 struct elf_dyn_relocs *dyn_relocs;
1718
1719 /* Number of PC relative relocs copied for this symbol. */
1720 struct elf64_aarch64_relocs_copied *relocs_copied;
1721
1722 /* Since PLT entries have variable size, we need to record the
1723 index into .got.plt instead of recomputing it from the PLT
1724 offset. */
1725 bfd_signed_vma plt_got_offset;
1726
1727 /* Bit mask representing the type of GOT entry(s) if any required by
1728 this symbol. */
1729 unsigned int got_type;
1730
1731 /* A pointer to the most recently used stub hash entry against this
1732 symbol. */
1733 struct elf64_aarch64_stub_hash_entry *stub_cache;
1734
1735 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1736 is from the end of the jump table and reserved entries within the PLTGOT.
1737
1738 The magic value (bfd_vma) -1 indicates that an offset has not
1739 been allocated. */
1740 bfd_vma tlsdesc_got_jump_table_offset;
1741};
1742
1743static unsigned int
1744elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1745 bfd *abfd,
1746 unsigned long r_symndx)
1747{
1748 if (h)
1749 return elf64_aarch64_hash_entry (h)->got_type;
1750
1751 if (! elf64_aarch64_locals (abfd))
1752 return GOT_UNKNOWN;
1753
1754 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1755}
1756
1757/* Traverse an AArch64 ELF linker hash table. */
1758#define elf64_aarch64_link_hash_traverse(table, func, info) \
1759 (elf_link_hash_traverse \
1760 (&(table)->root, \
1761 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1762 (info)))
1763
1764/* Get the AArch64 elf linker hash table from a link_info structure. */
1765#define elf64_aarch64_hash_table(info) \
1766 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1767
1768#define aarch64_stub_hash_lookup(table, string, create, copy) \
1769 ((struct elf64_aarch64_stub_hash_entry *) \
1770 bfd_hash_lookup ((table), (string), (create), (copy)))
1771
1772/* AArch64 ELF linker hash table. */
1773struct elf64_aarch64_link_hash_table
1774{
1775 /* The main hash table. */
1776 struct elf_link_hash_table root;
1777
1778 /* Nonzero to force PIC branch veneers. */
1779 int pic_veneer;
1780
1781 /* The number of bytes in the initial entry in the PLT. */
1782 bfd_size_type plt_header_size;
1783
1784 /* The number of bytes in the subsequent PLT entries. */
1785 bfd_size_type plt_entry_size;
1786
1787 /* Short-cuts to get to dynamic linker sections. */
1788 asection *sdynbss;
1789 asection *srelbss;
1790
1791 /* Small local sym cache. */
1792 struct sym_cache sym_cache;
1793
1794 /* For convenience in allocate_dynrelocs. */
1795 bfd *obfd;
1796
1797 /* The amount of space used by the reserved portion of the sgotplt
1798 section, plus whatever space is used by the jump slots. */
1799 bfd_vma sgotplt_jump_table_size;
1800
1801 /* The stub hash table. */
1802 struct bfd_hash_table stub_hash_table;
1803
1804 /* Linker stub bfd. */
1805 bfd *stub_bfd;
1806
1807 /* Linker call-backs. */
1808 asection *(*add_stub_section) (const char *, asection *);
1809 void (*layout_sections_again) (void);
1810
1811 /* Array to keep track of which stub sections have been created, and
1812 information on stub grouping. */
1813 struct map_stub
1814 {
1815 /* This is the section to which stubs in the group will be
1816 attached. */
1817 asection *link_sec;
1818 /* The stub section. */
1819 asection *stub_sec;
1820 } *stub_group;
1821
1822 /* Assorted information used by elf64_aarch64_size_stubs. */
1823 unsigned int bfd_count;
1824 int top_index;
1825 asection **input_list;
1826
1827 /* The offset into splt of the PLT entry for the TLS descriptor
1828 resolver. Special values are 0, if not necessary (or not found
1829 to be necessary yet), and -1 if needed but not determined
1830 yet. */
1831 bfd_vma tlsdesc_plt;
1832
1833 /* The GOT offset for the lazy trampoline. Communicated to the
1834 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1835 indicates an offset is not allocated. */
1836 bfd_vma dt_tlsdesc_got;
1837};
1838
1839
1840/* Return non-zero if the indicated VALUE has overflowed the maximum
1841 range expressible by an unsigned number with the indicated number of
1842 BITS. */
1843
1844static bfd_reloc_status_type
1845aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1846{
1847 bfd_vma lim;
1848 if (bits >= sizeof (bfd_vma) * 8)
1849 return bfd_reloc_ok;
1850 lim = (bfd_vma) 1 << bits;
1851 if (value >= lim)
1852 return bfd_reloc_overflow;
1853 return bfd_reloc_ok;
1854}
1855
1856
1857/* Return non-zero if the indicated VALUE has overflowed the maximum
1858 range expressible by a signed number with the indicated number of
1859 BITS. */
1860
1861static bfd_reloc_status_type
1862aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1863{
1864 bfd_signed_vma svalue = (bfd_signed_vma) value;
1865 bfd_signed_vma lim;
1866
1867 if (bits >= sizeof (bfd_vma) * 8)
1868 return bfd_reloc_ok;
1869 lim = (bfd_signed_vma) 1 << (bits - 1);
1870 if (svalue < -lim || svalue >= lim)
1871 return bfd_reloc_overflow;
1872 return bfd_reloc_ok;
1873}
1874
1875/* Create an entry in an AArch64 ELF linker hash table. */
1876
1877static struct bfd_hash_entry *
1878elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1879 struct bfd_hash_table *table,
1880 const char *string)
1881{
1882 struct elf64_aarch64_link_hash_entry *ret =
1883 (struct elf64_aarch64_link_hash_entry *) entry;
1884
1885 /* Allocate the structure if it has not already been allocated by a
1886 subclass. */
1887 if (ret == NULL)
1888 ret = bfd_hash_allocate (table,
1889 sizeof (struct elf64_aarch64_link_hash_entry));
1890 if (ret == NULL)
1891 return (struct bfd_hash_entry *) ret;
1892
1893 /* Call the allocation method of the superclass. */
1894 ret = ((struct elf64_aarch64_link_hash_entry *)
1895 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1896 table, string));
1897 if (ret != NULL)
1898 {
1899 ret->dyn_relocs = NULL;
1900 ret->relocs_copied = NULL;
1901 ret->got_type = GOT_UNKNOWN;
1902 ret->plt_got_offset = (bfd_vma) - 1;
1903 ret->stub_cache = NULL;
1904 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1905 }
1906
1907 return (struct bfd_hash_entry *) ret;
1908}
1909
1910/* Initialize an entry in the stub hash table. */
1911
1912static struct bfd_hash_entry *
1913stub_hash_newfunc (struct bfd_hash_entry *entry,
1914 struct bfd_hash_table *table, const char *string)
1915{
1916 /* Allocate the structure if it has not already been allocated by a
1917 subclass. */
1918 if (entry == NULL)
1919 {
1920 entry = bfd_hash_allocate (table,
1921 sizeof (struct
1922 elf64_aarch64_stub_hash_entry));
1923 if (entry == NULL)
1924 return entry;
1925 }
1926
1927 /* Call the allocation method of the superclass. */
1928 entry = bfd_hash_newfunc (entry, table, string);
1929 if (entry != NULL)
1930 {
1931 struct elf64_aarch64_stub_hash_entry *eh;
1932
1933 /* Initialize the local fields. */
1934 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1935 eh->stub_sec = NULL;
1936 eh->stub_offset = 0;
1937 eh->target_value = 0;
1938 eh->target_section = NULL;
1939 eh->stub_type = aarch64_stub_none;
1940 eh->h = NULL;
1941 eh->id_sec = NULL;
1942 }
1943
1944 return entry;
1945}
1946
1947
1948/* Copy the extra info we tack onto an elf_link_hash_entry. */
1949
1950static void
1951elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
1952 struct elf_link_hash_entry *dir,
1953 struct elf_link_hash_entry *ind)
1954{
1955 struct elf64_aarch64_link_hash_entry *edir, *eind;
1956
1957 edir = (struct elf64_aarch64_link_hash_entry *) dir;
1958 eind = (struct elf64_aarch64_link_hash_entry *) ind;
1959
1960 if (eind->dyn_relocs != NULL)
1961 {
1962 if (edir->dyn_relocs != NULL)
1963 {
1964 struct elf_dyn_relocs **pp;
1965 struct elf_dyn_relocs *p;
1966
1967 /* Add reloc counts against the indirect sym to the direct sym
1968 list. Merge any entries against the same section. */
1969 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
1970 {
1971 struct elf_dyn_relocs *q;
1972
1973 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1974 if (q->sec == p->sec)
1975 {
1976 q->pc_count += p->pc_count;
1977 q->count += p->count;
1978 *pp = p->next;
1979 break;
1980 }
1981 if (q == NULL)
1982 pp = &p->next;
1983 }
1984 *pp = edir->dyn_relocs;
1985 }
1986
1987 edir->dyn_relocs = eind->dyn_relocs;
1988 eind->dyn_relocs = NULL;
1989 }
1990
1991 if (eind->relocs_copied != NULL)
1992 {
1993 if (edir->relocs_copied != NULL)
1994 {
1995 struct elf64_aarch64_relocs_copied **pp;
1996 struct elf64_aarch64_relocs_copied *p;
1997
1998 /* Add reloc counts against the indirect sym to the direct sym
1999 list. Merge any entries against the same section. */
2000 for (pp = &eind->relocs_copied; (p = *pp) != NULL;)
2001 {
2002 struct elf64_aarch64_relocs_copied *q;
2003
2004 for (q = edir->relocs_copied; q != NULL; q = q->next)
2005 if (q->section == p->section)
2006 {
2007 q->pc_count += p->pc_count;
2008 q->count += p->count;
2009 *pp = p->next;
2010 break;
2011 }
2012 if (q == NULL)
2013 pp = &p->next;
2014 }
2015 *pp = edir->relocs_copied;
2016 }
2017
2018 edir->relocs_copied = eind->relocs_copied;
2019 eind->relocs_copied = NULL;
2020 }
2021
2022 if (ind->root.type == bfd_link_hash_indirect)
2023 {
2024 /* Copy over GOT entry type information. */
2025 if (dir->got.refcount <= 0)
2026 {
2027 edir->got_type = eind->got_type;
2028 eind->got_type = GOT_UNKNOWN;
2029 }
2030 }
2031
2032 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2033}
2034
2035/* Create an AArch64 elf linker hash table. */
2036
2037static struct bfd_link_hash_table *
2038elf64_aarch64_link_hash_table_create (bfd *abfd)
2039{
2040 struct elf64_aarch64_link_hash_table *ret;
2041 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2042
2043 ret = bfd_malloc (amt);
2044 if (ret == NULL)
2045 return NULL;
2046
2047 if (!_bfd_elf_link_hash_table_init
2048 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2049 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2050 {
2051 free (ret);
2052 return NULL;
2053 }
2054
2055 ret->sdynbss = NULL;
2056 ret->srelbss = NULL;
2057
2058 ret->plt_header_size = PLT_ENTRY_SIZE;
2059 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2060
2061 ret->sym_cache.abfd = NULL;
2062 ret->obfd = abfd;
2063
2064 ret->stub_bfd = NULL;
2065 ret->add_stub_section = NULL;
2066 ret->layout_sections_again = NULL;
2067 ret->stub_group = NULL;
2068 ret->bfd_count = 0;
2069 ret->top_index = 0;
2070 ret->input_list = NULL;
2071 ret->tlsdesc_plt = 0;
2072 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2073
2074 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2075 sizeof (struct elf64_aarch64_stub_hash_entry)))
2076 {
2077 free (ret);
2078 return NULL;
2079 }
2080
2081 return &ret->root.root;
2082}
2083
2084/* Free the derived linker hash table. */
2085
2086static void
2087elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2088{
2089 struct elf64_aarch64_link_hash_table *ret
2090 = (struct elf64_aarch64_link_hash_table *) hash;
2091
2092 bfd_hash_table_free (&ret->stub_hash_table);
2093 _bfd_generic_link_hash_table_free (hash);
2094}
2095
2096static bfd_vma
2097aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2098 bfd_vma addend, bfd_boolean weak_undef_p)
2099{
2100 switch (r_type)
2101 {
2102 case R_AARCH64_TLSDESC_CALL:
2103 case R_AARCH64_NONE:
2104 case R_AARCH64_NULL:
2105 break;
2106
2107 case R_AARCH64_ADR_PREL_LO21:
2108 case R_AARCH64_CONDBR19:
2109 case R_AARCH64_LD_PREL_LO19:
2110 case R_AARCH64_PREL16:
2111 case R_AARCH64_PREL32:
2112 case R_AARCH64_PREL64:
2113 case R_AARCH64_TSTBR14:
2114 if (weak_undef_p)
2115 value = place;
2116 value = value + addend - place;
2117 break;
2118
2119 case R_AARCH64_CALL26:
2120 case R_AARCH64_JUMP26:
2121 value = value + addend - place;
2122 break;
2123
2124 case R_AARCH64_ABS16:
2125 case R_AARCH64_ABS32:
2126 case R_AARCH64_MOVW_SABS_G0:
2127 case R_AARCH64_MOVW_SABS_G1:
2128 case R_AARCH64_MOVW_SABS_G2:
2129 case R_AARCH64_MOVW_UABS_G0:
2130 case R_AARCH64_MOVW_UABS_G0_NC:
2131 case R_AARCH64_MOVW_UABS_G1:
2132 case R_AARCH64_MOVW_UABS_G1_NC:
2133 case R_AARCH64_MOVW_UABS_G2:
2134 case R_AARCH64_MOVW_UABS_G2_NC:
2135 case R_AARCH64_MOVW_UABS_G3:
2136 value = value + addend;
2137 break;
2138
2139 case R_AARCH64_ADR_PREL_PG_HI21:
2140 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2141 if (weak_undef_p)
2142 value = PG (place);
2143 value = PG (value + addend) - PG (place);
2144 break;
2145
2146 case R_AARCH64_ADR_GOT_PAGE:
2147 case R_AARCH64_TLSDESC_ADR_PAGE:
2148 case R_AARCH64_TLSGD_ADR_PAGE21:
2149 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2150 value = PG (value + addend) - PG (place);
2151 break;
2152
2153 case R_AARCH64_ADD_ABS_LO12_NC:
2154 case R_AARCH64_LD64_GOT_LO12_NC:
2155 case R_AARCH64_LDST8_ABS_LO12_NC:
2156 case R_AARCH64_LDST16_ABS_LO12_NC:
2157 case R_AARCH64_LDST32_ABS_LO12_NC:
2158 case R_AARCH64_LDST64_ABS_LO12_NC:
2159 case R_AARCH64_LDST128_ABS_LO12_NC:
2160 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2161 case R_AARCH64_TLSDESC_ADD:
2162 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2163 case R_AARCH64_TLSDESC_LDR:
2164 case R_AARCH64_TLSGD_ADD_LO12_NC:
2165 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2166 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2167 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2168 value = PG_OFFSET (value + addend);
2169 break;
2170
2171 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2172 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2173 value = (value + addend) & (bfd_vma) 0xffff0000;
2174 break;
2175 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2176 value = (value + addend) & (bfd_vma) 0xfff000;
2177 break;
2178
2179 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2180 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2181 value = (value + addend) & (bfd_vma) 0xffff;
2182 break;
2183
2184 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2185 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2186 value -= place & ~(bfd_vma) 0xffffffff;
2187 break;
2188 }
2189 return value;
2190}
2191
2192static bfd_boolean
2193aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2194 bfd_vma offset, bfd_vma value)
2195{
2196 reloc_howto_type *howto;
2197 bfd_vma place;
2198
2199 howto = elf64_aarch64_howto_from_type (r_type);
2200 place = (input_section->output_section->vma + input_section->output_offset
2201 + offset);
2202 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2203 return bfd_elf_aarch64_put_addend (input_bfd,
2204 input_section->contents + offset,
2205 howto, value);
2206}
2207
2208static enum elf64_aarch64_stub_type
2209aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2210{
2211 if (aarch64_valid_for_adrp_p (value, place))
2212 return aarch64_stub_adrp_branch;
2213 return aarch64_stub_long_branch;
2214}
2215
2216/* Determine the type of stub needed, if any, for a call. */
2217
2218static enum elf64_aarch64_stub_type
2219aarch64_type_of_stub (struct bfd_link_info *info,
2220 asection *input_sec,
2221 const Elf_Internal_Rela *rel,
2222 unsigned char st_type,
2223 struct elf64_aarch64_link_hash_entry *hash,
2224 bfd_vma destination)
2225{
2226 bfd_vma location;
2227 bfd_signed_vma branch_offset;
2228 unsigned int r_type;
2229 struct elf64_aarch64_link_hash_table *globals;
2230 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2231 bfd_boolean via_plt_p;
2232
2233 if (st_type != STT_FUNC)
2234 return stub_type;
2235
2236 globals = elf64_aarch64_hash_table (info);
2237 via_plt_p = (globals->root.splt != NULL && hash != NULL
2238 && hash->root.plt.offset != (bfd_vma) - 1);
2239
2240 if (via_plt_p)
2241 return stub_type;
2242
2243 /* Determine where the call point is. */
2244 location = (input_sec->output_offset
2245 + input_sec->output_section->vma + rel->r_offset);
2246
2247 branch_offset = (bfd_signed_vma) (destination - location);
2248
2249 r_type = ELF64_R_TYPE (rel->r_info);
2250
2251 /* We don't want to redirect any old unconditional jump in this way,
2252 only one which is being used for a sibcall, where it is
2253 acceptable for the IP0 and IP1 registers to be clobbered. */
2254 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2255 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2256 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2257 {
2258 stub_type = aarch64_stub_long_branch;
2259 }
2260
2261 return stub_type;
2262}
2263
2264/* Build a name for an entry in the stub hash table. */
2265
2266static char *
2267elf64_aarch64_stub_name (const asection *input_section,
2268 const asection *sym_sec,
2269 const struct elf64_aarch64_link_hash_entry *hash,
2270 const Elf_Internal_Rela *rel)
2271{
2272 char *stub_name;
2273 bfd_size_type len;
2274
2275 if (hash)
2276 {
2277 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2278 stub_name = bfd_malloc (len);
2279 if (stub_name != NULL)
2280 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2281 (unsigned int) input_section->id,
2282 hash->root.root.root.string,
2283 rel->r_addend);
2284 }
2285 else
2286 {
2287 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2288 stub_name = bfd_malloc (len);
2289 if (stub_name != NULL)
2290 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2291 (unsigned int) input_section->id,
2292 (unsigned int) sym_sec->id,
2293 (unsigned int) ELF64_R_SYM (rel->r_info),
2294 rel->r_addend);
2295 }
2296
2297 return stub_name;
2298}
2299
2300/* Look up an entry in the stub hash. Stub entries are cached because
2301 creating the stub name takes a bit of time. */
2302
2303static struct elf64_aarch64_stub_hash_entry *
2304elf64_aarch64_get_stub_entry (const asection *input_section,
2305 const asection *sym_sec,
2306 struct elf_link_hash_entry *hash,
2307 const Elf_Internal_Rela *rel,
2308 struct elf64_aarch64_link_hash_table *htab)
2309{
2310 struct elf64_aarch64_stub_hash_entry *stub_entry;
2311 struct elf64_aarch64_link_hash_entry *h =
2312 (struct elf64_aarch64_link_hash_entry *) hash;
2313 const asection *id_sec;
2314
2315 if ((input_section->flags & SEC_CODE) == 0)
2316 return NULL;
2317
2318 /* If this input section is part of a group of sections sharing one
2319 stub section, then use the id of the first section in the group.
2320 Stub names need to include a section id, as there may well be
2321 more than one stub used to reach say, printf, and we need to
2322 distinguish between them. */
2323 id_sec = htab->stub_group[input_section->id].link_sec;
2324
2325 if (h != NULL && h->stub_cache != NULL
2326 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2327 {
2328 stub_entry = h->stub_cache;
2329 }
2330 else
2331 {
2332 char *stub_name;
2333
2334 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2335 if (stub_name == NULL)
2336 return NULL;
2337
2338 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2339 stub_name, FALSE, FALSE);
2340 if (h != NULL)
2341 h->stub_cache = stub_entry;
2342
2343 free (stub_name);
2344 }
2345
2346 return stub_entry;
2347}
2348
2349/* Add a new stub entry to the stub hash. Not all fields of the new
2350 stub entry are initialised. */
2351
2352static struct elf64_aarch64_stub_hash_entry *
2353elf64_aarch64_add_stub (const char *stub_name,
2354 asection *section,
2355 struct elf64_aarch64_link_hash_table *htab)
2356{
2357 asection *link_sec;
2358 asection *stub_sec;
2359 struct elf64_aarch64_stub_hash_entry *stub_entry;
2360
2361 link_sec = htab->stub_group[section->id].link_sec;
2362 stub_sec = htab->stub_group[section->id].stub_sec;
2363 if (stub_sec == NULL)
2364 {
2365 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2366 if (stub_sec == NULL)
2367 {
2368 size_t namelen;
2369 bfd_size_type len;
2370 char *s_name;
2371
2372 namelen = strlen (link_sec->name);
2373 len = namelen + sizeof (STUB_SUFFIX);
2374 s_name = bfd_alloc (htab->stub_bfd, len);
2375 if (s_name == NULL)
2376 return NULL;
2377
2378 memcpy (s_name, link_sec->name, namelen);
2379 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2380 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2381 if (stub_sec == NULL)
2382 return NULL;
2383 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2384 }
2385 htab->stub_group[section->id].stub_sec = stub_sec;
2386 }
2387
2388 /* Enter this entry into the linker stub hash table. */
2389 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2390 TRUE, FALSE);
2391 if (stub_entry == NULL)
2392 {
2393 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2394 section->owner, stub_name);
2395 return NULL;
2396 }
2397
2398 stub_entry->stub_sec = stub_sec;
2399 stub_entry->stub_offset = 0;
2400 stub_entry->id_sec = link_sec;
2401
2402 return stub_entry;
2403}
2404
2405static bfd_boolean
2406aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2407 void *in_arg ATTRIBUTE_UNUSED)
2408{
2409 struct elf64_aarch64_stub_hash_entry *stub_entry;
2410 asection *stub_sec;
2411 bfd *stub_bfd;
2412 bfd_byte *loc;
2413 bfd_vma sym_value;
2414 unsigned int template_size;
2415 const uint32_t *template;
2416 unsigned int i;
2417
2418 /* Massage our args to the form they really have. */
2419 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2420
2421 stub_sec = stub_entry->stub_sec;
2422
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry->stub_offset = stub_sec->size;
2425 loc = stub_sec->contents + stub_entry->stub_offset;
2426
2427 stub_bfd = stub_sec->owner;
2428
2429 /* This is the address of the stub destination. */
2430 sym_value = (stub_entry->target_value
2431 + stub_entry->target_section->output_offset
2432 + stub_entry->target_section->output_section->vma);
2433
2434 if (stub_entry->stub_type == aarch64_stub_long_branch)
2435 {
2436 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2437 + stub_sec->output_offset);
2438
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value, place))
2441 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2442 }
2443
2444 switch (stub_entry->stub_type)
2445 {
2446 case aarch64_stub_adrp_branch:
2447 template = aarch64_adrp_branch_stub;
2448 template_size = sizeof (aarch64_adrp_branch_stub);
2449 break;
2450 case aarch64_stub_long_branch:
2451 template = aarch64_long_branch_stub;
2452 template_size = sizeof (aarch64_long_branch_stub);
2453 break;
2454 default:
2455 BFD_FAIL ();
2456 return FALSE;
2457 }
2458
2459 for (i = 0; i < (template_size / sizeof template[0]); i++)
2460 {
2461 bfd_putl32 (template[i], loc);
2462 loc += 4;
2463 }
2464
2465 template_size = (template_size + 7) & ~7;
2466 stub_sec->size += template_size;
2467
2468 switch (stub_entry->stub_type)
2469 {
2470 case aarch64_stub_adrp_branch:
2471 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2472 stub_entry->stub_offset, sym_value))
2473 /* The stub would not have been relaxed if the offset was out
2474 of range. */
2475 BFD_FAIL ();
2476
2477 _bfd_final_link_relocate
2478 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2479 stub_bfd,
2480 stub_sec,
2481 stub_sec->contents,
2482 stub_entry->stub_offset + 4,
2483 sym_value,
2484 0);
2485 break;
2486
2487 case aarch64_stub_long_branch:
2488 /* We want the value relative to the address 12 bytes back from the
2489 value itself. */
2490 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2491 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2492 stub_sec->contents,
2493 stub_entry->stub_offset + 16,
2494 sym_value + 12, 0);
2495 break;
2496 default:
2497 break;
2498 }
2499
2500 return TRUE;
2501}
2502
2503/* As above, but don't actually build the stub. Just bump offset so
2504 we know stub section sizes. */
2505
2506static bfd_boolean
2507aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2508 void *in_arg ATTRIBUTE_UNUSED)
2509{
2510 struct elf64_aarch64_stub_hash_entry *stub_entry;
2511 int size;
2512
2513 /* Massage our args to the form they really have. */
2514 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2515
2516 switch (stub_entry->stub_type)
2517 {
2518 case aarch64_stub_adrp_branch:
2519 size = sizeof (aarch64_adrp_branch_stub);
2520 break;
2521 case aarch64_stub_long_branch:
2522 size = sizeof (aarch64_long_branch_stub);
2523 break;
2524 default:
2525 BFD_FAIL ();
2526 return FALSE;
2527 break;
2528 }
2529
2530 size = (size + 7) & ~7;
2531 stub_entry->stub_sec->size += size;
2532 return TRUE;
2533}
2534
2535/* External entry points for sizing and building linker stubs. */
2536
2537/* Set up various things so that we can make a list of input sections
2538 for each output section included in the link. Returns -1 on error,
2539 0 when no stubs will be needed, and 1 on success. */
2540
2541int
2542elf64_aarch64_setup_section_lists (bfd *output_bfd,
2543 struct bfd_link_info *info)
2544{
2545 bfd *input_bfd;
2546 unsigned int bfd_count;
2547 int top_id, top_index;
2548 asection *section;
2549 asection **input_list, **list;
2550 bfd_size_type amt;
2551 struct elf64_aarch64_link_hash_table *htab =
2552 elf64_aarch64_hash_table (info);
2553
2554 if (!is_elf_hash_table (htab))
2555 return 0;
2556
2557 /* Count the number of input BFDs and find the top input section id. */
2558 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2559 input_bfd != NULL; input_bfd = input_bfd->link_next)
2560 {
2561 bfd_count += 1;
2562 for (section = input_bfd->sections;
2563 section != NULL; section = section->next)
2564 {
2565 if (top_id < section->id)
2566 top_id = section->id;
2567 }
2568 }
2569 htab->bfd_count = bfd_count;
2570
2571 amt = sizeof (struct map_stub) * (top_id + 1);
2572 htab->stub_group = bfd_zmalloc (amt);
2573 if (htab->stub_group == NULL)
2574 return -1;
2575
2576 /* We can't use output_bfd->section_count here to find the top output
2577 section index as some sections may have been removed, and
2578 _bfd_strip_section_from_output doesn't renumber the indices. */
2579 for (section = output_bfd->sections, top_index = 0;
2580 section != NULL; section = section->next)
2581 {
2582 if (top_index < section->index)
2583 top_index = section->index;
2584 }
2585
2586 htab->top_index = top_index;
2587 amt = sizeof (asection *) * (top_index + 1);
2588 input_list = bfd_malloc (amt);
2589 htab->input_list = input_list;
2590 if (input_list == NULL)
2591 return -1;
2592
2593 /* For sections we aren't interested in, mark their entries with a
2594 value we can check later. */
2595 list = input_list + top_index;
2596 do
2597 *list = bfd_abs_section_ptr;
2598 while (list-- != input_list);
2599
2600 for (section = output_bfd->sections;
2601 section != NULL; section = section->next)
2602 {
2603 if ((section->flags & SEC_CODE) != 0)
2604 input_list[section->index] = NULL;
2605 }
2606
2607 return 1;
2608}
2609
2610/* Used by elf64_aarch64_next_input_section and group_sections. */
2611#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2612
2613/* The linker repeatedly calls this function for each input section,
2614 in the order that input sections are linked into output sections.
2615 Build lists of input sections to determine groupings between which
2616 we may insert linker stubs. */
2617
2618void
2619elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2620{
2621 struct elf64_aarch64_link_hash_table *htab =
2622 elf64_aarch64_hash_table (info);
2623
2624 if (isec->output_section->index <= htab->top_index)
2625 {
2626 asection **list = htab->input_list + isec->output_section->index;
2627
2628 if (*list != bfd_abs_section_ptr)
2629 {
2630 /* Steal the link_sec pointer for our list. */
2631 /* This happens to make the list in reverse order,
2632 which is what we want. */
2633 PREV_SEC (isec) = *list;
2634 *list = isec;
2635 }
2636 }
2637}
2638
2639/* See whether we can group stub sections together. Grouping stub
2640 sections may result in fewer stubs. More importantly, we need to
2641 put all .init* and .fini* stubs at the beginning of the .init or
2642 .fini output sections respectively, because glibc splits the
2643 _init and _fini functions into multiple parts. Putting a stub in
2644 the middle of a function is not a good idea. */
2645
2646static void
2647group_sections (struct elf64_aarch64_link_hash_table *htab,
2648 bfd_size_type stub_group_size,
2649 bfd_boolean stubs_always_before_branch)
2650{
2651 asection **list = htab->input_list + htab->top_index;
2652
2653 do
2654 {
2655 asection *tail = *list;
2656
2657 if (tail == bfd_abs_section_ptr)
2658 continue;
2659
2660 while (tail != NULL)
2661 {
2662 asection *curr;
2663 asection *prev;
2664 bfd_size_type total;
2665
2666 curr = tail;
2667 total = tail->size;
2668 while ((prev = PREV_SEC (curr)) != NULL
2669 && ((total += curr->output_offset - prev->output_offset)
2670 < stub_group_size))
2671 curr = prev;
2672
2673 /* OK, the size from the start of CURR to the end is less
2674 than stub_group_size and thus can be handled by one stub
2675 section. (Or the tail section is itself larger than
2676 stub_group_size, in which case we may be toast.)
2677 We should really be keeping track of the total size of
2678 stubs added here, as stubs contribute to the final output
2679 section size. */
2680 do
2681 {
2682 prev = PREV_SEC (tail);
2683 /* Set up this stub group. */
2684 htab->stub_group[tail->id].link_sec = curr;
2685 }
2686 while (tail != curr && (tail = prev) != NULL);
2687
2688 /* But wait, there's more! Input sections up to stub_group_size
2689 bytes before the stub section can be handled by it too. */
2690 if (!stubs_always_before_branch)
2691 {
2692 total = 0;
2693 while (prev != NULL
2694 && ((total += tail->output_offset - prev->output_offset)
2695 < stub_group_size))
2696 {
2697 tail = prev;
2698 prev = PREV_SEC (tail);
2699 htab->stub_group[tail->id].link_sec = curr;
2700 }
2701 }
2702 tail = prev;
2703 }
2704 }
2705 while (list-- != htab->input_list);
2706
2707 free (htab->input_list);
2708}
2709
2710#undef PREV_SEC
2711
2712/* Determine and set the size of the stub section for a final link.
2713
2714 The basic idea here is to examine all the relocations looking for
2715 PC-relative calls to a target that is unreachable with a "bl"
2716 instruction. */
2717
2718bfd_boolean
2719elf64_aarch64_size_stubs (bfd *output_bfd,
2720 bfd *stub_bfd,
2721 struct bfd_link_info *info,
2722 bfd_signed_vma group_size,
2723 asection * (*add_stub_section) (const char *,
2724 asection *),
2725 void (*layout_sections_again) (void))
2726{
2727 bfd_size_type stub_group_size;
2728 bfd_boolean stubs_always_before_branch;
2729 bfd_boolean stub_changed = 0;
2730 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2731
2732 /* Propagate mach to stub bfd, because it may not have been
2733 finalized when we created stub_bfd. */
2734 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2735 bfd_get_mach (output_bfd));
2736
2737 /* Stash our params away. */
2738 htab->stub_bfd = stub_bfd;
2739 htab->add_stub_section = add_stub_section;
2740 htab->layout_sections_again = layout_sections_again;
2741 stubs_always_before_branch = group_size < 0;
2742 if (group_size < 0)
2743 stub_group_size = -group_size;
2744 else
2745 stub_group_size = group_size;
2746
2747 if (stub_group_size == 1)
2748 {
2749 /* Default values. */
2750 /* AArch64 branch range is +/-128MB. The value used is 1MB less. */
2751 stub_group_size = 127 * 1024 * 1024;
2752 }
2753
2754 group_sections (htab, stub_group_size, stubs_always_before_branch);
2755
2756 while (1)
2757 {
2758 bfd *input_bfd;
2759 unsigned int bfd_indx;
2760 asection *stub_sec;
2761
2762 for (input_bfd = info->input_bfds, bfd_indx = 0;
2763 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2764 {
2765 Elf_Internal_Shdr *symtab_hdr;
2766 asection *section;
2767 Elf_Internal_Sym *local_syms = NULL;
2768
2769 /* We'll need the symbol table in a second. */
2770 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2771 if (symtab_hdr->sh_info == 0)
2772 continue;
2773
2774 /* Walk over each section attached to the input bfd. */
2775 for (section = input_bfd->sections;
2776 section != NULL; section = section->next)
2777 {
2778 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2779
2780 /* If there aren't any relocs, then there's nothing more
2781 to do. */
2782 if ((section->flags & SEC_RELOC) == 0
2783 || section->reloc_count == 0
2784 || (section->flags & SEC_CODE) == 0)
2785 continue;
2786
2787 /* If this section is a link-once section that will be
2788 discarded, then don't create any stubs. */
2789 if (section->output_section == NULL
2790 || section->output_section->owner != output_bfd)
2791 continue;
2792
2793 /* Get the relocs. */
2794 internal_relocs
2795 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2796 NULL, info->keep_memory);
2797 if (internal_relocs == NULL)
2798 goto error_ret_free_local;
2799
2800 /* Now examine each relocation. */
2801 irela = internal_relocs;
2802 irelaend = irela + section->reloc_count;
2803 for (; irela < irelaend; irela++)
2804 {
2805 unsigned int r_type, r_indx;
2806 enum elf64_aarch64_stub_type stub_type;
2807 struct elf64_aarch64_stub_hash_entry *stub_entry;
2808 asection *sym_sec;
2809 bfd_vma sym_value;
2810 bfd_vma destination;
2811 struct elf64_aarch64_link_hash_entry *hash;
2812 const char *sym_name;
2813 char *stub_name;
2814 const asection *id_sec;
2815 unsigned char st_type;
2816 bfd_size_type len;
2817
2818 r_type = ELF64_R_TYPE (irela->r_info);
2819 r_indx = ELF64_R_SYM (irela->r_info);
2820
2821 if (r_type >= (unsigned int) R_AARCH64_end)
2822 {
2823 bfd_set_error (bfd_error_bad_value);
2824 error_ret_free_internal:
2825 if (elf_section_data (section)->relocs == NULL)
2826 free (internal_relocs);
2827 goto error_ret_free_local;
2828 }
2829
2830 /* Only look for stubs on unconditional branch and
2831 branch and link instructions. */
2832 if (r_type != (unsigned int) R_AARCH64_CALL26
2833 && r_type != (unsigned int) R_AARCH64_JUMP26)
2834 continue;
2835
2836 /* Now determine the call target, its name, value,
2837 section. */
2838 sym_sec = NULL;
2839 sym_value = 0;
2840 destination = 0;
2841 hash = NULL;
2842 sym_name = NULL;
2843 if (r_indx < symtab_hdr->sh_info)
2844 {
2845 /* It's a local symbol. */
2846 Elf_Internal_Sym *sym;
2847 Elf_Internal_Shdr *hdr;
2848
2849 if (local_syms == NULL)
2850 {
2851 local_syms
2852 = (Elf_Internal_Sym *) symtab_hdr->contents;
2853 if (local_syms == NULL)
2854 local_syms
2855 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2856 symtab_hdr->sh_info, 0,
2857 NULL, NULL, NULL);
2858 if (local_syms == NULL)
2859 goto error_ret_free_internal;
2860 }
2861
2862 sym = local_syms + r_indx;
2863 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2864 sym_sec = hdr->bfd_section;
2865 if (!sym_sec)
2866 /* This is an undefined symbol. It can never
2867 be resolved. */
2868 continue;
2869
2870 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2871 sym_value = sym->st_value;
2872 destination = (sym_value + irela->r_addend
2873 + sym_sec->output_offset
2874 + sym_sec->output_section->vma);
2875 st_type = ELF_ST_TYPE (sym->st_info);
2876 sym_name
2877 = bfd_elf_string_from_elf_section (input_bfd,
2878 symtab_hdr->sh_link,
2879 sym->st_name);
2880 }
2881 else
2882 {
2883 int e_indx;
2884
2885 e_indx = r_indx - symtab_hdr->sh_info;
2886 hash = ((struct elf64_aarch64_link_hash_entry *)
2887 elf_sym_hashes (input_bfd)[e_indx]);
2888
2889 while (hash->root.root.type == bfd_link_hash_indirect
2890 || hash->root.root.type == bfd_link_hash_warning)
2891 hash = ((struct elf64_aarch64_link_hash_entry *)
2892 hash->root.root.u.i.link);
2893
2894 if (hash->root.root.type == bfd_link_hash_defined
2895 || hash->root.root.type == bfd_link_hash_defweak)
2896 {
2897 struct elf64_aarch64_link_hash_table *globals =
2898 elf64_aarch64_hash_table (info);
2899 sym_sec = hash->root.root.u.def.section;
2900 sym_value = hash->root.root.u.def.value;
2901 /* For a destination in a shared library,
2902 use the PLT stub as target address to
2903 decide whether a branch stub is
2904 needed. */
2905 if (globals->root.splt != NULL && hash != NULL
2906 && hash->root.plt.offset != (bfd_vma) - 1)
2907 {
2908 sym_sec = globals->root.splt;
2909 sym_value = hash->root.plt.offset;
2910 if (sym_sec->output_section != NULL)
2911 destination = (sym_value
2912 + sym_sec->output_offset
2913 +
2914 sym_sec->output_section->vma);
2915 }
2916 else if (sym_sec->output_section != NULL)
2917 destination = (sym_value + irela->r_addend
2918 + sym_sec->output_offset
2919 + sym_sec->output_section->vma);
2920 }
2921 else if (hash->root.root.type == bfd_link_hash_undefined
2922 || (hash->root.root.type
2923 == bfd_link_hash_undefweak))
2924 {
2925 /* For a shared library, use the PLT stub as
2926 target address to decide whether a long
2927 branch stub is needed.
2928 For absolute code, they cannot be handled. */
2929 struct elf64_aarch64_link_hash_table *globals =
2930 elf64_aarch64_hash_table (info);
2931
2932 if (globals->root.splt != NULL && hash != NULL
2933 && hash->root.plt.offset != (bfd_vma) - 1)
2934 {
2935 sym_sec = globals->root.splt;
2936 sym_value = hash->root.plt.offset;
2937 if (sym_sec->output_section != NULL)
2938 destination = (sym_value
2939 + sym_sec->output_offset
2940 +
2941 sym_sec->output_section->vma);
2942 }
2943 else
2944 continue;
2945 }
2946 else
2947 {
2948 bfd_set_error (bfd_error_bad_value);
2949 goto error_ret_free_internal;
2950 }
2951 st_type = ELF_ST_TYPE (hash->root.type);
2952 sym_name = hash->root.root.root.string;
2953 }
2954
2955 /* Determine what (if any) linker stub is needed. */
2956 stub_type = aarch64_type_of_stub
2957 (info, section, irela, st_type, hash, destination);
2958 if (stub_type == aarch64_stub_none)
2959 continue;
2960
2961 /* Support for grouping stub sections. */
2962 id_sec = htab->stub_group[section->id].link_sec;
2963
2964 /* Get the name of this stub. */
2965 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
2966 irela);
2967 if (!stub_name)
2968 goto error_ret_free_internal;
2969
2970 stub_entry =
2971 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2972 stub_name, FALSE, FALSE);
2973 if (stub_entry != NULL)
2974 {
2975 /* The proper stub has already been created. */
2976 free (stub_name);
2977 continue;
2978 }
2979
2980 stub_entry = elf64_aarch64_add_stub (stub_name, section,
2981 htab);
2982 if (stub_entry == NULL)
2983 {
2984 free (stub_name);
2985 goto error_ret_free_internal;
2986 }
2987
2988 stub_entry->target_value = sym_value;
2989 stub_entry->target_section = sym_sec;
2990 stub_entry->stub_type = stub_type;
2991 stub_entry->h = hash;
2992 stub_entry->st_type = st_type;
2993
2994 if (sym_name == NULL)
2995 sym_name = "unnamed";
2996 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
2997 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
2998 if (stub_entry->output_name == NULL)
2999 {
3000 free (stub_name);
3001 goto error_ret_free_internal;
3002 }
3003
3004 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3005 sym_name);
3006
3007 stub_changed = TRUE;
3008 }
3009
3010 /* We're done with the internal relocs, free them. */
3011 if (elf_section_data (section)->relocs == NULL)
3012 free (internal_relocs);
3013 }
3014 }
3015
3016 if (!stub_changed)
3017 break;
3018
3019 /* OK, we've added some stubs. Find out the new size of the
3020 stub sections. */
3021 for (stub_sec = htab->stub_bfd->sections;
3022 stub_sec != NULL; stub_sec = stub_sec->next)
3023 stub_sec->size = 0;
3024
3025 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3026
3027 /* Ask the linker to do its stuff. */
3028 (*htab->layout_sections_again) ();
3029 stub_changed = FALSE;
3030 }
3031
3032 return TRUE;
3033
3034error_ret_free_local:
3035 return FALSE;
3036}
3037
3038/* Build all the stubs associated with the current output file. The
3039 stubs are kept in a hash table attached to the main linker hash
3040 table. We also set up the .plt entries for statically linked PIC
3041 functions here. This function is called via aarch64_elf_finish in the
3042 linker. */
3043
3044bfd_boolean
3045elf64_aarch64_build_stubs (struct bfd_link_info *info)
3046{
3047 asection *stub_sec;
3048 struct bfd_hash_table *table;
3049 struct elf64_aarch64_link_hash_table *htab;
3050
3051 htab = elf64_aarch64_hash_table (info);
3052
3053 for (stub_sec = htab->stub_bfd->sections;
3054 stub_sec != NULL; stub_sec = stub_sec->next)
3055 {
3056 bfd_size_type size;
3057
3058 /* Ignore non-stub sections. */
3059 if (!strstr (stub_sec->name, STUB_SUFFIX))
3060 continue;
3061
3062 /* Allocate memory to hold the linker stubs. */
3063 size = stub_sec->size;
3064 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3065 if (stub_sec->contents == NULL && size != 0)
3066 return FALSE;
3067 stub_sec->size = 0;
3068 }
3069
3070 /* Build the stubs as directed by the stub hash table. */
3071 table = &htab->stub_hash_table;
3072 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3073
3074 return TRUE;
3075}
3076
3077
3078/* Add an entry to the code/data map for section SEC. */
3079
3080static void
3081elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3082{
3083 struct _aarch64_elf_section_data *sec_data =
3084 elf64_aarch64_section_data (sec);
3085 unsigned int newidx;
3086
3087 if (sec_data->map == NULL)
3088 {
3089 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3090 sec_data->mapcount = 0;
3091 sec_data->mapsize = 1;
3092 }
3093
3094 newidx = sec_data->mapcount++;
3095
3096 if (sec_data->mapcount > sec_data->mapsize)
3097 {
3098 sec_data->mapsize *= 2;
3099 sec_data->map = bfd_realloc_or_free
3100 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3101 }
3102
3103 if (sec_data->map)
3104 {
3105 sec_data->map[newidx].vma = vma;
3106 sec_data->map[newidx].type = type;
3107 }
3108}
3109
3110
3111/* Initialise maps of insn/data for input BFDs. */
3112void
3113bfd_elf64_aarch64_init_maps (bfd *abfd)
3114{
3115 Elf_Internal_Sym *isymbuf;
3116 Elf_Internal_Shdr *hdr;
3117 unsigned int i, localsyms;
3118
3119 /* Make sure that we are dealing with an AArch64 elf binary. */
3120 if (!is_aarch64_elf (abfd))
3121 return;
3122
3123 if ((abfd->flags & DYNAMIC) != 0)
3124 return;
3125
3126 hdr = &elf_symtab_hdr (abfd);
3127 localsyms = hdr->sh_info;
3128
3129 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3130 should contain the number of local symbols, which should come before any
3131 global symbols. Mapping symbols are always local. */
3132 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3133
3134 /* No internal symbols read? Skip this BFD. */
3135 if (isymbuf == NULL)
3136 return;
3137
3138 for (i = 0; i < localsyms; i++)
3139 {
3140 Elf_Internal_Sym *isym = &isymbuf[i];
3141 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3142 const char *name;
3143
3144 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3145 {
3146 name = bfd_elf_string_from_elf_section (abfd,
3147 hdr->sh_link,
3148 isym->st_name);
3149
3150 if (bfd_is_aarch64_special_symbol_name
3151 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3152 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3153 }
3154 }
3155}
3156
3157/* Set option values needed during linking. */
3158void
3159bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3160 struct bfd_link_info *link_info,
3161 int no_enum_warn,
3162 int no_wchar_warn, int pic_veneer)
3163{
3164 struct elf64_aarch64_link_hash_table *globals;
3165
3166 globals = elf64_aarch64_hash_table (link_info);
3167 globals->pic_veneer = pic_veneer;
3168
3169 BFD_ASSERT (is_aarch64_elf (output_bfd));
3170 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3171 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3172}
3173
3174#define MASK(n) ((1u << (n)) - 1)
3175
3176/* Decode the 26-bit offset of unconditional branch. */
3177static inline uint32_t
3178decode_branch_ofs_26 (uint32_t insn)
3179{
3180 return insn & MASK (26);
3181}
3182
3183/* Decode the 19-bit offset of conditional branch and compare & branch. */
3184static inline uint32_t
3185decode_cond_branch_ofs_19 (uint32_t insn)
3186{
3187 return (insn >> 5) & MASK (19);
3188}
3189
3190/* Decode the 19-bit offset of load literal. */
3191static inline uint32_t
3192decode_ld_lit_ofs_19 (uint32_t insn)
3193{
3194 return (insn >> 5) & MASK (19);
3195}
3196
3197/* Decode the 14-bit offset of test & branch. */
3198static inline uint32_t
3199decode_tst_branch_ofs_14 (uint32_t insn)
3200{
3201 return (insn >> 5) & MASK (14);
3202}
3203
3204/* Decode the 16-bit imm of move wide. */
3205static inline uint32_t
3206decode_movw_imm (uint32_t insn)
3207{
3208 return (insn >> 5) & MASK (16);
3209}
3210
3211/* Decode the 21-bit imm of adr. */
3212static inline uint32_t
3213decode_adr_imm (uint32_t insn)
3214{
3215 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3216}
3217
3218/* Decode the 12-bit imm of add immediate. */
3219static inline uint32_t
3220decode_add_imm (uint32_t insn)
3221{
3222 return (insn >> 10) & MASK (12);
3223}
3224
3225
3226/* Encode the 26-bit offset of unconditional branch. */
3227static inline uint32_t
3228reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3229{
3230 return (insn & ~MASK (26)) | (ofs & MASK (26));
3231}
3232
3233/* Encode the 19-bit offset of conditional branch and compare & branch. */
3234static inline uint32_t
3235reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3236{
3237 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3238}
3239
3240 /* Reencode the 19-bit offset of load literal. */
3241static inline uint32_t
3242reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3243{
3244 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3245}
3246
3247/* Encode the 14-bit offset of test & branch. */
3248static inline uint32_t
3249reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3250{
3251 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3252}
3253
3254/* Reencode the imm field of move wide. */
3255static inline uint32_t
3256reencode_movw_imm (uint32_t insn, uint32_t imm)
3257{
3258 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3259}
3260
3261/* Reencode the imm field of adr. */
3262static inline uint32_t
3263reencode_adr_imm (uint32_t insn, uint32_t imm)
3264{
3265 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3266 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3267}
3268
3269/* Reencode the imm field of ld/st pos immediate. */
3270static inline uint32_t
3271reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3272{
3273 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3274}
3275
3276/* Reencode the imm field of add immediate. */
3277static inline uint32_t
3278reencode_add_imm (uint32_t insn, uint32_t imm)
3279{
3280 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3281}
3282
3283/* Reencode mov[zn] to movz. */
3284static inline uint32_t
3285reencode_movzn_to_movz (uint32_t opcode)
3286{
3287 return opcode | (1 << 30);
3288}
3289
3290/* Reencode mov[zn] to movn. */
3291static inline uint32_t
3292reencode_movzn_to_movn (uint32_t opcode)
3293{
3294 return opcode & ~(1 << 30);
3295}
3296
3297/* Insert the addend/value into the instruction or data object being
3298 relocated. */
3299static bfd_reloc_status_type
3300bfd_elf_aarch64_put_addend (bfd *abfd,
3301 bfd_byte *address,
3302 reloc_howto_type *howto, bfd_signed_vma addend)
3303{
3304 bfd_reloc_status_type status = bfd_reloc_ok;
3305 bfd_signed_vma old_addend = addend;
3306 bfd_vma contents;
3307 int size;
3308
3309 size = bfd_get_reloc_size (howto);
3310 switch (size)
3311 {
3312 case 2:
3313 contents = bfd_get_16 (abfd, address);
3314 break;
3315 case 4:
3316 if (howto->src_mask != 0xffffffff)
3317 /* Must be 32-bit instruction, always little-endian. */
3318 contents = bfd_getl32 (address);
3319 else
3320 /* Must be 32-bit data (endianness dependent). */
3321 contents = bfd_get_32 (abfd, address);
3322 break;
3323 case 8:
3324 contents = bfd_get_64 (abfd, address);
3325 break;
3326 default:
3327 abort ();
3328 }
3329
3330 switch (howto->complain_on_overflow)
3331 {
3332 case complain_overflow_dont:
3333 break;
3334 case complain_overflow_signed:
3335 status = aarch64_signed_overflow (addend,
3336 howto->bitsize + howto->rightshift);
3337 break;
3338 case complain_overflow_unsigned:
3339 status = aarch64_unsigned_overflow (addend,
3340 howto->bitsize + howto->rightshift);
3341 break;
3342 case complain_overflow_bitfield:
3343 default:
3344 abort ();
3345 }
3346
3347 addend >>= howto->rightshift;
3348
3349 switch (howto->type)
3350 {
3351 case R_AARCH64_JUMP26:
3352 case R_AARCH64_CALL26:
3353 contents = reencode_branch_ofs_26 (contents, addend);
3354 break;
3355
3356 case R_AARCH64_CONDBR19:
3357 contents = reencode_cond_branch_ofs_19 (contents, addend);
3358 break;
3359
3360 case R_AARCH64_TSTBR14:
3361 contents = reencode_tst_branch_ofs_14 (contents, addend);
3362 break;
3363
3364 case R_AARCH64_LD_PREL_LO19:
3365 if (old_addend & ((1 << howto->rightshift) - 1))
3366 return bfd_reloc_overflow;
3367 contents = reencode_ld_lit_ofs_19 (contents, addend);
3368 break;
3369
3370 case R_AARCH64_TLSDESC_CALL:
3371 break;
3372
3373 case R_AARCH64_TLSGD_ADR_PAGE21:
3374 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3375 case R_AARCH64_TLSDESC_ADR_PAGE:
3376 case R_AARCH64_ADR_GOT_PAGE:
3377 case R_AARCH64_ADR_PREL_LO21:
3378 case R_AARCH64_ADR_PREL_PG_HI21:
3379 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3380 contents = reencode_adr_imm (contents, addend);
3381 break;
3382
3383 case R_AARCH64_TLSGD_ADD_LO12_NC:
3384 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3385 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3386 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3387 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3388 case R_AARCH64_ADD_ABS_LO12_NC:
3389 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3390 12 bits of the page offset following
3391 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3392 (pc-relative) page base. */
3393 contents = reencode_add_imm (contents, addend);
3394 break;
3395
3396 case R_AARCH64_LDST8_ABS_LO12_NC:
3397 case R_AARCH64_LDST16_ABS_LO12_NC:
3398 case R_AARCH64_LDST32_ABS_LO12_NC:
3399 case R_AARCH64_LDST64_ABS_LO12_NC:
3400 case R_AARCH64_LDST128_ABS_LO12_NC:
3401 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3402 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3403 case R_AARCH64_LD64_GOT_LO12_NC:
3404 if (old_addend & ((1 << howto->rightshift) - 1))
3405 return bfd_reloc_overflow;
3406 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3407 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3408 which computes the (pc-relative) page base. */
3409 contents = reencode_ldst_pos_imm (contents, addend);
3410 break;
3411
3412 /* Group relocations to create high bits of a 16, 32, 48 or 64
3413 bit signed data or abs address inline. Will change
3414 instruction to MOVN or MOVZ depending on sign of calculated
3415 value. */
3416
3417 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3418 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3419 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3420 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3421 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3422 case R_AARCH64_MOVW_SABS_G0:
3423 case R_AARCH64_MOVW_SABS_G1:
3424 case R_AARCH64_MOVW_SABS_G2:
3425 /* NOTE: We can only come here with movz or movn. */
3426 if (addend < 0)
3427 {
3428 /* Force use of MOVN. */
3429 addend = ~addend;
3430 contents = reencode_movzn_to_movn (contents);
3431 }
3432 else
3433 {
3434 /* Force use of MOVZ. */
3435 contents = reencode_movzn_to_movz (contents);
3436 }
3437 /* fall through */
3438
3439 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3440 data or abs address inline. */
3441
3442 case R_AARCH64_MOVW_UABS_G0:
3443 case R_AARCH64_MOVW_UABS_G0_NC:
3444 case R_AARCH64_MOVW_UABS_G1:
3445 case R_AARCH64_MOVW_UABS_G1_NC:
3446 case R_AARCH64_MOVW_UABS_G2:
3447 case R_AARCH64_MOVW_UABS_G2_NC:
3448 case R_AARCH64_MOVW_UABS_G3:
3449 contents = reencode_movw_imm (contents, addend);
3450 break;
3451
3452 default:
3453 /* Repack simple data. */
3454 if (howto->dst_mask & (howto->dst_mask + 1))
3455 return bfd_reloc_notsupported;
3456
3457 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3458 break;
3459 }
3460
3461 switch (size)
3462 {
3463 case 2:
3464 bfd_put_16 (abfd, contents, address);
3465 break;
3466 case 4:
3467 if (howto->dst_mask != 0xffffffff)
3468 /* Must be 32-bit instruction, always little-endian. */
3469 bfd_putl32 (contents, address);
3470 else
3471 /* Must be 32-bit data (endianness dependent). */
3472 bfd_put_32 (abfd, contents, address);
3473 break;
3474 case 8:
3475 bfd_put_64 (abfd, contents, address);
3476 break;
3477 default:
3478 abort ();
3479 }
3480
3481 return status;
3482}
3483
3484static bfd_vma
3485aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3486 struct elf64_aarch64_link_hash_table
3487 *globals, struct bfd_link_info *info,
3488 bfd_vma value, bfd *output_bfd,
3489 bfd_boolean *unresolved_reloc_p)
3490{
3491 bfd_vma off = (bfd_vma) - 1;
3492 asection *basegot = globals->root.sgot;
3493 bfd_boolean dyn = globals->root.dynamic_sections_created;
3494
3495 if (h != NULL)
3496 {
3497 off = h->got.offset;
3498 BFD_ASSERT (off != (bfd_vma) - 1);
3499 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3500 || (info->shared
3501 && SYMBOL_REFERENCES_LOCAL (info, h))
3502 || (ELF_ST_VISIBILITY (h->other)
3503 && h->root.type == bfd_link_hash_undefweak))
3504 {
3505 /* This is actually a static link, or it is a -Bsymbolic link
3506 and the symbol is defined locally. We must initialize this
3507 entry in the global offset table. Since the offset must
3508 always be a multiple of 8, we use the least significant bit
3509 to record whether we have initialized it already.
3510 When doing a dynamic link, we create a .rel(a).got relocation
3511 entry to initialize the value. This is done in the
3512 finish_dynamic_symbol routine. */
3513 if ((off & 1) != 0)
3514 off &= ~1;
3515 else
3516 {
3517 bfd_put_64 (output_bfd, value, basegot->contents + off);
3518 h->got.offset |= 1;
3519 }
3520 }
3521 else
3522 *unresolved_reloc_p = FALSE;
3523
3524 off = off + basegot->output_section->vma + basegot->output_offset;
3525 }
3526
3527 return off;
3528}
3529
3530/* Change R_TYPE to a more efficient access model where possible,
3531 return the new reloc type. */
3532
3533static unsigned int
3534aarch64_tls_transition_without_check (unsigned int r_type,
3535 struct elf_link_hash_entry *h)
3536{
3537 bfd_boolean is_local = h == NULL;
3538 switch (r_type)
3539 {
3540 case R_AARCH64_TLSGD_ADR_PAGE21:
3541 case R_AARCH64_TLSDESC_ADR_PAGE:
3542 return is_local
3543 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3544
3545 case R_AARCH64_TLSGD_ADD_LO12_NC:
3546 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3547 return is_local
3548 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3549 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3550
3551 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3552 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3553
3554 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3555 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3556
3557 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3558 case R_AARCH64_TLSDESC_CALL:
3559 /* Instructions with these relocations will become NOPs. */
3560 return R_AARCH64_NONE;
3561 }
3562
3563 return r_type;
3564}
3565
3566static unsigned int
3567aarch64_reloc_got_type (unsigned int r_type)
3568{
3569 switch (r_type)
3570 {
3571 case R_AARCH64_LD64_GOT_LO12_NC:
3572 case R_AARCH64_ADR_GOT_PAGE:
3573 return GOT_NORMAL;
3574
3575 case R_AARCH64_TLSGD_ADR_PAGE21:
3576 case R_AARCH64_TLSGD_ADD_LO12_NC:
3577 return GOT_TLS_GD;
3578
3579 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3580 case R_AARCH64_TLSDESC_ADR_PAGE:
3581 case R_AARCH64_TLSDESC_CALL:
3582 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3583 return GOT_TLSDESC_GD;
3584
3585 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3586 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3587 return GOT_TLS_IE;
3588
3589 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3590 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3591 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3592 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3593 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3594 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3595 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3596 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3597 return GOT_UNKNOWN;
3598 }
3599 return GOT_UNKNOWN;
3600}
3601
3602static bfd_boolean
3603aarch64_can_relax_tls (bfd *input_bfd,
3604 struct bfd_link_info *info,
3605 unsigned int r_type,
3606 struct elf_link_hash_entry *h,
3607 unsigned long r_symndx)
3608{
3609 unsigned int symbol_got_type;
3610 unsigned int reloc_got_type;
3611
3612 if (! IS_AARCH64_TLS_RELOC (r_type))
3613 return FALSE;
3614
3615 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3616 reloc_got_type = aarch64_reloc_got_type (r_type);
3617
3618 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3619 return TRUE;
3620
3621 if (info->shared)
3622 return FALSE;
3623
3624 if (h && h->root.type == bfd_link_hash_undefweak)
3625 return FALSE;
3626
3627 return TRUE;
3628}
3629
3630static unsigned int
3631aarch64_tls_transition (bfd *input_bfd,
3632 struct bfd_link_info *info,
3633 unsigned int r_type,
3634 struct elf_link_hash_entry *h,
3635 unsigned long r_symndx)
3636{
3637 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3638 return r_type;
3639
3640 return aarch64_tls_transition_without_check (r_type, h);
3641}
3642
3643/* Return the base VMA address which should be subtracted from real addresses
3644 when resolving R_AARCH64_TLS_DTPREL64 relocations. */
3645
3646static bfd_vma
3647dtpoff_base (struct bfd_link_info *info)
3648{
3649 /* If tls_sec is NULL, we should have signalled an error already. */
3650 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3651 return elf_hash_table (info)->tls_sec->vma;
3652}
3653
3654
3655/* Return the base VMA address which should be subtracted from real addresses
3656 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
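/* For example (illustrative numbers only): assuming TCB_SIZE is 16 and
   the TLS segment is 16-byte aligned at VMA 0x412000, tpoff_base returns
   0x412000 - 16 = 0x411ff0, so a variable at VMA 0x412008 ends up with a
   TP-relative offset of 0x412008 - 0x411ff0 = 0x18: the 16-byte TCB
   followed by the variable's offset within the segment.  */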
3657
3658static bfd_vma
3659tpoff_base (struct bfd_link_info *info)
3660{
3661 struct elf_link_hash_table *htab = elf_hash_table (info);
3662
3663 /* If tls_sec is NULL, we should have signalled an error already. */
3664 if (htab->tls_sec == NULL)
3665 return 0;
3666
3667 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3668 htab->tls_sec->alignment_power);
3669 return htab->tls_sec->vma - base;
3670}
3671
3672static bfd_vma *
3673symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3674 unsigned long r_symndx)
3675{
3676 /* Return a pointer to the recorded GOT offset for the symbol
3677 referred to by H, or for the local symbol at R_SYMNDX. */
3678 if (h != NULL)
3679 return &h->got.offset;
3680 else
3681 {
3682 /* local symbol */
3683 struct elf_aarch64_local_symbol *l;
3684
3685 l = elf64_aarch64_locals (input_bfd);
3686 return &l[r_symndx].got_offset;
3687 }
3688}
3689
3690static void
3691symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3692 unsigned long r_symndx)
3693{
3694 bfd_vma *p;
3695 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3696 *p |= 1;
3697}
3698
3699static int
3700symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3701 unsigned long r_symndx)
3702{
3703 bfd_vma value;
3704 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3705 return value & 1;
3706}
3707
3708static bfd_vma
3709symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3710 unsigned long r_symndx)
3711{
3712 bfd_vma value;
3713 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3714 value &= ~1;
3715 return value;
3716}
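/* Bit 0 of the recorded GOT offset is used by the _mark/_mark_p helpers
   above as an "entry already processed" flag; the accessors mask it off
   before use.  The tlsdesc helpers below follow the same convention.  */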
3717
3718static bfd_vma *
3719symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3720 unsigned long r_symndx)
3721{
3722 /* Return a pointer to the recorded TLSDESC GOT offset for the symbol
3723 referred to by H, or for the local symbol at R_SYMNDX. */
3724 if (h != NULL)
3725 {
3726 struct elf64_aarch64_link_hash_entry *eh;
3727 eh = (struct elf64_aarch64_link_hash_entry *) h;
3728 return &eh->tlsdesc_got_jump_table_offset;
3729 }
3730 else
3731 {
3732 /* local symbol */
3733 struct elf_aarch64_local_symbol *l;
3734
3735 l = elf64_aarch64_locals (input_bfd);
3736 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3737 }
3738}
3739
3740static void
3741symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3742 unsigned long r_symndx)
3743{
3744 bfd_vma *p;
3745 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3746 *p |= 1;
3747}
3748
3749static int
3750symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3751 struct elf_link_hash_entry *h,
3752 unsigned long r_symndx)
3753{
3754 bfd_vma value;
3755 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3756 return value & 1;
3757}
3758
3759static bfd_vma
3760symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3761 unsigned long r_symndx)
3762{
3763 bfd_vma value;
3764 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3765 value &= ~1;
3766 return value;
3767}
3768
3769/* Perform a relocation as part of a final link. */
3770static bfd_reloc_status_type
3771elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3772 bfd *input_bfd,
3773 bfd *output_bfd,
3774 asection *input_section,
3775 bfd_byte *contents,
3776 Elf_Internal_Rela *rel,
3777 bfd_vma value,
3778 struct bfd_link_info *info,
3779 asection *sym_sec,
3780 struct elf_link_hash_entry *h,
3781 bfd_boolean *unresolved_reloc_p,
3782 bfd_boolean save_addend,
3783 bfd_vma *saved_addend)
3784{
3785 unsigned int r_type = howto->type;
3786 unsigned long r_symndx;
3787 bfd_byte *hit_data = contents + rel->r_offset;
3788 bfd_vma place;
3789 bfd_signed_vma signed_addend;
3790 struct elf64_aarch64_link_hash_table *globals;
3791 bfd_boolean weak_undef_p;
3792
3793 globals = elf64_aarch64_hash_table (info);
3794
3795 BFD_ASSERT (is_aarch64_elf (input_bfd));
3796
3797 r_symndx = ELF64_R_SYM (rel->r_info);
3798
3799 /* It is possible to have linker relaxations on some TLS access
3800 models. Update our information here. */
3801 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3802
3803 if (r_type != howto->type)
3804 howto = elf64_aarch64_howto_from_type (r_type);
3805
3806 place = input_section->output_section->vma
3807 + input_section->output_offset + rel->r_offset;
3808
3809 /* Get addend, accumulating the addend for consecutive relocs
3810 which refer to the same offset. */
3811 signed_addend = saved_addend ? *saved_addend : 0;
3812 signed_addend += rel->r_addend;
3813
3814 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3815 : bfd_is_und_section (sym_sec));
3816 switch (r_type)
3817 {
3818 case R_AARCH64_NONE:
3819 case R_AARCH64_NULL:
3820 case R_AARCH64_TLSDESC_CALL:
3821 *unresolved_reloc_p = FALSE;
3822 return bfd_reloc_ok;
3823
3824 case R_AARCH64_ABS64:
3825
3826 /* When generating a shared object or relocatable executable, these
3827 relocations are copied into the output file to be resolved at
3828 run time. */
3829 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3830 && (input_section->flags & SEC_ALLOC)
3831 && (h == NULL
3832 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3833 || h->root.type != bfd_link_hash_undefweak))
3834 {
3835 Elf_Internal_Rela outrel;
3836 bfd_byte *loc;
3837 bfd_boolean skip, relocate;
3838 asection *sreloc;
3839
3840 *unresolved_reloc_p = FALSE;
3841
3842 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3843 input_section, 1);
3844 if (sreloc == NULL)
3845 return bfd_reloc_notsupported;
3846
3847 skip = FALSE;
3848 relocate = FALSE;
3849
3850 outrel.r_addend = signed_addend;
3851 outrel.r_offset =
3852 _bfd_elf_section_offset (output_bfd, info, input_section,
3853 rel->r_offset);
3854 if (outrel.r_offset == (bfd_vma) - 1)
3855 skip = TRUE;
3856 else if (outrel.r_offset == (bfd_vma) - 2)
3857 {
3858 skip = TRUE;
3859 relocate = TRUE;
3860 }
3861
3862 outrel.r_offset += (input_section->output_section->vma
3863 + input_section->output_offset);
3864
3865 if (skip)
3866 memset (&outrel, 0, sizeof outrel);
3867 else if (h != NULL
3868 && h->dynindx != -1
3869 && (!info->shared || !info->symbolic || !h->def_regular))
3870 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3871 else
3872 {
3873 int symbol;
3874
3875 /* On SVR4-ish systems, the dynamic loader cannot
3876 relocate the text and data segments independently,
3877 so the symbol does not matter. */
3878 symbol = 0;
3879 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3880 outrel.r_addend += value;
3881 }
3882
3883 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3884 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3885
3886 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3887 {
3888 /* Sanity check that we have previously allocated
3889 sufficient space in the relocation section for the
3890 number of relocations we actually want to emit. */
3891 abort ();
3892 }
3893
3894 /* If this reloc is against an external symbol, we do not want to
3895 fiddle with the addend. Otherwise, we need to include the symbol
3896 value so that it becomes an addend for the dynamic reloc. */
3897 if (!relocate)
3898 return bfd_reloc_ok;
3899
3900 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3901 contents, rel->r_offset, value,
3902 signed_addend);
3903 }
3904 else
3905 value += signed_addend;
3906 break;
3907
3908 case R_AARCH64_JUMP26:
3909 case R_AARCH64_CALL26:
3910 {
3911 asection *splt = globals->root.splt;
3912 bfd_boolean via_plt_p =
3913 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3914
3915 /* A call to an undefined weak symbol is converted to a jump to
3916 the next instruction unless a PLT entry will be created.
3917 The jump to the next instruction is optimized as a NOP.
3918 Do the same for local undefined symbols. */
3919 if (weak_undef_p && ! via_plt_p)
3920 {
3921 bfd_putl32 (INSN_NOP, hit_data);
3922 return bfd_reloc_ok;
3923 }
3924
3925 /* If the call goes through a PLT entry, make sure to
3926 check distance to the right destination address. */
3927 if (via_plt_p)
3928 {
3929 value = (splt->output_section->vma
3930 + splt->output_offset + h->plt.offset);
3931 *unresolved_reloc_p = FALSE;
3932 }
3933
3934 /* If the target symbol is global and marked as a function the
3935 relocation applies a function call or a tail call. In this
3936 situation we can veneer out-of-range branches. The veneers
3937 use IP0 and IP1, and hence cannot be used for arbitrary
3938 out-of-range branches that occur within the body of a function. */
3939 if (h && h->type == STT_FUNC)
3940 {
3941 /* Check if a stub has to be inserted because the destination
3942 is too far away. */
3943 if (! aarch64_valid_branch_p (value, place))
3944 {
3945 /* The target is out of reach, so redirect the branch to
3946 the local stub for this function. */
3947 struct elf64_aarch64_stub_hash_entry *stub_entry;
3948 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3949 sym_sec, h,
3950 rel, globals);
3951 if (stub_entry != NULL)
3952 value = (stub_entry->stub_offset
3953 + stub_entry->stub_sec->output_offset
3954 + stub_entry->stub_sec->output_section->vma);
3955 }
3956 }
3957 }
3958 value = aarch64_resolve_relocation (r_type, place, value,
3959 signed_addend, weak_undef_p);
3960 break;
3961
3962 case R_AARCH64_ABS16:
3963 case R_AARCH64_ABS32:
3964 case R_AARCH64_ADD_ABS_LO12_NC:
3965 case R_AARCH64_ADR_PREL_LO21:
3966 case R_AARCH64_ADR_PREL_PG_HI21:
3967 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3968 case R_AARCH64_CONDBR19:
3969 case R_AARCH64_LD_PREL_LO19:
3970 case R_AARCH64_LDST8_ABS_LO12_NC:
3971 case R_AARCH64_LDST16_ABS_LO12_NC:
3972 case R_AARCH64_LDST32_ABS_LO12_NC:
3973 case R_AARCH64_LDST64_ABS_LO12_NC:
3974 case R_AARCH64_LDST128_ABS_LO12_NC:
3975 case R_AARCH64_MOVW_SABS_G0:
3976 case R_AARCH64_MOVW_SABS_G1:
3977 case R_AARCH64_MOVW_SABS_G2:
3978 case R_AARCH64_MOVW_UABS_G0:
3979 case R_AARCH64_MOVW_UABS_G0_NC:
3980 case R_AARCH64_MOVW_UABS_G1:
3981 case R_AARCH64_MOVW_UABS_G1_NC:
3982 case R_AARCH64_MOVW_UABS_G2:
3983 case R_AARCH64_MOVW_UABS_G2_NC:
3984 case R_AARCH64_MOVW_UABS_G3:
3985 case R_AARCH64_PREL16:
3986 case R_AARCH64_PREL32:
3987 case R_AARCH64_PREL64:
3988 case R_AARCH64_TSTBR14:
3989 value = aarch64_resolve_relocation (r_type, place, value,
3990 signed_addend, weak_undef_p);
3991 break;
3992
3993 case R_AARCH64_LD64_GOT_LO12_NC:
3994 case R_AARCH64_ADR_GOT_PAGE:
3995 if (globals->root.sgot == NULL)
3996 BFD_ASSERT (h != NULL);
3997
3998 if (h != NULL)
3999 {
4000 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4001 output_bfd,
4002 unresolved_reloc_p);
4003 value = aarch64_resolve_relocation (r_type, place, value,
4004 0, weak_undef_p);
4005 }
4006 break;
4007
4008 case R_AARCH64_TLSGD_ADR_PAGE21:
4009 case R_AARCH64_TLSGD_ADD_LO12_NC:
4010 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4011 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4012 if (globals->root.sgot == NULL)
4013 return bfd_reloc_notsupported;
4014
4015 value = (symbol_got_offset (input_bfd, h, r_symndx)
4016 + globals->root.sgot->output_section->vma
4017 + globals->root.sgot->output_section->output_offset);
4018
4019 value = aarch64_resolve_relocation (r_type, place, value,
4020 0, weak_undef_p);
4021 *unresolved_reloc_p = FALSE;
4022 break;
4023
4024 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4025 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4026 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4027 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4028 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4029 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4030 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4031 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4032 value = aarch64_resolve_relocation (r_type, place, value,
4033 - tpoff_base (info), weak_undef_p);
4034 *unresolved_reloc_p = FALSE;
4035 break;
4036
4037 case R_AARCH64_TLSDESC_ADR_PAGE:
4038 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4039 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4040 case R_AARCH64_TLSDESC_ADD:
4041 case R_AARCH64_TLSDESC_LDR:
4042 if (globals->root.sgot == NULL)
4043 return bfd_reloc_notsupported;
4044
4045 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4046 + globals->root.sgotplt->output_section->vma
4047 + globals->root.sgotplt->output_section->output_offset
4048 + globals->sgotplt_jump_table_size);
4049
4050 value = aarch64_resolve_relocation (r_type, place, value,
4051 0, weak_undef_p);
4052 *unresolved_reloc_p = FALSE;
4053 break;
4054
4055 default:
4056 return bfd_reloc_notsupported;
4057 }
4058
4059 if (saved_addend)
4060 *saved_addend = value;
4061
4062 /* Only apply the final relocation in a sequence. */
4063 if (save_addend)
4064 return bfd_reloc_continue;
4065
4066 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4067}
4068
4069/* Handle TLS relaxations. Relaxing is possible for symbols that use
4070 R_AARCH64_TLSDESC_{ADR_PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4071 link.
4072
4073 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4074 is to then call final_link_relocate. Return other values in the
4075 case of error. */
4076
4077static bfd_reloc_status_type
4078elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4079 bfd *input_bfd, bfd_byte *contents,
4080 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4081{
4082 bfd_boolean is_local = h == NULL;
4083 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4084 unsigned long insn;
4085
4086 BFD_ASSERT (globals && input_bfd && contents && rel);
4087
4088 switch (r_type)
4089 {
4090 case R_AARCH64_TLSGD_ADR_PAGE21:
4091 case R_AARCH64_TLSDESC_ADR_PAGE:
4092 if (is_local)
4093 {
4094 /* GD->LE relaxation:
4095 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4096 or
4097 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4098 */
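	  /* 0xd2a00000 is MOVZ x0, #0, LSL #16; the 16-bit immediate is
	     filled in when the caller applies the relaxed
	     R_AARCH64_TLSLE_MOVW_TPREL_G1 relocation, since we return
	     bfd_reloc_continue below.  */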
4099 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4100 return bfd_reloc_continue;
4101 }
4102 else
4103 {
4104 /* GD->IE relaxation:
4105 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4106 or
4107 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4108 */
4109 insn = bfd_getl32 (contents + rel->r_offset);
4110 return bfd_reloc_continue;
4111 }
4112
4113 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4114 if (is_local)
4115 {
4116 /* GD->LE relaxation:
4117 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4118 */
4119 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4120 return bfd_reloc_continue;
4121 }
4122 else
4123 {
4124 /* GD->IE relaxation:
4125 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4126 */
4127 insn = bfd_getl32 (contents + rel->r_offset);
4128 insn &= 0xfffffff0;
4129 bfd_putl32 (insn, contents + rel->r_offset);
4130 return bfd_reloc_continue;
4131 }
4132
4133 case R_AARCH64_TLSGD_ADD_LO12_NC:
4134 if (is_local)
4135 {
4136 /* GD->LE relaxation
4137 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4138 bl __tls_get_addr => mrs x1, tpidr_el0
4139 nop => add x0, x1, x0
4140 */
4141
4142 /* First kill the tls_get_addr reloc on the bl instruction. */
4143 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4144 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4145
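	  /* 0xf2800000 is MOVK x0, #0 (immediate filled in by the relaxed
	     relocation), 0xd53bd041 is MRS x1, TPIDR_EL0, and 0x8b000020
	     is ADD x0, x1, x0.  */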
4146 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4147 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4148 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4149 return bfd_reloc_continue;
4150 }
4151 else
4152 {
4153 /* GD->IE relaxation
4154 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4155 BL __tls_get_addr => mrs x1, tpidr_el0
4156 R_AARCH64_CALL26
4157 NOP => add x0, x1, x0
4158 */
4159
4160 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4161
4162 /* Remove the relocation on the BL instruction. */
4163 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4164
4165 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4166
4167 /* We choose to fixup the BL and NOP instructions using the
4168 offset from the second relocation to allow flexibility in
4169 scheduling instructions between the ADD and BL. */
4170 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4171 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4172 return bfd_reloc_continue;
4173 }
4174
4175 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4176 case R_AARCH64_TLSDESC_CALL:
4177 /* GD->IE/LE relaxation:
4178 add x0, x0, #:tlsdesc_lo12:var => nop
4179 blr xd => nop
4180 */
4181 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4182 return bfd_reloc_ok;
4183
4184 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4185 /* IE->LE relaxation:
4186 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4187 */
4188 if (is_local)
4189 {
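	  /* Preserve the destination register (bits [4:0]) of the original
	     ADRP and rewrite the rest of the instruction as
	     MOVZ xd, #0, LSL #16.  */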
4190 insn = bfd_getl32 (contents + rel->r_offset);
4191 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4192 }
4193 return bfd_reloc_continue;
4194
4195 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4196 /* IE->LE relaxation:
4197 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4198 */
4199 if (is_local)
4200 {
4201 insn = bfd_getl32 (contents + rel->r_offset);
4202 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4203 }
4204 return bfd_reloc_continue;
4205
4206 default:
4207 return bfd_reloc_continue;
4208 }
4209
4210 return bfd_reloc_ok;
4211}
4212
4213/* Relocate an AArch64 ELF section. */
4214
4215static bfd_boolean
4216elf64_aarch64_relocate_section (bfd *output_bfd,
4217 struct bfd_link_info *info,
4218 bfd *input_bfd,
4219 asection *input_section,
4220 bfd_byte *contents,
4221 Elf_Internal_Rela *relocs,
4222 Elf_Internal_Sym *local_syms,
4223 asection **local_sections)
4224{
4225 Elf_Internal_Shdr *symtab_hdr;
4226 struct elf_link_hash_entry **sym_hashes;
4227 Elf_Internal_Rela *rel;
4228 Elf_Internal_Rela *relend;
4229 const char *name;
4230 struct elf64_aarch64_link_hash_table *globals;
4231 bfd_boolean save_addend = FALSE;
4232 bfd_vma addend = 0;
4233
4234 globals = elf64_aarch64_hash_table (info);
4235
4236 symtab_hdr = &elf_symtab_hdr (input_bfd);
4237 sym_hashes = elf_sym_hashes (input_bfd);
4238
4239 rel = relocs;
4240 relend = relocs + input_section->reloc_count;
4241 for (; rel < relend; rel++)
4242 {
4243 unsigned int r_type;
4244 unsigned int relaxed_r_type;
4245 reloc_howto_type *howto;
4246 unsigned long r_symndx;
4247 Elf_Internal_Sym *sym;
4248 asection *sec;
4249 struct elf_link_hash_entry *h;
4250 bfd_vma relocation;
4251 bfd_reloc_status_type r;
4252 arelent bfd_reloc;
4253 char sym_type;
4254 bfd_boolean unresolved_reloc = FALSE;
4255 char *error_message = NULL;
4256
4257 r_symndx = ELF64_R_SYM (rel->r_info);
4258 r_type = ELF64_R_TYPE (rel->r_info);
4259
4260 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4261 howto = bfd_reloc.howto;
4262
4263 h = NULL;
4264 sym = NULL;
4265 sec = NULL;
4266
4267 if (r_symndx < symtab_hdr->sh_info)
4268 {
4269 sym = local_syms + r_symndx;
4270 sym_type = ELF64_ST_TYPE (sym->st_info);
4271 sec = local_sections[r_symndx];
4272
4273 /* An object file might have a reference to a local
4274 undefined symbol. This is a daft object file, but we
4275 should at least do something about it. */
4276 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4277 && bfd_is_und_section (sec)
4278 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4279 {
4280 if (!info->callbacks->undefined_symbol
4281 (info, bfd_elf_string_from_elf_section
4282 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4283 input_bfd, input_section, rel->r_offset, TRUE))
4284 return FALSE;
4285 }
4286
4287 if (r_type >= R_AARCH64_dyn_max)
4288 {
4289 bfd_set_error (bfd_error_bad_value);
4290 return FALSE;
4291 }
4292
4293 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4294 }
4295 else
4296 {
4297 bfd_boolean warned;
4298
4299 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4300 r_symndx, symtab_hdr, sym_hashes,
4301 h, sec, relocation,
4302 unresolved_reloc, warned);
4303
4304 sym_type = h->type;
4305 }
4306
4307 if (sec != NULL && discarded_section (sec))
4308 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4309 rel, 1, relend, howto, 0, contents);
4310
4311 if (info->relocatable)
4312 {
4313 /* This is a relocatable link. We don't have to change
4314 anything, unless the reloc is against a section symbol,
4315 in which case we have to adjust according to where the
4316 section symbol winds up in the output section. */
4317 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4318 rel->r_addend += sec->output_offset;
4319 continue;
4320 }
4321
4322 if (h != NULL)
4323 name = h->root.root.string;
4324 else
4325 {
4326 name = (bfd_elf_string_from_elf_section
4327 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4328 if (name == NULL || *name == '\0')
4329 name = bfd_section_name (input_bfd, sec);
4330 }
4331
4332 if (r_symndx != 0
4333 && r_type != R_AARCH64_NONE
4334 && r_type != R_AARCH64_NULL
4335 && (h == NULL
4336 || h->root.type == bfd_link_hash_defined
4337 || h->root.type == bfd_link_hash_defweak)
4338 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4339 {
4340 (*_bfd_error_handler)
4341 ((sym_type == STT_TLS
4342 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4343 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4344 input_bfd,
4345 input_section, (long) rel->r_offset, howto->name, name);
4346 }
4347
4348
4349 /* We relax only if we can see that there can be a valid transition
4350 from a reloc type to another.
4351 We call elf64_aarch64_final_link_relocate unless we're completely
4352 done, i.e., the relaxation produced the final output we want. */
4353
4354 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4355 h, r_symndx);
4356 if (relaxed_r_type != r_type)
4357 {
4358 r_type = relaxed_r_type;
4359 howto = elf64_aarch64_howto_from_type (r_type);
4360
4361 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4362 unresolved_reloc = 0;
4363 }
4364 else
4365 r = bfd_reloc_continue;
4366
4367 /* There may be multiple consecutive relocations for the
4368 same offset. In that case we are supposed to treat the
4369 output of each relocation as the addend for the next. */
4370 if (rel + 1 < relend
4371 && rel->r_offset == rel[1].r_offset
4372 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4373 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4374 save_addend = TRUE;
4375 else
4376 save_addend = FALSE;
4377
4378 if (r == bfd_reloc_continue)
4379 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4380 input_section, contents, rel,
4381 relocation, info, sec,
4382 h, &unresolved_reloc,
4383 save_addend, &addend);
4384
4385 switch (r_type)
4386 {
4387 case R_AARCH64_TLSGD_ADR_PAGE21:
4388 case R_AARCH64_TLSGD_ADD_LO12_NC:
4389 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4390 {
4391 bfd_boolean need_relocs = FALSE;
4392 bfd_byte *loc;
4393 int indx;
4394 bfd_vma off;
4395
4396 off = symbol_got_offset (input_bfd, h, r_symndx);
4397 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4398
4399 need_relocs =
4400 (info->shared || indx != 0) &&
4401 (h == NULL
4402 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4403 || h->root.type != bfd_link_hash_undefweak);
4404
4405 BFD_ASSERT (globals->root.srelgot != NULL);
4406
4407 if (need_relocs)
4408 {
4409 Elf_Internal_Rela rela;
4410 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4411 rela.r_addend = 0;
4412 rela.r_offset = globals->root.sgot->output_section->vma +
4413 globals->root.sgot->output_offset + off;
4414
4415
4416 loc = globals->root.srelgot->contents;
4417 loc += globals->root.srelgot->reloc_count++
4418 * RELOC_SIZE (htab);
4419 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4420
4421 if (indx == 0)
4422 {
4423 bfd_put_64 (output_bfd,
4424 relocation - dtpoff_base (info),
4425 globals->root.sgot->contents + off
4426 + GOT_ENTRY_SIZE);
4427 }
4428 else
4429 {
4430 /* This TLS symbol is global. We emit a
4431 relocation to fixup the tls offset at load
4432 time. */
4433 rela.r_info =
4434 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4435 rela.r_addend = 0;
4436 rela.r_offset =
4437 (globals->root.sgot->output_section->vma
4438 + globals->root.sgot->output_offset + off
4439 + GOT_ENTRY_SIZE);
4440
4441 loc = globals->root.srelgot->contents;
4442 loc += globals->root.srelgot->reloc_count++
4443 * RELOC_SIZE (globals);
4444 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4445 bfd_put_64 (output_bfd, (bfd_vma) 0,
4446 globals->root.sgot->contents + off
4447 + GOT_ENTRY_SIZE);
4448 }
4449 }
4450 else
4451 {
4452 bfd_put_64 (output_bfd, (bfd_vma) 1,
4453 globals->root.sgot->contents + off);
4454 bfd_put_64 (output_bfd,
4455 relocation - dtpoff_base (info),
4456 globals->root.sgot->contents + off
4457 + GOT_ENTRY_SIZE);
4458 }
4459
4460 symbol_got_offset_mark (input_bfd, h, r_symndx);
4461 }
4462 break;
4463
4464 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4465 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4466 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4467 {
4468 bfd_boolean need_relocs = FALSE;
4469 bfd_byte *loc;
4470 int indx;
4471 bfd_vma off;
4472
4473 off = symbol_got_offset (input_bfd, h, r_symndx);
4474
4475 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4476
4477 need_relocs =
4478 (info->shared || indx != 0) &&
4479 (h == NULL
4480 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4481 || h->root.type != bfd_link_hash_undefweak);
4482
4483 BFD_ASSERT (globals->root.srelgot != NULL);
4484
4485 if (need_relocs)
4486 {
4487 Elf_Internal_Rela rela;
4488
4489 if (indx == 0)
4490 rela.r_addend = relocation - dtpoff_base (info);
4491 else
4492 rela.r_addend = 0;
4493
4494 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4495 rela.r_offset = globals->root.sgot->output_section->vma +
4496 globals->root.sgot->output_offset + off;
4497
4498 loc = globals->root.srelgot->contents;
4499 loc += globals->root.srelgot->reloc_count++
4500 * RELOC_SIZE (htab);
4501
4502 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4503
4504 bfd_put_64 (output_bfd, rela.r_addend,
4505 globals->root.sgot->contents + off);
4506 }
4507 else
4508 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4509 globals->root.sgot->contents + off);
4510
4511 symbol_got_offset_mark (input_bfd, h, r_symndx);
4512 }
4513 break;
4514
4515 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4516 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4517 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4518 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4519 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4520 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4521 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4522 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
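	  /* Local-exec accesses are resolved entirely at link time and
	     need no GOT entry, so there is nothing more to do here.  */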
4523 break;
4524
4525 case R_AARCH64_TLSDESC_ADR_PAGE:
4526 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4527 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4528 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4529 {
4530 bfd_boolean need_relocs = FALSE;
4531 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4532 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4533
4534 need_relocs = (h == NULL
4535 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4536 || h->root.type != bfd_link_hash_undefweak);
4537
4538 BFD_ASSERT (globals->root.srelgot != NULL);
4539 BFD_ASSERT (globals->root.sgot != NULL);
4540
4541 if (need_relocs)
4542 {
4543 bfd_byte *loc;
4544 Elf_Internal_Rela rela;
4545 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4546 rela.r_addend = 0;
4547 rela.r_offset = (globals->root.sgotplt->output_section->vma
4548 + globals->root.sgotplt->output_offset
4549 + off + globals->sgotplt_jump_table_size);
4550
4551 if (indx == 0)
4552 rela.r_addend = relocation - dtpoff_base (info);
4553
4554 /* Allocate the next available slot in the PLT reloc
4555 section to hold our R_AARCH64_TLSDESC, the next
4556 available slot is determined from reloc_count,
4557 which we step. But note, reloc_count was
4558 artificially moved down while allocating slots for
4559 real PLT relocs such that all of the PLT relocs
4560 will fit above the initial reloc_count and the
4561 extra stuff will fit below. */
4562 loc = globals->root.srelplt->contents;
4563 loc += globals->root.srelplt->reloc_count++
4564 * RELOC_SIZE (globals);
4565
4566 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4567
4568 bfd_put_64 (output_bfd, (bfd_vma) 0,
4569 globals->root.sgotplt->contents + off +
4570 globals->sgotplt_jump_table_size);
4571 bfd_put_64 (output_bfd, (bfd_vma) 0,
4572 globals->root.sgotplt->contents + off +
4573 globals->sgotplt_jump_table_size +
4574 GOT_ENTRY_SIZE);
4575 }
4576
4577 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4578 }
4579 break;
4580 }
4581
4582 if (!save_addend)
4583 addend = 0;
4584
4585
4586 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4587 because such sections are not SEC_ALLOC and thus ld.so will
4588 not process them. */
4589 if (unresolved_reloc
4590 && !((input_section->flags & SEC_DEBUGGING) != 0
4591 && h->def_dynamic)
4592 && _bfd_elf_section_offset (output_bfd, info, input_section,
4593 +rel->r_offset) != (bfd_vma) - 1)
4594 {
4595 (*_bfd_error_handler)
4596 (_
4597 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4598 input_bfd, input_section, (long) rel->r_offset, howto->name,
4599 h->root.root.string);
4600 return FALSE;
4601 }
4602
4603 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4604 {
4605 switch (r)
4606 {
4607 case bfd_reloc_overflow:
4608 /* If the overflowing reloc was to an undefined symbol,
4609 we have already printed one error message and there
4610 is no point complaining again. */
4611 if ((!h ||
4612 h->root.type != bfd_link_hash_undefined)
4613 && (!((*info->callbacks->reloc_overflow)
4614 (info, (h ? &h->root : NULL), name, howto->name,
4615 (bfd_vma) 0, input_bfd, input_section,
4616 rel->r_offset))))
4617 return FALSE;
4618 break;
4619
4620 case bfd_reloc_undefined:
4621 if (!((*info->callbacks->undefined_symbol)
4622 (info, name, input_bfd, input_section,
4623 rel->r_offset, TRUE)))
4624 return FALSE;
4625 break;
4626
4627 case bfd_reloc_outofrange:
4628 error_message = _("out of range");
4629 goto common_error;
4630
4631 case bfd_reloc_notsupported:
4632 error_message = _("unsupported relocation");
4633 goto common_error;
4634
4635 case bfd_reloc_dangerous:
4636 /* error_message should already be set. */
4637 goto common_error;
4638
4639 default:
4640 error_message = _("unknown error");
4641 /* Fall through. */
4642
4643 common_error:
4644 BFD_ASSERT (error_message != NULL);
4645 if (!((*info->callbacks->reloc_dangerous)
4646 (info, error_message, input_bfd, input_section,
4647 rel->r_offset)))
4648 return FALSE;
4649 break;
4650 }
4651 }
4652 }
4653
4654 return TRUE;
4655}
4656
4657/* Set the right machine number. */
4658
4659static bfd_boolean
4660elf64_aarch64_object_p (bfd *abfd)
4661{
4662 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4663 return TRUE;
4664}
4665
4666/* Function to keep AArch64 specific flags in the ELF header. */
4667
4668static bfd_boolean
4669elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4670{
4671 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4672 {
4673 }
4674 else
4675 {
4676 elf_elfheader (abfd)->e_flags = flags;
4677 elf_flags_init (abfd) = TRUE;
4678 }
4679
4680 return TRUE;
4681}
4682
4683/* Copy backend specific data from one object module to another. */
4684
4685static bfd_boolean
4686elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4687{
4688 flagword in_flags;
4689
4690 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4691 return TRUE;
4692
4693 in_flags = elf_elfheader (ibfd)->e_flags;
4694
4695 elf_elfheader (obfd)->e_flags = in_flags;
4696 elf_flags_init (obfd) = TRUE;
4697
4698 /* Also copy the EI_OSABI field. */
4699 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4700 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4701
4702 /* Copy object attributes. */
4703 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4704
4705 return TRUE;
4706}
4707
4708/* Merge backend specific data from an object file to the output
4709 object file when linking. */
4710
4711static bfd_boolean
4712elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4713{
4714 flagword out_flags;
4715 flagword in_flags;
4716 bfd_boolean flags_compatible = TRUE;
4717 asection *sec;
4718
4719 /* Check if we have the same endianness. */
4720 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4721 return FALSE;
4722
4723 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4724 return TRUE;
4725
4726 /* The input BFD must have had its flags initialised. */
4727 /* The following seems bogus to me -- The flags are initialized in
4728 the assembler but I don't think an elf_flags_init field is
4729 written into the object. */
4730 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4731
4732 in_flags = elf_elfheader (ibfd)->e_flags;
4733 out_flags = elf_elfheader (obfd)->e_flags;
4734
4735 if (!elf_flags_init (obfd))
4736 {
4737 /* If the input is the default architecture and had the default
4738 flags then do not bother setting the flags for the output
4739 architecture; instead allow future merges to do this. If no
4740 future merges ever set these flags then they will retain their
4741 uninitialised values, which, surprise surprise, correspond
4742 to the default values. */
4743 if (bfd_get_arch_info (ibfd)->the_default
4744 && elf_elfheader (ibfd)->e_flags == 0)
4745 return TRUE;
4746
4747 elf_flags_init (obfd) = TRUE;
4748 elf_elfheader (obfd)->e_flags = in_flags;
4749
4750 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4751 && bfd_get_arch_info (obfd)->the_default)
4752 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4753 bfd_get_mach (ibfd));
4754
4755 return TRUE;
4756 }
4757
4758 /* Identical flags must be compatible. */
4759 if (in_flags == out_flags)
4760 return TRUE;
4761
4762 /* Check to see if the input BFD actually contains any sections. If
4763 not, its flags may not have been initialised either, but it
4764 cannot actually cause any incompatibility. Do not short-circuit
4765 dynamic objects; their section list may be emptied by
4766 elf_link_add_object_symbols.
4767
4768 Also check to see if there are no code sections in the input.
4769 In this case there is no need to check for code specific flags.
4770 XXX - do we need to worry about floating-point format compatibility
4771 in data sections? */
4772 if (!(ibfd->flags & DYNAMIC))
4773 {
4774 bfd_boolean null_input_bfd = TRUE;
4775 bfd_boolean only_data_sections = TRUE;
4776
4777 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4778 {
4779 if ((bfd_get_section_flags (ibfd, sec)
4780 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4781 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4782 only_data_sections = FALSE;
4783
4784 null_input_bfd = FALSE;
4785 break;
4786 }
4787
4788 if (null_input_bfd || only_data_sections)
4789 return TRUE;
4790 }
4791
4792 return flags_compatible;
4793}
4794
4795/* Display the flags field. */
4796
4797static bfd_boolean
4798elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4799{
4800 FILE *file = (FILE *) ptr;
4801 unsigned long flags;
4802
4803 BFD_ASSERT (abfd != NULL && ptr != NULL);
4804
4805 /* Print normal ELF private data. */
4806 _bfd_elf_print_private_bfd_data (abfd, ptr);
4807
4808 flags = elf_elfheader (abfd)->e_flags;
4809 /* Ignore init flag - it may not be set, despite the flags field
4810 containing valid data. */
4811
4812 /* xgettext:c-format */
4813 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4814
4815 if (flags)
4816 fprintf (file, _("<Unrecognised flag bits set>"));
4817
4818 fputc ('\n', file);
4819
4820 return TRUE;
4821}
4822
4823/* Update the got entry reference counts for the section being removed. */
4824
4825static bfd_boolean
4826elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED,
4827 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4828 asection *sec ATTRIBUTE_UNUSED,
4829 const Elf_Internal_Rela *
4830 relocs ATTRIBUTE_UNUSED)
4831{
4832 return TRUE;
4833}
4834
4835/* Adjust a symbol defined by a dynamic object and referenced by a
4836 regular object. The current definition is in some section of the
4837 dynamic object, but we're not including those sections. We have to
4838 change the definition to something the rest of the link can
4839 understand. */
4840
4841static bfd_boolean
4842elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
4843 struct elf_link_hash_entry *h)
4844{
4845 struct elf64_aarch64_link_hash_table *htab;
4846 asection *s;
4847
4848 /* If this is a function, put it in the procedure linkage table. We
4849 will fill in the contents of the procedure linkage table later,
4850 when we know the address of the .got section. */
4851 if (h->type == STT_FUNC || h->needs_plt)
4852 {
4853 if (h->plt.refcount <= 0
4854 || SYMBOL_CALLS_LOCAL (info, h)
4855 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
4856 && h->root.type == bfd_link_hash_undefweak))
4857 {
4858 /* This case can occur if we saw a CALL26 reloc in
4859 an input file, but the symbol wasn't referred to
4860 by a dynamic object or all references were
4861 garbage collected. In that case we can end up
4862 resolving the branch directly, and no PLT entry is needed. */
4863 h->plt.offset = (bfd_vma) - 1;
4864 h->needs_plt = 0;
4865 }
4866
4867 return TRUE;
4868 }
4869 else
4870 /* It's possible that we incorrectly decided a .plt reloc was
4871 needed for a PC-relative reloc to a non-function sym in
4872 check_relocs. We can't decide accurately between function and
4873 non-function syms in check_relocs; objects loaded later in
4874 the link may change h->type. So fix it now. */
4875 h->plt.offset = (bfd_vma) - 1;
4876
4877
4878 /* If this is a weak symbol, and there is a real definition, the
4879 processor independent code will have arranged for us to see the
4880 real definition first, and we can just use the same value. */
4881 if (h->u.weakdef != NULL)
4882 {
4883 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
4884 || h->u.weakdef->root.type == bfd_link_hash_defweak);
4885 h->root.u.def.section = h->u.weakdef->root.u.def.section;
4886 h->root.u.def.value = h->u.weakdef->root.u.def.value;
4887 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
4888 h->non_got_ref = h->u.weakdef->non_got_ref;
4889 return TRUE;
4890 }
4891
4892 /* If we are creating a shared library, we must presume that the
4893 only references to the symbol are via the global offset table.
4894 For such cases we need not do anything here; the relocations will
4895 be handled correctly by relocate_section. */
4896 if (info->shared)
4897 return TRUE;
4898
4899 /* If there are no references to this symbol that do not use the
4900 GOT, we don't need to generate a copy reloc. */
4901 if (!h->non_got_ref)
4902 return TRUE;
4903
4904 /* If -z nocopyreloc was given, we won't generate them either. */
4905 if (info->nocopyreloc)
4906 {
4907 h->non_got_ref = 0;
4908 return TRUE;
4909 }
4910
4911 /* We must allocate the symbol in our .dynbss section, which will
4912 become part of the .bss section of the executable. There will be
4913 an entry for this symbol in the .dynsym section. The dynamic
4914 object will contain position independent code, so all references
4915 from the dynamic object to this symbol will go through the global
4916 offset table. The dynamic linker will use the .dynsym entry to
4917 determine the address it must put in the global offset table, so
4918 both the dynamic object and the regular object will refer to the
4919 same memory location for the variable. */
4920
4921 htab = elf64_aarch64_hash_table (info);
4922
4923 /* We must generate an R_AARCH64_COPY reloc to tell the dynamic linker
4924 to copy the initial value out of the dynamic object and into the
4925 runtime process image. */
4926 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
4927 {
4928 htab->srelbss->size += RELOC_SIZE (htab);
4929 h->needs_copy = 1;
4930 }
4931
4932 s = htab->sdynbss;
4933
4934 return _bfd_elf_adjust_dynamic_copy (h, s);
4935
4936}
4937
4938static bfd_boolean
4939elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
4940{
4941 struct elf_aarch64_local_symbol *locals;
4942 locals = elf64_aarch64_locals (abfd);
4943 if (locals == NULL)
4944 {
4945 locals = (struct elf_aarch64_local_symbol *)
4946 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
4947 if (locals == NULL)
4948 return FALSE;
4949 elf64_aarch64_locals (abfd) = locals;
4950 }
4951 return TRUE;
4952}
4953
4954/* Look through the relocs for a section during the first phase. */
4955
4956static bfd_boolean
4957elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
4958 asection *sec, const Elf_Internal_Rela *relocs)
4959{
4960 Elf_Internal_Shdr *symtab_hdr;
4961 struct elf_link_hash_entry **sym_hashes;
4962 const Elf_Internal_Rela *rel;
4963 const Elf_Internal_Rela *rel_end;
4964 asection *sreloc;
4965
4966 struct elf64_aarch64_link_hash_table *htab;
4967
4968 unsigned long nsyms;
4969
4970 if (info->relocatable)
4971 return TRUE;
4972
4973 BFD_ASSERT (is_aarch64_elf (abfd));
4974
4975 htab = elf64_aarch64_hash_table (info);
4976 sreloc = NULL;
4977
4978 symtab_hdr = &elf_symtab_hdr (abfd);
4979 sym_hashes = elf_sym_hashes (abfd);
4980 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
4981
4982 rel_end = relocs + sec->reloc_count;
4983 for (rel = relocs; rel < rel_end; rel++)
4984 {
4985 struct elf_link_hash_entry *h;
4986 unsigned long r_symndx;
4987 unsigned int r_type;
4988
4989 r_symndx = ELF64_R_SYM (rel->r_info);
4990 r_type = ELF64_R_TYPE (rel->r_info);
4991
4992 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
4993 {
4994 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
4995 r_symndx);
4996 return FALSE;
4997 }
4998
4999 if (r_symndx >= nsyms
5000 /* PR 9934: It is possible to have relocations that do not
5001 refer to symbols, thus it is also possible to have an
5002 object file containing relocations but no symbol table. */
5003 && (r_symndx > 0 || nsyms > 0))
5004 {
5005 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5006 r_symndx);
5007 return FALSE;
5008 }
5009
5010 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
5011 h = NULL;
5012 else
5013 {
5014 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5015 while (h->root.type == bfd_link_hash_indirect
5016 || h->root.type == bfd_link_hash_warning)
5017 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5018 }
5019
5020 /* Could be done earlier, if h were already available. */
5021 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5022
5023 switch (r_type)
5024 {
5025 case R_AARCH64_ABS64:
5026
5027 /* We don't need to handle relocs into sections not going into
5028 the "real" output. */
5029 if ((sec->flags & SEC_ALLOC) == 0)
5030 break;
5031
5032 if (h != NULL)
5033 {
5034 if (!info->shared)
5035 h->non_got_ref = 1;
5036
5037 h->plt.refcount += 1;
5038 h->pointer_equality_needed = 1;
5039 }
5040
5041 /* No need to do anything if we're not creating a shared
5042 object. */
5043 if (! info->shared)
5044 break;
5045
5046 {
5047 struct elf_dyn_relocs *p;
5048 struct elf_dyn_relocs **head;
5049
5050 /* We must copy these reloc types into the output file.
5051 Create a reloc section in dynobj and make room for
5052 this reloc. */
5053 if (sreloc == NULL)
5054 {
5055 if (htab->root.dynobj == NULL)
5056 htab->root.dynobj = abfd;
5057
5058 sreloc = _bfd_elf_make_dynamic_reloc_section
5059 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5060
5061 if (sreloc == NULL)
5062 return FALSE;
5063 }
5064
5065 /* If this is a global symbol, we count the number of
5066 relocations we need for this symbol. */
5067 if (h != NULL)
5068 {
5069 struct elf64_aarch64_link_hash_entry *eh;
5070 eh = (struct elf64_aarch64_link_hash_entry *) h;
5071 head = &eh->dyn_relocs;
5072 }
5073 else
5074 {
5075 /* Track dynamic relocs needed for local syms too.
5076 We really need local syms available to do this
5077 easily. Oh well. */
5078
5079 asection *s;
5080 void **vpp;
5081 Elf_Internal_Sym *isym;
5082
5083 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5084 abfd, r_symndx);
5085 if (isym == NULL)
5086 return FALSE;
5087
5088 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5089 if (s == NULL)
5090 s = sec;
5091
5092 /* Beware of type punned pointers vs strict aliasing
5093 rules. */
5094 vpp = &(elf_section_data (s)->local_dynrel);
5095 head = (struct elf_dyn_relocs **) vpp;
5096 }
5097
5098 p = *head;
5099 if (p == NULL || p->sec != sec)
5100 {
5101 bfd_size_type amt = sizeof *p;
5102 p = ((struct elf_dyn_relocs *)
5103 bfd_zalloc (htab->root.dynobj, amt));
5104 if (p == NULL)
5105 return FALSE;
5106 p->next = *head;
5107 *head = p;
5108 p->sec = sec;
5109 }
5110
5111 p->count += 1;
5112
5113 }
5114 break;
5115
5116 /* RR: We probably want to keep a consistency check that
5117 there are no dangling GOT_PAGE relocs. */
5118 case R_AARCH64_LD64_GOT_LO12_NC:
5119 case R_AARCH64_ADR_GOT_PAGE:
5120 case R_AARCH64_TLSGD_ADR_PAGE21:
5121 case R_AARCH64_TLSGD_ADD_LO12_NC:
5122 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5123 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5124 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5125 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5126 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5127 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5128 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5129 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5130 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5131 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5132 case R_AARCH64_TLSDESC_ADR_PAGE:
5133 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5134 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5135 {
5136 unsigned got_type;
5137 unsigned old_got_type;
5138
5139 got_type = aarch64_reloc_got_type (r_type);
5140
5141 if (h)
5142 {
5143 h->got.refcount += 1;
5144 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5145 }
5146 else
5147 {
5148 struct elf_aarch64_local_symbol *locals;
5149
5150 if (!elf64_aarch64_allocate_local_symbols
5151 (abfd, symtab_hdr->sh_info))
5152 return FALSE;
5153
5154 locals = elf64_aarch64_locals (abfd);
5155 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5156 locals[r_symndx].got_refcount += 1;
5157 old_got_type = locals[r_symndx].got_type;
5158 }
5159
5160 /* If a variable is accessed with both general dynamic TLS
5161 methods, two slots may be created. */
5162 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5163 got_type |= old_got_type;
5164
5165 /* We will already have issued an error message if there
5166 is a TLS/non-TLS mismatch, based on the symbol type.
5167 So just combine any TLS types needed. */
5168 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5169 && got_type != GOT_NORMAL)
5170 got_type |= old_got_type;
5171
5172 /* If the symbol is accessed by both IE and GD methods, we
5173 are able to relax. Turn off the GD flag, without
5174 messing up with any other kind of TLS types that may be
5175 involved. */
5176 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5177 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5178
5179 if (old_got_type != got_type)
5180 {
5181 if (h != NULL)
5182 elf64_aarch64_hash_entry (h)->got_type = got_type;
5183 else
5184 {
5185 struct elf_aarch64_local_symbol *locals;
5186 locals = elf64_aarch64_locals (abfd);
5187 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5188 locals[r_symndx].got_type = got_type;
5189 }
5190 }
5191
5192 if (htab->root.sgot == NULL)
5193 {
5194 if (htab->root.dynobj == NULL)
5195 htab->root.dynobj = abfd;
5196 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5197 return FALSE;
5198 }
5199 break;
5200 }
5201
5202 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5203 case R_AARCH64_ADR_PREL_PG_HI21:
5204 if (h != NULL && info->executable)
5205 {
5206 /* If this reloc is in a read-only section, we might
5207 need a copy reloc. We can't check reliably at this
5208 stage whether the section is read-only, as input
5209 sections have not yet been mapped to output sections.
5210 Tentatively set the flag for now, and correct in
5211 adjust_dynamic_symbol. */
5212 h->non_got_ref = 1;
5213 h->plt.refcount += 1;
5214 h->pointer_equality_needed = 1;
5215 }
5216 /* FIXME: RR need to handle these in shared libraries
5217 and essentially bomb out, as these are non-PIC
5218 relocations in shared libraries. */
5219 break;
5220
5221 case R_AARCH64_CALL26:
5222 case R_AARCH64_JUMP26:
5223 /* If this is a local symbol then we resolve it
5224 directly without creating a PLT entry. */
5225 if (h == NULL)
5226 continue;
5227
5228 h->needs_plt = 1;
5229 h->plt.refcount += 1;
5230 break;
5231 }
5232 }
5233 return TRUE;
5234}
5235
5236/* Treat mapping symbols as special target symbols. */
5237
5238static bfd_boolean
5239elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5240 asymbol *sym)
5241{
5242 return bfd_is_aarch64_special_symbol_name (sym->name,
5243 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5244}
5245
5246/* This is a copy of elf_find_function () from elf.c except that
5247 AArch64 mapping symbols are ignored when looking for function names. */
5248
5249static bfd_boolean
5250aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5251 asection *section,
5252 asymbol **symbols,
5253 bfd_vma offset,
5254 const char **filename_ptr,
5255 const char **functionname_ptr)
5256{
5257 const char *filename = NULL;
5258 asymbol *func = NULL;
5259 bfd_vma low_func = 0;
5260 asymbol **p;
5261
5262 for (p = symbols; *p != NULL; p++)
5263 {
5264 elf_symbol_type *q;
5265
5266 q = (elf_symbol_type *) * p;
5267
5268 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5269 {
5270 default:
5271 break;
5272 case STT_FILE:
5273 filename = bfd_asymbol_name (&q->symbol);
5274 break;
5275 case STT_FUNC:
5276 case STT_NOTYPE:
5277 /* Skip mapping symbols. */
5278 if ((q->symbol.flags & BSF_LOCAL)
5279 && (bfd_is_aarch64_special_symbol_name
5280 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5281 continue;
5282 /* Fall through. */
5283 if (bfd_get_section (&q->symbol) == section
5284 && q->symbol.value >= low_func && q->symbol.value <= offset)
5285 {
5286 func = (asymbol *) q;
5287 low_func = q->symbol.value;
5288 }
5289 break;
5290 }
5291 }
5292
5293 if (func == NULL)
5294 return FALSE;
5295
5296 if (filename_ptr)
5297 *filename_ptr = filename;
5298 if (functionname_ptr)
5299 *functionname_ptr = bfd_asymbol_name (func);
5300
5301 return TRUE;
5302}
5303
5304
5305/* Find the nearest line to a particular section and offset, for error
5306 reporting. This code is a duplicate of the code in elf.c, except
5307 that it uses aarch64_elf_find_function. */
5308
5309static bfd_boolean
5310elf64_aarch64_find_nearest_line (bfd *abfd,
5311 asection *section,
5312 asymbol **symbols,
5313 bfd_vma offset,
5314 const char **filename_ptr,
5315 const char **functionname_ptr,
5316 unsigned int *line_ptr)
5317{
5318 bfd_boolean found = FALSE;
5319
5320 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5321 toolchain uses it. */
5322
5323 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5324 section, symbols, offset,
5325 filename_ptr, functionname_ptr,
5326 line_ptr, NULL, 0,
5327 &elf_tdata (abfd)->dwarf2_find_line_info))
5328 {
5329 if (!*functionname_ptr)
5330 aarch64_elf_find_function (abfd, section, symbols, offset,
5331 *filename_ptr ? NULL : filename_ptr,
5332 functionname_ptr);
5333
5334 return TRUE;
5335 }
5336
5337 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5338 &found, filename_ptr,
5339 functionname_ptr, line_ptr,
5340 &elf_tdata (abfd)->line_info))
5341 return FALSE;
5342
5343 if (found && (*functionname_ptr || *line_ptr))
5344 return TRUE;
5345
5346 if (symbols == NULL)
5347 return FALSE;
5348
5349 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5350 filename_ptr, functionname_ptr))
5351 return FALSE;
5352
5353 *line_ptr = 0;
5354 return TRUE;
5355}
5356
5357static bfd_boolean
5358elf64_aarch64_find_inliner_info (bfd *abfd,
5359 const char **filename_ptr,
5360 const char **functionname_ptr,
5361 unsigned int *line_ptr)
5362{
5363 bfd_boolean found;
5364 found = _bfd_dwarf2_find_inliner_info
5365 (abfd, filename_ptr,
5366 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5367 return found;
5368}
5369
5370
5371static void
5372elf64_aarch64_post_process_headers (bfd *abfd,
5373 struct bfd_link_info *link_info
5374 ATTRIBUTE_UNUSED)
5375{
5376 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5377
5378 i_ehdrp = elf_elfheader (abfd);
5379 i_ehdrp->e_ident[EI_OSABI] = 0;
5380 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5381}
5382
5383static enum elf_reloc_type_class
5384elf64_aarch64_reloc_type_class (const Elf_Internal_Rela *rela)
5385{
5386 switch ((int) ELF64_R_TYPE (rela->r_info))
5387 {
5388 case R_AARCH64_RELATIVE:
5389 return reloc_class_relative;
5390 case R_AARCH64_JUMP_SLOT:
5391 return reloc_class_plt;
5392 case R_AARCH64_COPY:
5393 return reloc_class_copy;
5394 default:
5395 return reloc_class_normal;
5396 }
5397}
5398
5399/* Mark AArch64 note sections as link-once; duplicates must have the same contents. */
5400
5401static bfd_boolean
5402elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5403{
5404 if (hdr->sh_type == SHT_NOTE)
5405 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5406
5407 return TRUE;
5408}
5409
5410/* Handle an AArch64 specific section when reading an object file. This is
5411 called when bfd_section_from_shdr finds a section with an unknown
5412 type. */
5413
5414static bfd_boolean
5415elf64_aarch64_section_from_shdr (bfd *abfd,
5416 Elf_Internal_Shdr *hdr,
5417 const char *name, int shindex)
5418{
5419 /* There ought to be a place to keep ELF backend specific flags, but
5420 at the moment there isn't one. We just keep track of the
5421 sections by their name, instead. Fortunately, the ABI gives
5422 names for all the AArch64 specific sections, so we will probably get
5423 away with this. */
5424 switch (hdr->sh_type)
5425 {
5426 case SHT_AARCH64_ATTRIBUTES:
5427 break;
5428
5429 default:
5430 return FALSE;
5431 }
5432
5433 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5434 return FALSE;
5435
5436 return TRUE;
5437}
5438
5439/* A structure used to record a list of sections, independently
5440 of the next and prev fields in the asection structure. */
5441typedef struct section_list
5442{
5443 asection *sec;
5444 struct section_list *next;
5445 struct section_list *prev;
5446}
5447section_list;
5448
5449/* Unfortunately we need to keep a list of sections for which
5450 an _aarch64_elf_section_data structure has been allocated. This
5451 is because it is possible for functions like elf64_aarch64_write_section
5452 to be called on a section which has had an elf_data_structure
5453 allocated for it (and so the used_by_bfd field is valid) but
5454 for which the AArch64 extended version of this structure - the
5455 _aarch64_elf_section_data structure - has not been allocated. */
5456static section_list *sections_with_aarch64_elf_section_data = NULL;
5457
5458static void
5459record_section_with_aarch64_elf_section_data (asection *sec)
5460{
5461 struct section_list *entry;
5462
5463 entry = bfd_malloc (sizeof (*entry));
5464 if (entry == NULL)
5465 return;
5466 entry->sec = sec;
5467 entry->next = sections_with_aarch64_elf_section_data;
5468 entry->prev = NULL;
5469 if (entry->next != NULL)
5470 entry->next->prev = entry;
5471 sections_with_aarch64_elf_section_data = entry;
5472}
5473
5474static struct section_list *
5475find_aarch64_elf_section_entry (asection *sec)
5476{
5477 struct section_list *entry;
5478 static struct section_list *last_entry = NULL;
5479
5480 /* This is a short cut for the typical case where the sections are added
5481 to the sections_with_aarch64_elf_section_data list in forward order and
5482 then looked up here in backwards order. This makes a real difference
5483 to the ld-srec/sec64k.exp linker test. */
5484 entry = sections_with_aarch64_elf_section_data;
5485 if (last_entry != NULL)
5486 {
5487 if (last_entry->sec == sec)
5488 entry = last_entry;
5489 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5490 entry = last_entry->next;
5491 }
5492
5493 for (; entry; entry = entry->next)
5494 if (entry->sec == sec)
5495 break;
5496
5497 if (entry)
5498 /* Record the entry prior to this one - it is the entry we are
5499 most likely to want to locate next time. Also this way if we
5500 have been called from
5501 unrecord_section_with_aarch64_elf_section_data () we will not
5502 be caching a pointer that is about to be freed. */
5503 last_entry = entry->prev;
5504
5505 return entry;
5506}
5507
5508static void
5509unrecord_section_with_aarch64_elf_section_data (asection *sec)
5510{
5511 struct section_list *entry;
5512
5513 entry = find_aarch64_elf_section_entry (sec);
5514
5515 if (entry)
5516 {
5517 if (entry->prev != NULL)
5518 entry->prev->next = entry->next;
5519 if (entry->next != NULL)
5520 entry->next->prev = entry->prev;
5521 if (entry == sections_with_aarch64_elf_section_data)
5522 sections_with_aarch64_elf_section_data = entry->next;
5523 free (entry);
5524 }
5525}
5526
5527
5528typedef struct
5529{
5530 void *finfo;
5531 struct bfd_link_info *info;
5532 asection *sec;
5533 int sec_shndx;
5534 int (*func) (void *, const char *, Elf_Internal_Sym *,
5535 asection *, struct elf_link_hash_entry *);
5536} output_arch_syminfo;
5537
5538enum map_symbol_type
5539{
5540 AARCH64_MAP_INSN,
5541 AARCH64_MAP_DATA
5542};
5543
5544
5545/* Output a single mapping symbol. */
5546
5547static bfd_boolean
5548elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5549 enum map_symbol_type type, bfd_vma offset)
5550{
5551 static const char *names[2] = { "$x", "$d" };
5552 Elf_Internal_Sym sym;
5553
5554 sym.st_value = (osi->sec->output_section->vma
5555 + osi->sec->output_offset + offset);
5556 sym.st_size = 0;
5557 sym.st_other = 0;
5558 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5559 sym.st_shndx = osi->sec_shndx;
5560 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5561}
5562
5563
5564
5565/* Output mapping symbols for PLT entries associated with H. */
5566
5567static bfd_boolean
5568elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5569{
5570 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5571 bfd_vma addr;
5572
5573 if (h->root.type == bfd_link_hash_indirect)
5574 return TRUE;
5575
5576 if (h->root.type == bfd_link_hash_warning)
5577 /* When warning symbols are created, they **replace** the "real"
5578 entry in the hash table, thus we never get to see the real
5579 symbol in a hash traversal. So look at it now. */
5580 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5581
5582 if (h->plt.offset == (bfd_vma) - 1)
5583 return TRUE;
5584
5585 addr = h->plt.offset;
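  /* Only the symbol whose PLT entry immediately follows the PLT
     header emits a mapping symbol: a single $x there is enough to
     mark the whole PLT as code, since the PLT contains no inline
     data.  (The value 32 is presumably the size of the special PLT0
     entry; it is not taken from a named constant here.)  */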
5586 if (addr == 32)
5587 {
5588 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5589 return FALSE;
5590 }
5591 return TRUE;
5592}
5593
5594
5595/* Output a single local symbol for a generated stub. */
5596
5597static bfd_boolean
5598elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5599 bfd_vma offset, bfd_vma size)
5600{
5601 Elf_Internal_Sym sym;
5602
5603 sym.st_value = (osi->sec->output_section->vma
5604 + osi->sec->output_offset + offset);
5605 sym.st_size = size;
5606 sym.st_other = 0;
5607 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5608 sym.st_shndx = osi->sec_shndx;
5609 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5610}
5611
5612static bfd_boolean
5613aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5614{
5615 struct elf64_aarch64_stub_hash_entry *stub_entry;
5616 asection *stub_sec;
5617 bfd_vma addr;
5618 char *stub_name;
5619 output_arch_syminfo *osi;
5620
5621 /* Massage our args to the form they really have. */
5622 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5623 osi = (output_arch_syminfo *) in_arg;
5624
5625 stub_sec = stub_entry->stub_sec;
5626
5627 /* Ensure this stub is attached to the current section being
5628 processed. */
5629 if (stub_sec != osi->sec)
5630 return TRUE;
5631
5632 addr = (bfd_vma) stub_entry->stub_offset;
5633
5634 stub_name = stub_entry->output_name;
5635
5636 switch (stub_entry->stub_type)
5637 {
5638 case aarch64_stub_adrp_branch:
5639 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5640 sizeof (aarch64_adrp_branch_stub)))
5641 return FALSE;
5642 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5643 return FALSE;
5644 break;
5645 case aarch64_stub_long_branch:
5646 if (!elf64_aarch64_output_stub_sym
5647 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5648 return FALSE;
5649 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5650 return FALSE;
5651 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5652 return FALSE;
5653 break;
5654 default:
5655 BFD_FAIL ();
5656 }
5657
5658 return TRUE;
5659}
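
/* Illustration of the mapping symbols emitted above (stub layout
   inferred from the offsets used, not from the stub templates
   themselves): for an aarch64_stub_long_branch stub,

       stub+0:   $x   first A64 instruction of the stub
       stub+16:  $d   literal data, typically the 64-bit target address

   so disassemblers decode the first 16 bytes as instructions and
   treat the remainder as data.  */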
5660
5661/* Output mapping symbols for linker generated sections. */
5662
5663static bfd_boolean
5664elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5665 struct bfd_link_info *info,
5666 void *finfo,
5667 int (*func) (void *, const char *,
5668 Elf_Internal_Sym *,
5669 asection *,
5670 struct elf_link_hash_entry
5671 *))
5672{
5673 output_arch_syminfo osi;
5674 struct elf64_aarch64_link_hash_table *htab;
5675
5676 htab = elf64_aarch64_hash_table (info);
5677
5678 osi.finfo = finfo;
5679 osi.info = info;
5680 osi.func = func;
5681
5682 /* Long call stubs. */
5683 if (htab->stub_bfd && htab->stub_bfd->sections)
5684 {
5685 asection *stub_sec;
5686
5687 for (stub_sec = htab->stub_bfd->sections;
5688 stub_sec != NULL; stub_sec = stub_sec->next)
5689 {
5690 /* Ignore non-stub sections. */
5691 if (!strstr (stub_sec->name, STUB_SUFFIX))
5692 continue;
5693
5694 osi.sec = stub_sec;
5695
5696 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5697 (output_bfd, osi.sec->output_section);
5698
5699 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5700 &osi);
5701 }
5702 }
5703
5704 /* Finally, output mapping symbols for the PLT. */
5705 if (!htab->root.splt || htab->root.splt->size == 0)
5706 return TRUE;
5707
5708 /* For now we emit only minimal mapping symbols for the PLT; see elf64_aarch64_output_plt_map. */
5709 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5710 (output_bfd, htab->root.splt->output_section);
5711 osi.sec = htab->root.splt;
5712
5713 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5714 (void *) &osi);
5715
5716 return TRUE;
5717
5718}
5719
5720/* Allocate target specific section data. */
5721
5722static bfd_boolean
5723elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5724{
5725 if (!sec->used_by_bfd)
5726 {
5727 _aarch64_elf_section_data *sdata;
5728 bfd_size_type amt = sizeof (*sdata);
5729
5730 sdata = bfd_zalloc (abfd, amt);
5731 if (sdata == NULL)
5732 return FALSE;
5733 sec->used_by_bfd = sdata;
5734 }
5735
5736 record_section_with_aarch64_elf_section_data (sec);
5737
5738 return _bfd_elf_new_section_hook (abfd, sec);
5739}
5740
5741
5742static void
5743unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5744 asection *sec,
5745 void *ignore ATTRIBUTE_UNUSED)
5746{
5747 unrecord_section_with_aarch64_elf_section_data (sec);
5748}
5749
5750static bfd_boolean
5751elf64_aarch64_close_and_cleanup (bfd *abfd)
5752{
5753 if (abfd->sections)
5754 bfd_map_over_sections (abfd,
5755 unrecord_section_via_map_over_sections, NULL);
5756
5757 return _bfd_elf_close_and_cleanup (abfd);
5758}
5759
5760static bfd_boolean
5761elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5762{
5763 if (abfd->sections)
5764 bfd_map_over_sections (abfd,
5765 unrecord_section_via_map_over_sections, NULL);
5766
5767 return _bfd_free_cached_info (abfd);
5768}
5769
5770static bfd_boolean
5771elf64_aarch64_is_function_type (unsigned int type)
5772{
5773 return type == STT_FUNC;
5774}
5775
5776/* Create dynamic sections. This is different from the ARM backend in that
5777 the got, plt, gotplt and their relocation sections are all created in the
5778 standard part of the bfd elf backend. */
5779
5780static bfd_boolean
5781elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5782 struct bfd_link_info *info)
5783{
5784 struct elf64_aarch64_link_hash_table *htab;
5785 struct elf_link_hash_entry *h;
5786
5787 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5788 return FALSE;
5789
5790 htab = elf64_aarch64_hash_table (info);
5791 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5792 if (!info->shared)
5793 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5794
5795 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5796 abort ();
5797
5798 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5799 dynobj's .got section. We don't do this in the linker script
5800 because we don't want to define the symbol if we are not creating
5801 a global offset table. */
5802 h = _bfd_elf_define_linkage_sym (dynobj, info,
5803 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5804 elf_hash_table (info)->hgot = h;
5805 if (h == NULL)
5806 return FALSE;
5807
5808 return TRUE;
5809}
5810
5811
5812/* Allocate space in .plt, .got and associated reloc sections for
5813 dynamic relocs. */
5814
5815static bfd_boolean
5816elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5817{
5818 struct bfd_link_info *info;
5819 struct elf64_aarch64_link_hash_table *htab;
5820 struct elf64_aarch64_link_hash_entry *eh;
5821 struct elf_dyn_relocs *p;
5822
5823 /* An example of a bfd_link_hash_indirect symbol is a versioned
5824 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5825 -> __gxx_personality_v0(bfd_link_hash_defined)
5826
5827 There is no need to process bfd_link_hash_indirect symbols here
5828 because we will also be presented with the concrete instance of
5829 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5830 called to copy all relevant data from the generic to the concrete
5831 symbol instance.
5832 */
5833 if (h->root.type == bfd_link_hash_indirect)
5834 return TRUE;
5835
5836 if (h->root.type == bfd_link_hash_warning)
5837 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5838
5839 info = (struct bfd_link_info *) inf;
5840 htab = elf64_aarch64_hash_table (info);
5841
5842 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
5843 {
5844 /* Make sure this symbol is output as a dynamic symbol.
5845 Undefined weak syms won't yet be marked as dynamic. */
5846 if (h->dynindx == -1 && !h->forced_local)
5847 {
5848 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5849 return FALSE;
5850 }
5851
5852 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
5853 {
5854 asection *s = htab->root.splt;
5855
5856 /* If this is the first .plt entry, make room for the special
5857 first entry. */
5858 if (s->size == 0)
5859 s->size += htab->plt_header_size;
5860
5861 h->plt.offset = s->size;
5862
5863 /* If this symbol is not defined in a regular file, and we are
5864 not generating a shared library, then set the symbol to this
5865 location in the .plt. This is required to make function
5866 pointers compare as equal between the normal executable and
5867 the shared library. */
5868 if (!info->shared && !h->def_regular)
5869 {
5870 h->root.u.def.section = s;
5871 h->root.u.def.value = h->plt.offset;
5872 }
5873
5874 /* Make room for this entry. For now we only create the
5875 small model PLT entries. We later need to find a way
5876 of relaxing into these from the large model PLT entries. */
5877 s->size += PLT_SMALL_ENTRY_SIZE;
5878
5879 /* We also need to make an entry in the .got.plt section, which
5880 will be placed in the .got section by the linker script. */
5881 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
5882
5883 /* We also need to make an entry in the .rela.plt section. */
5884 htab->root.srelplt->size += RELOC_SIZE (htab);
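
	  /* To put rough numbers on the sizing above (entry sizes
	     assumed rather than taken from the macros defined earlier
	     in this file): with 16-byte small PLT entries, 8-byte GOT
	     slots and 24-byte ELF64 RELA records, each new PLT symbol
	     costs 16 bytes of .plt, 8 bytes of .got.plt and 24 bytes
	     of .rela.plt, in addition to the one-off plt_header_size
	     reserved for PLT0.  */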
5885
5886 /* We need to ensure that all GOT entries that serve the PLT
5887 are consecutive with the special GOT slots [0] [1] and
5888 [2]. Any additional relocations, such as
5889 R_AARCH64_TLSDESC, must be placed after the PLT related
5890 entries. We abuse the reloc_count such that during
5891 sizing we adjust reloc_count to indicate the number of
5892 PLT related reserved entries. In subsequent phases when
5893 filling in the contents of the reloc entries, PLT related
5894 entries are placed by computing their PLT index (0
5895 .. reloc_count), while other non-PLT relocs are placed
5896 at the slot indicated by reloc_count and reloc_count is
5897 updated. */
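
	  /* A concrete picture of the scheme above (counts assumed):
	     with two PLT symbols and one TLSDESC-using symbol,
	     .rela.plt slots 0 and 1 hold the R_AARCH64_JUMP_SLOT
	     relocations, addressed by PLT index, and the
	     R_AARCH64_TLSDESC relocation is appended after them at
	     the slot given by reloc_count.  */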
5898
5899 htab->root.srelplt->reloc_count++;
5900 }
5901 else
5902 {
5903 h->plt.offset = (bfd_vma) - 1;
5904 h->needs_plt = 0;
5905 }
5906 }
5907 else
5908 {
5909 h->plt.offset = (bfd_vma) - 1;
5910 h->needs_plt = 0;
5911 }
5912
5913 eh = (struct elf64_aarch64_link_hash_entry *) h;
5914 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
5915
5916 if (h->got.refcount > 0)
5917 {
5918 bfd_boolean dyn;
5919 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
5920
5921 h->got.offset = (bfd_vma) - 1;
5922
5923 dyn = htab->root.dynamic_sections_created;
5924
5925 /* Make sure this symbol is output as a dynamic symbol.
5926 Undefined weak syms won't yet be marked as dynamic. */
5927 if (dyn && h->dynindx == -1 && !h->forced_local)
5928 {
5929 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5930 return FALSE;
5931 }
5932
5933 if (got_type == GOT_UNKNOWN)
5934 {
5935 }
5936 else if (got_type == GOT_NORMAL)
5937 {
5938 h->got.offset = htab->root.sgot->size;
5939 htab->root.sgot->size += GOT_ENTRY_SIZE;
5940 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5941 || h->root.type != bfd_link_hash_undefweak)
5942 && (info->shared
5943 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
5944 {
5945 htab->root.srelgot->size += RELOC_SIZE (htab);
5946 }
5947 }
5948 else
5949 {
5950 int indx;
5951 if (got_type & GOT_TLSDESC_GD)
5952 {
5953 eh->tlsdesc_got_jump_table_offset =
5954 (htab->root.sgotplt->size
5955 - aarch64_compute_jump_table_size (htab));
5956 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
5957 h->got.offset = (bfd_vma) - 2;
5958 }
5959
5960 if (got_type & GOT_TLS_GD)
5961 {
5962 h->got.offset = htab->root.sgot->size;
5963 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
5964 }
5965
5966 if (got_type & GOT_TLS_IE)
5967 {
5968 h->got.offset = htab->root.sgot->size;
5969 htab->root.sgot->size += GOT_ENTRY_SIZE;
5970 }
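
	  /* Since got_type is a bit mask, a single symbol may need
	     several of these at once: a two-slot descriptor in
	     .got.plt (GOT_TLSDESC_GD), a two-slot entry in .got
	     (GOT_TLS_GD) and a one-slot entry in .got (GOT_TLS_IE).  */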
5971
5972 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5973 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5974 || h->root.type != bfd_link_hash_undefweak)
5975 && (info->shared
5976 || indx != 0
5977 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
5978 {
5979 if (got_type & GOT_TLSDESC_GD)
5980 {
5981 htab->root.srelplt->size += RELOC_SIZE (htab);
5982 /* Note reloc_count not incremented here! We have
5983 already adjusted reloc_count for this relocation
5984 type. */
5985
5986 /* TLSDESC PLT is now needed, but not yet determined. */
5987 htab->tlsdesc_plt = (bfd_vma) - 1;
5988 }
5989
5990 if (got_type & GOT_TLS_GD)
5991 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
5992
5993 if (got_type & GOT_TLS_IE)
5994 htab->root.srelgot->size += RELOC_SIZE (htab);
5995 }
5996 }
5997 }
5998 else
5999 {
6000 h->got.offset = (bfd_vma) - 1;
6001 }
6002
6003 if (eh->dyn_relocs == NULL)
6004 return TRUE;
6005
6006 /* In the shared -Bsymbolic case, discard space allocated for
6007 dynamic pc-relative relocs against symbols which turn out to be
6008 defined in regular objects. For the normal shared case, discard
6009 space for pc-relative relocs that have become local due to symbol
6010 visibility changes. */
6011
6012 if (info->shared)
6013 {
6014 /* Relocs that use pc_count are those that appear on a call
6015 insn, or certain REL relocs that can be generated via assembly.
6016 We want calls to protected symbols to resolve directly to the
6017 function rather than going via the plt. If people want
6018 function pointer comparisons to work as expected then they
6019 should avoid writing weird assembly. */
6020 if (SYMBOL_CALLS_LOCAL (info, h))
6021 {
6022 struct elf_dyn_relocs **pp;
6023
6024 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6025 {
6026 p->count -= p->pc_count;
6027 p->pc_count = 0;
6028 if (p->count == 0)
6029 *pp = p->next;
6030 else
6031 pp = &p->next;
6032 }
6033 }
6034
6035 /* Also discard relocs on undefined weak syms with non-default
6036 visibility. */
6037 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6038 {
6039 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6040 eh->dyn_relocs = NULL;
6041
6042 /* Make sure undefined weak symbols are output as a dynamic
6043 symbol in PIEs. */
6044 else if (h->dynindx == -1
6045 && !h->forced_local
6046 && !bfd_elf_link_record_dynamic_symbol (info, h))
6047 return FALSE;
6048 }
6049
6050 }
6051 else if (ELIMINATE_COPY_RELOCS)
6052 {
6053 /* For the non-shared case, discard space for relocs against
6054 symbols which turn out to need copy relocs or are not
6055 dynamic. */
6056
6057 if (!h->non_got_ref
6058 && ((h->def_dynamic
6059 && !h->def_regular)
6060 || (htab->root.dynamic_sections_created
6061 && (h->root.type == bfd_link_hash_undefweak
6062 || h->root.type == bfd_link_hash_undefined))))
6063 {
6064 /* Make sure this symbol is output as a dynamic symbol.
6065 Undefined weak syms won't yet be marked as dynamic. */
6066 if (h->dynindx == -1
6067 && !h->forced_local
6068 && !bfd_elf_link_record_dynamic_symbol (info, h))
6069 return FALSE;
6070
6071 /* If that succeeded, we know we'll be keeping all the
6072 relocs. */
6073 if (h->dynindx != -1)
6074 goto keep;
6075 }
6076
6077 eh->dyn_relocs = NULL;
6078
6079 keep:;
6080 }
6081
6082 /* Finally, allocate space. */
6083 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6084 {
6085 asection *sreloc;
6086
6087 sreloc = elf_section_data (p->sec)->sreloc;
6088
6089 BFD_ASSERT (sreloc != NULL);
6090
6091 sreloc->size += p->count * RELOC_SIZE (htab);
6092 }
6093
6094 return TRUE;
6095}
6096
6097
6098
6099
6100/* This is the most important function of all.  Innocuously named
6101 though!  */
6102static bfd_boolean
6103elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6104 struct bfd_link_info *info)
6105{
6106 struct elf64_aarch64_link_hash_table *htab;
6107 bfd *dynobj;
6108 asection *s;
6109 bfd_boolean relocs;
6110 bfd *ibfd;
6111
6112 htab = elf64_aarch64_hash_table ((info));
6113 dynobj = htab->root.dynobj;
6114
6115 BFD_ASSERT (dynobj != NULL);
6116
6117 if (htab->root.dynamic_sections_created)
6118 {
6119 if (info->executable)
6120 {
6121 s = bfd_get_linker_section (dynobj, ".interp");
6122 if (s == NULL)
6123 abort ();
6124 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6125 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
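	  /* .interp simply carries the NUL-terminated path of the
	     dynamic linker, e.g. something like
	     /lib/ld-linux-aarch64.so.1; the exact string is whatever
	     ELF_DYNAMIC_INTERPRETER is defined to elsewhere in this
	     file.  */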
6126 }
6127 }
6128
6129 /* Set up .got offsets for local syms, and space for local dynamic
6130 relocs. */
6131 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6132 {
6133 struct elf_aarch64_local_symbol *locals = NULL;
6134 Elf_Internal_Shdr *symtab_hdr;
6135 asection *srel;
6136 unsigned int i;
6137
6138 if (!is_aarch64_elf (ibfd))
6139 continue;
6140
6141 for (s = ibfd->sections; s != NULL; s = s->next)
6142 {
6143 struct elf_dyn_relocs *p;
6144
6145 for (p = (struct elf_dyn_relocs *)
6146 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6147 {
6148 if (!bfd_is_abs_section (p->sec)
6149 && bfd_is_abs_section (p->sec->output_section))
6150 {
6151 /* Input section has been discarded, either because
6152 it is a copy of a linkonce section or due to
6153 linker script /DISCARD/, so we'll be discarding
6154 the relocs too. */
6155 }
6156 else if (p->count != 0)
6157 {
6158 srel = elf_section_data (p->sec)->sreloc;
6159 srel->size += p->count * RELOC_SIZE (htab);
6160 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6161 info->flags |= DF_TEXTREL;
6162 }
6163 }
6164 }
6165
6166 locals = elf64_aarch64_locals (ibfd);
6167 if (!locals)
6168 continue;
6169
6170 symtab_hdr = &elf_symtab_hdr (ibfd);
6171 srel = htab->root.srelgot;
6172 for (i = 0; i < symtab_hdr->sh_info; i++)
6173 {
6174 locals[i].got_offset = (bfd_vma) - 1;
6175 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6176 if (locals[i].got_refcount > 0)
6177 {
6178 unsigned got_type = locals[i].got_type;
6179 if (got_type & GOT_TLSDESC_GD)
6180 {
6181 locals[i].tlsdesc_got_jump_table_offset =
6182 (htab->root.sgotplt->size
6183 - aarch64_compute_jump_table_size (htab));
6184 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6185 locals[i].got_offset = (bfd_vma) - 2;
6186 }
6187
6188 if (got_type & GOT_TLS_GD)
6189 {
6190 locals[i].got_offset = htab->root.sgot->size;
6191 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6192 }
6193
6194 if (got_type & GOT_TLS_IE)
6195 {
6196 locals[i].got_offset = htab->root.sgot->size;
6197 htab->root.sgot->size += GOT_ENTRY_SIZE;
6198 }
6199
6200 if (got_type == GOT_UNKNOWN)
6201 {
6202 }
6203
6204 if (got_type == GOT_NORMAL)
6205 {
6206 }
6207
6208 if (info->shared)
6209 {
6210 if (got_type & GOT_TLSDESC_GD)
6211 {
6212 htab->root.srelplt->size += RELOC_SIZE (htab);
6213 /* Note RELOC_COUNT not incremented here! */
6214 htab->tlsdesc_plt = (bfd_vma) - 1;
6215 }
6216
6217 if (got_type & GOT_TLS_GD)
6218 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6219
6220 if (got_type & GOT_TLS_IE)
6221 htab->root.srelgot->size += RELOC_SIZE (htab);
6222 }
6223 }
6224 else
6225 {
6226 locals[i].got_refcount = (bfd_vma) - 1;
6227 }
6228 }
6229 }
6230
6231
6232 /* Allocate global sym .plt and .got entries, and space for global
6233 sym dynamic relocs. */
6234 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6235 info);
6236
6237
6238 /* For every jump slot reserved in the sgotplt, reloc_count is
6239 incremented. However, when we reserve space for TLS descriptors,
6240 it's not incremented, so in order to compute the space reserved
6241 for them, it suffices to multiply the reloc count by the jump
6242 slot size. */
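
  /* In other words (the helper's body is defined elsewhere in this
     file): the jump-slot area of .got.plt is presumably
     srelplt->reloc_count * GOT_ENTRY_SIZE bytes, and any .got.plt
     space beyond the reserved header slots and that area belongs to
     TLS descriptors.  */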
6243
6244 if (htab->root.srelplt)
6245 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6246
6247 if (htab->tlsdesc_plt)
6248 {
6249 if (htab->root.splt->size == 0)
6250 htab->root.splt->size += PLT_ENTRY_SIZE;
6251
6252 htab->tlsdesc_plt = htab->root.splt->size;
6253 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6254
6255 /* If we're not using lazy TLS relocations, don't generate the
6256 GOT entry required. */
6257 if (!(info->flags & DF_BIND_NOW))
6258 {
6259 htab->dt_tlsdesc_got = htab->root.sgot->size;
6260 htab->root.sgot->size += GOT_ENTRY_SIZE;
6261 }
6262 }
6263
6264 /* We now have determined the sizes of the various dynamic sections.
6265 Allocate memory for them. */
6266 relocs = FALSE;
6267 for (s = dynobj->sections; s != NULL; s = s->next)
6268 {
6269 if ((s->flags & SEC_LINKER_CREATED) == 0)
6270 continue;
6271
6272 if (s == htab->root.splt
6273 || s == htab->root.sgot
6274 || s == htab->root.sgotplt
6275 || s == htab->root.iplt
6276 || s == htab->root.igotplt || s == htab->sdynbss)
6277 {
6278 /* Strip this section if we don't need it; see the
6279 comment below. */
6280 }
6281 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6282 {
6283 if (s->size != 0 && s != htab->root.srelplt)
6284 relocs = TRUE;
6285
6286 /* We use the reloc_count field as a counter if we need
6287 to copy relocs into the output file. */
6288 if (s != htab->root.srelplt)
6289 s->reloc_count = 0;
6290 }
6291 else
6292 {
6293 /* It's not one of our sections, so don't allocate space. */
6294 continue;
6295 }
6296
6297 if (s->size == 0)
6298 {
6299 /* If we don't need this section, strip it from the
6300 output file. This is mostly to handle .rela.bss and
6301 .rela.plt. We must create both sections in
6302 create_dynamic_sections, because they must be created
6303 before the linker maps input sections to output
6304 sections. The linker does that before
6305 adjust_dynamic_symbol is called, and it is that
6306 function which decides whether anything needs to go
6307 into these sections. */
6308
6309 s->flags |= SEC_EXCLUDE;
6310 continue;
6311 }
6312
6313 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6314 continue;
6315
6316 /* Allocate memory for the section contents. We use bfd_zalloc
6317 here in case unused entries are not reclaimed before the
6318 section's contents are written out. This should not happen,
6319 but this way if it does, we get a R_AARCH64_NONE reloc instead
6320 of garbage. */
6321 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6322 if (s->contents == NULL)
6323 return FALSE;
6324 }
6325
6326 if (htab->root.dynamic_sections_created)
6327 {
6328 /* Add some entries to the .dynamic section. We fill in the
6329 values later, in elf64_aarch64_finish_dynamic_sections, but we
6330 must add the entries now so that we get the correct size for
6331 the .dynamic section. The DT_DEBUG entry is filled in by the
6332 dynamic linker and used by the debugger. */
6333#define add_dynamic_entry(TAG, VAL) \
6334 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6335
6336 if (info->executable)
6337 {
6338 if (!add_dynamic_entry (DT_DEBUG, 0))
6339 return FALSE;
6340 }
6341
6342 if (htab->root.splt->size != 0)
6343 {
6344 if (!add_dynamic_entry (DT_PLTGOT, 0)
6345 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6346 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6347 || !add_dynamic_entry (DT_JMPREL, 0))
6348 return FALSE;
6349
6350 if (htab->tlsdesc_plt
6351 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6352 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6353 return FALSE;
6354 }
6355
6356 if (relocs)
6357 {
6358 if (!add_dynamic_entry (DT_RELA, 0)
6359 || !add_dynamic_entry (DT_RELASZ, 0)
6360 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6361 return FALSE;
6362
6363 /* If any dynamic relocs apply to a read-only section,
6364 then we need a DT_TEXTREL entry. */
6365 if ((info->flags & DF_TEXTREL) != 0)
6366 {
6367 if (!add_dynamic_entry (DT_TEXTREL, 0))
6368 return FALSE;
6369 }
6370 }
6371 }
6372#undef add_dynamic_entry
6373
6374 return TRUE;
6375
6376
6377}
6378
6379static inline void
6380elf64_aarch64_update_plt_entry (bfd *output_bfd,
6381 unsigned int r_type,
6382 bfd_byte *plt_entry, bfd_vma value)
6383{
6384 reloc_howto_type *howto;
6385 howto = elf64_aarch64_howto_from_type (r_type);
6386 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6387}
6388
6389static void
6390elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6391 struct elf64_aarch64_link_hash_table
6392 *htab, bfd *output_bfd)
6393{
6394 bfd_byte *plt_entry;
6395 bfd_vma plt_index;
6396 bfd_vma got_offset;
6397 bfd_vma gotplt_entry_address;
6398 bfd_vma plt_entry_address;
6399 Elf_Internal_Rela rela;
6400 bfd_byte *loc;
6401
6402 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6403
6404 /* The offset in the GOT is the PLT index plus the GOT headers (3),
6405 times GOT_ENTRY_SIZE (8). */
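  /* For example, the symbol with plt_index 0 gets GOT offset
     (0 + 3) * 8 = 24, i.e. the first slot after the three reserved
     .got.plt header entries.  */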
6406 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6407 plt_entry = htab->root.splt->contents + h->plt.offset;
6408 plt_entry_address = htab->root.splt->output_section->vma
6409 + htab->root.splt->output_section->output_offset + h->plt.offset;
6410 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6411 htab->root.sgotplt->output_offset + got_offset;
6412
6413 /* Copy in the boiler-plate for the PLTn entry. */
6414 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6415
6416 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6417 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
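  /* A worked example with assumed addresses: if the .got.plt slot is
     at 0x411018 and this PLT entry is at 0x400440, then taking PG (x)
     to be x with its low 12 bits cleared, the ADRP immediate is
     (0x411000 - 0x400000) >> 12 = 0x11 pages.  */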
6418 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6419 plt_entry,
6420 PG (gotplt_entry_address) -
6421 PG (plt_entry_address));
6422
6423 /* Fill in the lo12 bits for the load from the pltgot. */
6424 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6425 plt_entry + 4,
6426 PG_OFFSET (gotplt_entry_address));
6427
6428 /* Fill in the lo12 bits for the add from the pltgot entry. */
6429 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6430 plt_entry + 8,
6431 PG_OFFSET (gotplt_entry_address));
6432
6433 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6434 bfd_put_64 (output_bfd,
6435 (htab->root.splt->output_section->vma
6436 + htab->root.splt->output_offset),
6437 htab->root.sgotplt->contents + got_offset);
6438
6439 /* Fill in the entry in the .rela.plt section. */
6440 rela.r_offset = gotplt_entry_address;
6441 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6442 rela.r_addend = 0;
6443
6444 /* Compute the relocation entry to use based on the PLT index and do
6445 not adjust reloc_count. The reloc_count has already been adjusted
6446 to account for this entry. */
6447 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6448 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6449}
6450
6451/* Size sections even though they're not dynamic.  We use this hook to
6452 set up _TLS_MODULE_BASE_, if needed. */
6453
6454static bfd_boolean
6455elf64_aarch64_always_size_sections (bfd *output_bfd,
6456 struct bfd_link_info *info)
6457{
6458 asection *tls_sec;
6459
6460 if (info->relocatable)
6461 return TRUE;
6462
6463 tls_sec = elf_hash_table (info)->tls_sec;
6464
6465 if (tls_sec)
6466 {
6467 struct elf_link_hash_entry *tlsbase;
6468
6469 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6470 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6471
6472 if (tlsbase)
6473 {
6474 struct bfd_link_hash_entry *h = NULL;
6475 const struct elf_backend_data *bed =
6476 get_elf_backend_data (output_bfd);
6477
6478 if (!(_bfd_generic_link_add_one_symbol
6479 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6480 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6481 return FALSE;
6482
6483 tlsbase->type = STT_TLS;
6484 tlsbase = (struct elf_link_hash_entry *) h;
6485 tlsbase->def_regular = 1;
6486 tlsbase->other = STV_HIDDEN;
6487 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6488 }
6489 }
6490
6491 return TRUE;
6492}
6493
6494/* Finish up dynamic symbol handling. We set the contents of various
6495 dynamic sections here. */
6496static bfd_boolean
6497elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6498 struct bfd_link_info *info,
6499 struct elf_link_hash_entry *h,
6500 Elf_Internal_Sym *sym)
6501{
6502 struct elf64_aarch64_link_hash_table *htab;
6503 htab = elf64_aarch64_hash_table (info);
6504
6505 if (h->plt.offset != (bfd_vma) - 1)
6506 {
6507 /* This symbol has an entry in the procedure linkage table. Set
6508 it up. */
6509
6510 if (h->dynindx == -1
6511 || htab->root.splt == NULL
6512 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6513 abort ();
6514
6515 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6516 if (!h->def_regular)
6517 {
6518 /* Mark the symbol as undefined, rather than as defined in
6519 the .plt section. Leave the value alone. This is a clue
6520 for the dynamic linker, to make function pointer
6521 comparisons work between an application and shared
6522 library. */
6523 sym->st_shndx = SHN_UNDEF;
6524 }
6525 }
6526
6527 if (h->got.offset != (bfd_vma) - 1
6528 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6529 {
6530 Elf_Internal_Rela rela;
6531 bfd_byte *loc;
6532
6533 /* This symbol has an entry in the global offset table. Set it
6534 up. */
6535 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6536 abort ();
6537
6538 rela.r_offset = (htab->root.sgot->output_section->vma
6539 + htab->root.sgot->output_offset
6540 + (h->got.offset & ~(bfd_vma) 1));
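      /* The low bit of h->got.offset follows the usual BFD convention
	 of marking a GOT entry that has already been initialised, so
	 it is masked off above and checked by the BFD_ASSERTs
	 below.  */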
6541
6542 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6543 {
6544 if (!h->def_regular)
6545 return FALSE;
6546
6547 BFD_ASSERT ((h->got.offset & 1) != 0);
6548 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6549 rela.r_addend = (h->root.u.def.value
6550 + h->root.u.def.section->output_section->vma
6551 + h->root.u.def.section->output_offset);
6552 }
6553 else
6554 {
6555 BFD_ASSERT ((h->got.offset & 1) == 0);
6556 bfd_put_64 (output_bfd, (bfd_vma) 0,
6557 htab->root.sgot->contents + h->got.offset);
6558 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6559 rela.r_addend = 0;
6560 }
6561
6562 loc = htab->root.srelgot->contents;
6563 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6564 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6565 }
6566
6567 if (h->needs_copy)
6568 {
6569 Elf_Internal_Rela rela;
6570 bfd_byte *loc;
6571
6572 /* This symbol needs a copy reloc. Set it up. */
6573
6574 if (h->dynindx == -1
6575 || (h->root.type != bfd_link_hash_defined
6576 && h->root.type != bfd_link_hash_defweak)
6577 || htab->srelbss == NULL)
6578 abort ();
6579
6580 rela.r_offset = (h->root.u.def.value
6581 + h->root.u.def.section->output_section->vma
6582 + h->root.u.def.section->output_offset);
6583 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6584 rela.r_addend = 0;
6585 loc = htab->srelbss->contents;
6586 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6587 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6588 }
6589
6590 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6591 be NULL for local symbols. */
6592 if (sym != NULL
6593 && (strcmp (h->root.root.string, "_DYNAMIC") == 0
6594 || h == elf_hash_table (info)->hgot))
6595 sym->st_shndx = SHN_ABS;
6596
6597 return TRUE;
6598}
6599
6600static void
6601elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6602 struct elf64_aarch64_link_hash_table
6603 *htab)
6604{
6605 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6606 small and large plts and at the moment just generates
6607 the small PLT. */
6608
6609 /* PLT0 of the small PLT looks like this -
6610 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6611 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6612 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6613 // symbol resolver
6614 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6615 // GOTPLT entry for this.
6616 br x17
6617 */
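  /* The PLT_GOT + 16 above is the third .got.plt entry (GOT[2]): by
     the usual ELF lazy-binding convention the dynamic linker stores
     its resolver entry point there (and a module handle in GOT[1]),
     which is why PLT0 loads x17 from that slot and branches to it.  */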
6618 bfd_vma plt_got_base;
6619 bfd_vma plt_base;
6620
6621
6622 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6623 PLT_ENTRY_SIZE);
6624 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6625 PLT_ENTRY_SIZE;
6626
6627 plt_got_base = (htab->root.sgotplt->output_section->vma
6628 + htab->root.sgotplt->output_offset);
6629
6630 plt_base = htab->root.splt->output_section->vma +
6631 htab->root.splt->output_section->output_offset;
6632
6633 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 16.
6634 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6635 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6636 htab->root.splt->contents + 4,
6637 PG (plt_got_base + 16) - PG (plt_base + 4));
6638
6639 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6640 htab->root.splt->contents + 8,
6641 PG_OFFSET (plt_got_base + 16));
6642
6643 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6644 htab->root.splt->contents + 12,
6645 PG_OFFSET (plt_got_base + 16));
6646}
6647
6648static bfd_boolean
6649elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6650 struct bfd_link_info *info)
6651{
6652 struct elf64_aarch64_link_hash_table *htab;
6653 bfd *dynobj;
6654 asection *sdyn;
6655
6656 htab = elf64_aarch64_hash_table (info);
6657 dynobj = htab->root.dynobj;
6658 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6659
6660 if (htab->root.dynamic_sections_created)
6661 {
6662 Elf64_External_Dyn *dyncon, *dynconend;
6663
6664 if (sdyn == NULL || htab->root.sgot == NULL)
6665 abort ();
6666
6667 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6668 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6669 for (; dyncon < dynconend; dyncon++)
6670 {
6671 Elf_Internal_Dyn dyn;
6672 asection *s;
6673
6674 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6675
6676 switch (dyn.d_tag)
6677 {
6678 default:
6679 continue;
6680
6681 case DT_PLTGOT:
6682 s = htab->root.sgotplt;
6683 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6684 break;
6685
6686 case DT_JMPREL:
6687 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6688 break;
6689
6690 case DT_PLTRELSZ:
6691 s = htab->root.srelplt->output_section;
6692 dyn.d_un.d_val = s->size;
6693 break;
6694
6695 case DT_RELASZ:
6696 /* The procedure linkage table relocs (DT_JMPREL) should
6697 not be included in the overall relocs (DT_RELA).
6698 Therefore, we override the DT_RELASZ entry here to
6699 make it not include the JMPREL relocs. Since the
6700 linker script arranges for .rela.plt to follow all
6701 other relocation sections, we don't have to worry
6702 about changing the DT_RELA entry. */
6703 if (htab->root.srelplt != NULL)
6704 {
6705 s = htab->root.srelplt->output_section;
6706 dyn.d_un.d_val -= s->size;
6707 }
6708 break;
6709
6710 case DT_TLSDESC_PLT:
6711 s = htab->root.splt;
6712 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6713 + htab->tlsdesc_plt;
6714 break;
6715
6716 case DT_TLSDESC_GOT:
6717 s = htab->root.sgot;
6718 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6719 + htab->dt_tlsdesc_got;
6720 break;
6721 }
6722
6723 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6724 }
6725
6726 }
6727
6728 /* Fill in the special first entry in the procedure linkage table. */
6729 if (htab->root.splt && htab->root.splt->size > 0)
6730 {
6731 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6732
6733 elf_section_data (htab->root.splt->output_section)->
6734 this_hdr.sh_entsize = htab->plt_entry_size;
6735
6736
6737 if (htab->tlsdesc_plt)
6738 {
6739 bfd_put_64 (output_bfd, (bfd_vma) 0,
6740 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6741
6742 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6743 elf64_aarch64_tlsdesc_small_plt_entry,
6744 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6745
6746 {
6747 bfd_vma adrp1_addr =
6748 htab->root.splt->output_section->vma
6749 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6750
6751 bfd_vma adrp2_addr =
6752 htab->root.splt->output_section->vma
6753 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6754
6755 bfd_vma got_addr =
6756 htab->root.sgot->output_section->vma
6757 + htab->root.sgot->output_offset;
6758
6759 bfd_vma pltgot_addr =
6760 htab->root.sgotplt->output_section->vma
6761 + htab->root.sgotplt->output_offset;
6762
6763 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6764 bfd_vma opcode;
6765
6766 /* adrp x2, DT_TLSDESC_GOT */
6767 opcode = bfd_get_32 (output_bfd,
6768 htab->root.splt->contents
6769 + htab->tlsdesc_plt + 4);
6770 opcode = reencode_adr_imm
6771 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6772 bfd_put_32 (output_bfd, opcode,
6773 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6774
6775 /* adrp x3, 0 */
6776 opcode = bfd_get_32 (output_bfd,
6777 htab->root.splt->contents
6778 + htab->tlsdesc_plt + 8);
6779 opcode = reencode_adr_imm
6780 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6781 bfd_put_32 (output_bfd, opcode,
6782 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6783
6784 /* ldr x2, [x2, #0] */
6785 opcode = bfd_get_32 (output_bfd,
6786 htab->root.splt->contents
6787 + htab->tlsdesc_plt + 12);
6788 opcode = reencode_ldst_pos_imm (opcode,
6789 PG_OFFSET (dt_tlsdesc_got) >> 3);
6790 bfd_put_32 (output_bfd, opcode,
6791 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6792
6793 /* add x3, x3, 0 */
6794 opcode = bfd_get_32 (output_bfd,
6795 htab->root.splt->contents
6796 + htab->tlsdesc_plt + 16);
6797 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6798 bfd_put_32 (output_bfd, opcode,
6799 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6800 }
6801 }
6802 }
6803
6804 if (htab->root.sgotplt)
6805 {
6806 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6807 {
6808 (*_bfd_error_handler)
6809 (_("discarded output section: `%A'"), htab->root.sgotplt);
6810 return FALSE;
6811 }
6812
6813 /* Fill in the first three entries in the global offset table. */
6814 if (htab->root.sgotplt->size > 0)
6815 {
6816 /* Set the first entry in the global offset table to the address of
6817 the dynamic section. */
6818 if (sdyn == NULL)
6819 bfd_put_64 (output_bfd, (bfd_vma) 0,
6820 htab->root.sgotplt->contents);
6821 else
6822 bfd_put_64 (output_bfd,
6823 sdyn->output_section->vma + sdyn->output_offset,
6824 htab->root.sgotplt->contents);
6825 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6826 bfd_put_64 (output_bfd,
6827 (bfd_vma) 0,
6828 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6829 bfd_put_64 (output_bfd,
6830 (bfd_vma) 0,
6831 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6832 }
6833
6834 elf_section_data (htab->root.sgotplt->output_section)->
6835 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6836 }
6837
6838 if (htab->root.sgot && htab->root.sgot->size > 0)
6839 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6840 = GOT_ENTRY_SIZE;
6841
6842 return TRUE;
6843}
6844
6845/* Return address for Ith PLT stub in section PLT, for relocation REL
6846 or (bfd_vma) -1 if it should not be included. */
6847
6848static bfd_vma
6849elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6850 const arelent *rel ATTRIBUTE_UNUSED)
6851{
6852 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6853}
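
/* For instance (entry sizes assumed rather than read from this file's
   definitions): with a 32-byte PLT0 and 16-byte small PLT entries,
   the 0th PLT stub reported to tools such as objdump sits at
   plt->vma + 32 and the Ith at plt->vma + 32 + I * 16.  */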
6854
6855
6856/* We use this so we can override certain functions
6857 (though currently we don't). */
6858
6859const struct elf_size_info elf64_aarch64_size_info =
6860{
6861 sizeof (Elf64_External_Ehdr),
6862 sizeof (Elf64_External_Phdr),
6863 sizeof (Elf64_External_Shdr),
6864 sizeof (Elf64_External_Rel),
6865 sizeof (Elf64_External_Rela),
6866 sizeof (Elf64_External_Sym),
6867 sizeof (Elf64_External_Dyn),
6868 sizeof (Elf_External_Note),
6869 4, /* Hash table entry size. */
6870 1, /* Internal relocs per external relocs. */
6871 64, /* Arch size. */
6872 3, /* Log_file_align. */
6873 ELFCLASS64, EV_CURRENT,
6874 bfd_elf64_write_out_phdrs,
6875 bfd_elf64_write_shdrs_and_ehdr,
6876 bfd_elf64_checksum_contents,
6877 bfd_elf64_write_relocs,
6878 bfd_elf64_swap_symbol_in,
6879 bfd_elf64_swap_symbol_out,
6880 bfd_elf64_slurp_reloc_table,
6881 bfd_elf64_slurp_symbol_table,
6882 bfd_elf64_swap_dyn_in,
6883 bfd_elf64_swap_dyn_out,
6884 bfd_elf64_swap_reloc_in,
6885 bfd_elf64_swap_reloc_out,
6886 bfd_elf64_swap_reloca_in,
6887 bfd_elf64_swap_reloca_out
6888};
6889
6890#define ELF_ARCH bfd_arch_aarch64
6891#define ELF_MACHINE_CODE EM_AARCH64
6892#define ELF_MAXPAGESIZE 0x10000
6893#define ELF_MINPAGESIZE 0x1000
6894#define ELF_COMMONPAGESIZE 0x1000
6895
6896#define bfd_elf64_close_and_cleanup \
6897 elf64_aarch64_close_and_cleanup
6898
6899#define bfd_elf64_bfd_copy_private_bfd_data \
6900 elf64_aarch64_copy_private_bfd_data
6901
6902#define bfd_elf64_bfd_free_cached_info \
6903 elf64_aarch64_bfd_free_cached_info
6904
6905#define bfd_elf64_bfd_is_target_special_symbol \
6906 elf64_aarch64_is_target_special_symbol
6907
6908#define bfd_elf64_bfd_link_hash_table_create \
6909 elf64_aarch64_link_hash_table_create
6910
6911#define bfd_elf64_bfd_link_hash_table_free \
6912 elf64_aarch64_hash_table_free
6913
6914#define bfd_elf64_bfd_merge_private_bfd_data \
6915 elf64_aarch64_merge_private_bfd_data
6916
6917#define bfd_elf64_bfd_print_private_bfd_data \
6918 elf64_aarch64_print_private_bfd_data
6919
6920#define bfd_elf64_bfd_reloc_type_lookup \
6921 elf64_aarch64_reloc_type_lookup
6922
6923#define bfd_elf64_bfd_reloc_name_lookup \
6924 elf64_aarch64_reloc_name_lookup
6925
6926#define bfd_elf64_bfd_set_private_flags \
6927 elf64_aarch64_set_private_flags
6928
6929#define bfd_elf64_find_inliner_info \
6930 elf64_aarch64_find_inliner_info
6931
6932#define bfd_elf64_find_nearest_line \
6933 elf64_aarch64_find_nearest_line
6934
6935#define bfd_elf64_mkobject \
6936 elf64_aarch64_mkobject
6937
6938#define bfd_elf64_new_section_hook \
6939 elf64_aarch64_new_section_hook
6940
6941#define elf_backend_adjust_dynamic_symbol \
6942 elf64_aarch64_adjust_dynamic_symbol
6943
6944#define elf_backend_always_size_sections \
6945 elf64_aarch64_always_size_sections
6946
6947#define elf_backend_check_relocs \
6948 elf64_aarch64_check_relocs
6949
6950#define elf_backend_copy_indirect_symbol \
6951 elf64_aarch64_copy_indirect_symbol
6952
6953/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
6954 to them in our hash. */
6955#define elf_backend_create_dynamic_sections \
6956 elf64_aarch64_create_dynamic_sections
6957
6958#define elf_backend_init_index_section \
6959 _bfd_elf_init_2_index_sections
6960
6961#define elf_backend_is_function_type \
6962 elf64_aarch64_is_function_type
6963
6964#define elf_backend_finish_dynamic_sections \
6965 elf64_aarch64_finish_dynamic_sections
6966
6967#define elf_backend_finish_dynamic_symbol \
6968 elf64_aarch64_finish_dynamic_symbol
6969
6970#define elf_backend_gc_sweep_hook \
6971 elf64_aarch64_gc_sweep_hook
6972
6973#define elf_backend_object_p \
6974 elf64_aarch64_object_p
6975
6976#define elf_backend_output_arch_local_syms \
6977 elf64_aarch64_output_arch_local_syms
6978
6979#define elf_backend_plt_sym_val \
6980 elf64_aarch64_plt_sym_val
6981
6982#define elf_backend_post_process_headers \
6983 elf64_aarch64_post_process_headers
6984
6985#define elf_backend_relocate_section \
6986 elf64_aarch64_relocate_section
6987
6988#define elf_backend_reloc_type_class \
6989 elf64_aarch64_reloc_type_class
6990
6991#define elf_backend_section_flags \
6992 elf64_aarch64_section_flags
6993
6994#define elf_backend_section_from_shdr \
6995 elf64_aarch64_section_from_shdr
6996
6997#define elf_backend_size_dynamic_sections \
6998 elf64_aarch64_size_dynamic_sections
6999
7000#define elf_backend_size_info \
7001 elf64_aarch64_size_info
7002
7003#define elf_backend_can_refcount 1
7004#define elf_backend_can_gc_sections 0
7005#define elf_backend_plt_readonly 1
7006#define elf_backend_want_got_plt 1
7007#define elf_backend_want_plt_sym 0
7008#define elf_backend_may_use_rel_p 0
7009#define elf_backend_may_use_rela_p 1
7010#define elf_backend_default_use_rela_p 1
7011#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7012
7013#undef elf_backend_obj_attrs_section
7014#define elf_backend_obj_attrs_section ".ARM.attributes"
7015
7016#include "elf64-target.h"