1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* ARM uses REL relocations (see the use_rel tests above), so only the
   REL flavour of the info-to-howto hook is provided; the RELA hook is
   unused.  */
#define elf_info_to_howto		0
#define elf_info_to_howto_rel		elf32_arm_info_to_howto
/* ABI version written to the ELF header's e_ident fields for ARM
   output.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
/* The Adjusted Place, as defined by AAELF.  Word-aligns the place X by
   clearing its two low-order bits.  */
#define Pa(X) ((X) & 0xfffffffc)
68 static bfd_boolean
elf32_arm_write_section (bfd
*output_bfd
,
69 struct bfd_link_info
*link_info
,
/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
77 static reloc_howto_type elf32_arm_howto_table_1
[] =
80 HOWTO (R_ARM_NONE
, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE
, /* pc_relative */
86 complain_overflow_dont
,/* complain_on_overflow */
87 bfd_elf_generic_reloc
, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE
, /* partial_inplace */
92 FALSE
), /* pcrel_offset */
94 HOWTO (R_ARM_PC24
, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE
, /* pc_relative */
100 complain_overflow_signed
,/* complain_on_overflow */
101 bfd_elf_generic_reloc
, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE
, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE
), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32
, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE
, /* pc_relative */
115 complain_overflow_bitfield
,/* complain_on_overflow */
116 bfd_elf_generic_reloc
, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE
, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE
), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32
, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE
, /* pc_relative */
130 complain_overflow_bitfield
,/* complain_on_overflow */
131 bfd_elf_generic_reloc
, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE
, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE
), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0
, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE
, /* pc_relative */
145 complain_overflow_dont
,/* complain_on_overflow */
146 bfd_elf_generic_reloc
, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE
, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE
), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE
, /* pc_relative */
1685 complain_overflow_bitfield
,/* complain_on_overflow */
1686 bfd_elf_generic_reloc
, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE
, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE
), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE
, /* pc_relative. */
1700 complain_overflow_bitfield
,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc
, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE
, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE
), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE
, /* pc_relative. */
1713 complain_overflow_bitfield
,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc
, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE
, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE
), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE
, /* pc_relative. */
1726 complain_overflow_bitfield
,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc
, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE
, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE
), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE
, /* pc_relative. */
1739 complain_overflow_bitfield
,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc
, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE
, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE
), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2
[1] =
1751 HOWTO (R_ARM_IRELATIVE
, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE
, /* pc_relative */
1757 complain_overflow_bitfield
,/* complain_on_overflow */
1758 bfd_elf_generic_reloc
, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE
, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE
) /* pcrel_offset */
1766 /* 249-255 extended, currently unused, relocations: */
1767 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1769 HOWTO (R_ARM_RREL32
, /* type */
1771 0, /* size (0 = byte, 1 = short, 2 = long) */
1773 FALSE
, /* pc_relative */
1775 complain_overflow_dont
,/* complain_on_overflow */
1776 bfd_elf_generic_reloc
, /* special_function */
1777 "R_ARM_RREL32", /* name */
1778 FALSE
, /* partial_inplace */
1781 FALSE
), /* pcrel_offset */
1783 HOWTO (R_ARM_RABS32
, /* type */
1785 0, /* size (0 = byte, 1 = short, 2 = long) */
1787 FALSE
, /* pc_relative */
1789 complain_overflow_dont
,/* complain_on_overflow */
1790 bfd_elf_generic_reloc
, /* special_function */
1791 "R_ARM_RABS32", /* name */
1792 FALSE
, /* partial_inplace */
1795 FALSE
), /* pcrel_offset */
1797 HOWTO (R_ARM_RPC24
, /* type */
1799 0, /* size (0 = byte, 1 = short, 2 = long) */
1801 FALSE
, /* pc_relative */
1803 complain_overflow_dont
,/* complain_on_overflow */
1804 bfd_elf_generic_reloc
, /* special_function */
1805 "R_ARM_RPC24", /* name */
1806 FALSE
, /* partial_inplace */
1809 FALSE
), /* pcrel_offset */
1811 HOWTO (R_ARM_RBASE
, /* type */
1813 0, /* size (0 = byte, 1 = short, 2 = long) */
1815 FALSE
, /* pc_relative */
1817 complain_overflow_dont
,/* complain_on_overflow */
1818 bfd_elf_generic_reloc
, /* special_function */
1819 "R_ARM_RBASE", /* name */
1820 FALSE
, /* partial_inplace */
1823 FALSE
) /* pcrel_offset */
1826 static reloc_howto_type
*
1827 elf32_arm_howto_from_type (unsigned int r_type
)
1829 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1830 return &elf32_arm_howto_table_1
[r_type
];
1832 if (r_type
== R_ARM_IRELATIVE
)
1833 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1835 if (r_type
>= R_ARM_RREL32
1836 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1837 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1843 elf32_arm_info_to_howto (bfd
* abfd ATTRIBUTE_UNUSED
, arelent
* bfd_reloc
,
1844 Elf_Internal_Rela
* elf_reloc
)
1846 unsigned int r_type
;
1848 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1849 bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
);
1852 struct elf32_arm_reloc_map
1854 bfd_reloc_code_real_type bfd_reloc_val
;
1855 unsigned char elf_reloc_val
;
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
1861 {BFD_RELOC_NONE
, R_ARM_NONE
},
1862 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
1863 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
1864 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
1865 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
1866 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
1867 {BFD_RELOC_32
, R_ARM_ABS32
},
1868 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
1869 {BFD_RELOC_8
, R_ARM_ABS8
},
1870 {BFD_RELOC_16
, R_ARM_ABS16
},
1871 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
1872 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
1879 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
1880 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
1881 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
1882 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
1883 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
1884 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
1885 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
1886 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1887 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
1888 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
1889 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
1890 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
1891 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
1892 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1893 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
1894 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
1895 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
1898 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
1899 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
1900 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
1901 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
1904 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
1905 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
1906 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
1907 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
1908 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
1909 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
1910 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
1911 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
1912 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
1913 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
1914 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
1915 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
1919 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
1921 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
1922 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
1923 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
1924 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
1925 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
1926 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
1927 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
1928 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
1929 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
1930 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
1931 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
1933 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
1935 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
1936 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
1937 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
1938 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
1939 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
1940 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
1941 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
1942 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
1943 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
1944 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
1945 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
1946 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
}
1953 static reloc_howto_type
*
1954 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1955 bfd_reloc_code_real_type code
)
1959 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
1960 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
1966 static reloc_howto_type
*
1967 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1972 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
1973 if (elf32_arm_howto_table_1
[i
].name
!= NULL
1974 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
1975 return &elf32_arm_howto_table_1
[i
];
1977 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
1978 if (elf32_arm_howto_table_2
[i
].name
!= NULL
1979 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
1980 return &elf32_arm_howto_table_2
[i
];
1982 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
1983 if (elf32_arm_howto_table_3
[i
].name
!= NULL
1984 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
1985 return &elf32_arm_howto_table_3
[i
];
1990 /* Support for core dump NOTE sections. */
1993 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
1998 switch (note
->descsz
)
2003 case 148: /* Linux/ARM 32-bit. */
2005 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2008 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2019 size
, note
->descpos
+ offset
);
2023 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2025 switch (note
->descsz
)
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd
)->core
->pid
2032 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2033 elf_tdata (abfd
)->core
->program
2034 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2035 elf_tdata (abfd
)->core
->command
2036 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2043 char *command
= elf_tdata (abfd
)->core
->command
;
2044 int n
= strlen (command
);
2046 if (0 < n
&& command
[n
- 1] == ' ')
2047 command
[n
- 1] = '\0';
2054 elf32_arm_nabi_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
,
2067 va_start (ap
, note_type
);
2068 memset (data
, 0, sizeof (data
));
2069 strncpy (data
+ 28, va_arg (ap
, const char *), 16);
2070 strncpy (data
+ 44, va_arg (ap
, const char *), 80);
2073 return elfcore_write_note (abfd
, buf
, bufsiz
,
2074 "CORE", note_type
, data
, sizeof (data
));
2085 va_start (ap
, note_type
);
2086 memset (data
, 0, sizeof (data
));
2087 pid
= va_arg (ap
, long);
2088 bfd_put_32 (abfd
, pid
, data
+ 24);
2089 cursig
= va_arg (ap
, int);
2090 bfd_put_16 (abfd
, cursig
, data
+ 12);
2091 greg
= va_arg (ap
, const void *);
2092 memcpy (data
+ 72, greg
, 72);
2095 return elfcore_write_note (abfd
, buf
, bufsiz
,
2096 "CORE", note_type
, data
, sizeof (data
));
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2110 typedef unsigned long int insn32
;
2111 typedef unsigned short int insn16
;
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2141 /* The name of the dynamic interpreter. This is put in the .interp
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
/* ARM code sequence used for the GOT-slot TLS trampoline: compute the
   argument address from lr and branch to the function loaded from it.  */
static const unsigned long tls_trampoline[] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};
/* ARM code sequence for the lazy TLS-descriptor trampoline, which
   tail-calls the dynamic linker's lazy resolver through the GOT.  The
   two trailing words are GOT-relative offsets patched at link time.  */
static const unsigned long dl_tlsdesc_lazy_trampoline[] =
{
  0xe52d2004,		/*	push    {r2}			*/
  0xe59f200c,		/*	ldr     r2, [pc, #3f - . - 8]	*/
  0xe59f100c,		/*	ldr     r1, [pc, #4f - . - 8]	*/
  0xe79f2002,		/* 1:	ldr     r2, [pc, r2]		*/
  0xe081100f,		/* 2:	add     r1, pc			*/
  0xe12fff12,		/*	bx      r2			*/
  0x00000014,		/* 3:	.word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
				+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018,		/* 4:	.word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2165 #ifdef FOUR_WORD_PLT
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2171 static const bfd_vma elf32_arm_plt0_entry
[] =
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2179 /* Subsequent entries in a procedure linkage table look like
2181 static const bfd_vma elf32_arm_plt_entry
[] =
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2189 #else /* not FOUR_WORD_PLT */
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2195 static const bfd_vma elf32_arm_plt0_entry
[] =
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short
[] =
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long
[] =
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2223 static bfd_boolean elf32_arm_use_long_plt_entry
= FALSE
;
2225 #endif /* not FOUR_WORD_PLT */
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2243 static const bfd_vma elf32_thumb2_plt_entry
[] =
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub
[] =
2294 /* The entries in a PLT when using a DLL-based target with multiple
2296 static const bfd_vma elf32_arm_symbian_plt_entry
[] =
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2306 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2360 is inserted in arm_build_one_stub(). */
2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2364 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2365 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2366 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2371 enum stub_insn_type type
;
2372 unsigned int r_type
;
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2405 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2412 ARM_INSN (0xe12fff1c), /* bx ip */
2413 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2416 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2418 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2420 THUMB16_INSN (0x4778), /* bx pc */
2421 THUMB16_INSN (0x46c0), /* nop */
2422 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2423 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2426 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2427 one, when the destination is close enough. */
2428 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2430 THUMB16_INSN (0x4778), /* bx pc */
2431 THUMB16_INSN (0x46c0), /* nop */
2432 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2435 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2436 blx to reach the stub if necessary. */
2437 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2439 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2440 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2441 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2444 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2445 blx to reach the stub if necessary. We can not add into pc;
2446 it is not guaranteed to mode switch (different in ARMv6 and
2448 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2450 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2451 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2452 ARM_INSN (0xe12fff1c), /* bx ip */
2453 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2456 /* V4T ARM -> ARM long branch stub, PIC. */
2457 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2459 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2460 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2461 ARM_INSN (0xe12fff1c), /* bx ip */
2462 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2465 /* V4T Thumb -> ARM long branch stub, PIC. */
2466 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2468 THUMB16_INSN (0x4778), /* bx pc */
2469 THUMB16_INSN (0x46c0), /* nop */
2470 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2471 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2472 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2475 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2477 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2479 THUMB16_INSN (0xb401), /* push {r0} */
2480 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2481 THUMB16_INSN (0x46fc), /* mov ip, pc */
2482 THUMB16_INSN (0x4484), /* add ip, r0 */
2483 THUMB16_INSN (0xbc01), /* pop {r0} */
2484 THUMB16_INSN (0x4760), /* bx ip */
2485 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2488 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2490 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2492 THUMB16_INSN (0x4778), /* bx pc */
2493 THUMB16_INSN (0x46c0), /* nop */
2494 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2495 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2496 ARM_INSN (0xe12fff1c), /* bx ip */
2497 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2500 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2501 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2502 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2504 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2505 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2506 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2509 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2510 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2511 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2513 THUMB16_INSN (0x4778), /* bx pc */
2514 THUMB16_INSN (0x46c0), /* nop */
2515 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2516 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2517 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2520 /* NaCl ARM -> ARM long branch stub. */
2521 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2523 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2524 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2525 ARM_INSN (0xe12fff1c), /* bx ip */
2526 ARM_INSN (0xe320f000), /* nop */
2527 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2528 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2529 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2530 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2533 /* NaCl ARM -> ARM long branch stub, PIC. */
2534 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2536 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2537 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2538 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2539 ARM_INSN (0xe12fff1c), /* bx ip */
2540 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2541 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2542 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2543 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2547 /* Cortex-A8 erratum-workaround stubs. */
2549 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2550 can't use a conditional branch to reach this stub). */
2552 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2554 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2555 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2556 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2559 /* Stub used for b.w and bl.w instructions. */
2561 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2563 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2566 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2568 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2571 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2572 instruction (which switches to ARM mode) to point to this stub. Jump to the
2573 real destination using an ARM-mode branch. */
2575 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2577 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)
2630 #define DEF_STUB(x) arm_stub_##x,
2631 enum elf32_arm_stub_type
2638 /* Note the first a8_veneer type. */
2639 const unsigned arm_stub_a8_veneer_lwm
= arm_stub_a8_veneer_b_cond
;
2643 const insn_sequence
* template_sequence
;
2647 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2648 static const stub_def stub_definitions
[] =
2654 struct elf32_arm_stub_hash_entry
2656 /* Base hash table entry structure. */
2657 struct bfd_hash_entry root
;
2659 /* The stub section. */
2662 /* Offset within stub_sec of the beginning of this stub. */
2663 bfd_vma stub_offset
;
2665 /* Given the symbol's value and its section we can determine its final
2666 value when building the stubs (so the stub knows where to jump). */
2667 bfd_vma target_value
;
2668 asection
*target_section
;
2670 /* Same as above but for the source of the branch to the stub. Used for
2671 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2672 such, source section does not need to be recorded since Cortex-A8 erratum
2673 workaround stubs are only generated when both source and target are in the
2675 bfd_vma source_value
;
2677 /* The instruction which caused this stub to be generated (only valid for
2678 Cortex-A8 erratum workaround stubs at present). */
2679 unsigned long orig_insn
;
2681 /* The stub type. */
2682 enum elf32_arm_stub_type stub_type
;
2683 /* Its encoding size in bytes. */
2686 const insn_sequence
*stub_template
;
2687 /* The size of the template (number of entries). */
2688 int stub_template_size
;
2690 /* The symbol table entry, if any, that this was derived from. */
2691 struct elf32_arm_link_hash_entry
*h
;
2693 /* Type of branch. */
2694 enum arm_st_branch_type branch_type
;
2696 /* Where this stub is being called from, or, in the case of combined
2697 stub sections, the first input section in the group. */
2700 /* The name for the local symbol at the start of this stub. The
2701 stub name in the hash table has to be unique; this does not, so
2702 it can be friendlier. */
2706 /* Used to build a map of a section. This is required for mixed-endian
2709 typedef struct elf32_elf_section_map
2714 elf32_arm_section_map
;
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;
2727 typedef struct elf32_vfp11_erratum_list
2729 struct elf32_vfp11_erratum_list
*next
;
2735 struct elf32_vfp11_erratum_list
*veneer
;
2736 unsigned int vfp_insn
;
2740 struct elf32_vfp11_erratum_list
*branch
;
2744 elf32_vfp11_erratum_type type
;
2746 elf32_vfp11_erratum_list
;
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;
2757 typedef struct elf32_stm32l4xx_erratum_list
2759 struct elf32_stm32l4xx_erratum_list
*next
;
2765 struct elf32_stm32l4xx_erratum_list
*veneer
;
2770 struct elf32_stm32l4xx_erratum_list
*branch
;
2774 elf32_stm32l4xx_erratum_type type
;
2776 elf32_stm32l4xx_erratum_list
;
/* Kinds of edits that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;
2785 /* A (sorted) list of edits to apply to an unwind table. */
2786 typedef struct arm_unwind_table_edit
2788 arm_unwind_edit_type type
;
2789 /* Note: we sometimes want to insert an unwind entry corresponding to a
2790 section different from the one we're currently writing out, so record the
2791 (text) section this edit relates to here. */
2792 asection
*linked_section
;
2794 struct arm_unwind_table_edit
*next
;
2796 arm_unwind_table_edit
;
2798 typedef struct _arm_elf_section_data
2800 /* Information about mapping symbols. */
2801 struct bfd_elf_section_data elf
;
2802 unsigned int mapcount
;
2803 unsigned int mapsize
;
2804 elf32_arm_section_map
*map
;
2805 /* Information about CPU errata. */
2806 unsigned int erratumcount
;
2807 elf32_vfp11_erratum_list
*erratumlist
;
2808 unsigned int stm32l4xx_erratumcount
;
2809 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
2810 unsigned int additional_reloc_count
;
2811 /* Information about unwind tables. */
2814 /* Unwind info attached to a text section. */
2817 asection
*arm_exidx_sec
;
2820 /* Unwind info attached to an .ARM.exidx section. */
2823 arm_unwind_table_edit
*unwind_edit_list
;
2824 arm_unwind_table_edit
*unwind_edit_tail
;
2828 _arm_elf_section_data
;
2830 #define elf32_arm_section_data(sec) \
2831 ((_arm_elf_section_data *) elf_section_data (sec))
2833 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2834 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2835 so may be created multiple times: we use an array of these entries whilst
2836 relaxing which we can refresh easily, then create stubs for each potentially
2837 erratum-triggering instruction once we've settled on a solution. */
2839 struct a8_erratum_fix
2844 bfd_vma target_offset
;
2845 unsigned long orig_insn
;
2847 enum elf32_arm_stub_type stub_type
;
2848 enum arm_st_branch_type branch_type
;
2851 /* A table of relocs applied to branches which might trigger Cortex-A8
2854 struct a8_erratum_reloc
2857 bfd_vma destination
;
2858 struct elf32_arm_link_hash_entry
*hash
;
2859 const char *sym_name
;
2860 unsigned int r_type
;
2861 enum arm_st_branch_type branch_type
;
2862 bfd_boolean non_a8_stub
;
2865 /* The size of the thread control block. */
2868 /* ARM-specific information about a PLT entry, over and above the usual
2872 /* We reference count Thumb references to a PLT entry separately,
2873 so that we can emit the Thumb trampoline only if needed. */
2874 bfd_signed_vma thumb_refcount
;
2876 /* Some references from Thumb code may be eliminated by BL->BLX
2877 conversion, so record them separately. */
2878 bfd_signed_vma maybe_thumb_refcount
;
2880 /* How many of the recorded PLT accesses were from non-call relocations.
2881 This information is useful when deciding whether anything takes the
2882 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2883 non-call references to the function should resolve directly to the
2884 real runtime target. */
2885 unsigned int noncall_refcount
;
2887 /* Since PLT entries have variable size if the Thumb prologue is
2888 used, we need to record the index into .got.plt instead of
2889 recomputing it from the PLT offset. */
2890 bfd_signed_vma got_offset
;
2893 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2894 struct arm_local_iplt_info
2896 /* The information that is usually found in the generic ELF part of
2897 the hash table entry. */
2898 union gotplt_union root
;
2900 /* The information that is usually found in the ARM-specific part of
2901 the hash table entry. */
2902 struct arm_plt_info arm
;
2904 /* A list of all potential dynamic relocations against this symbol. */
2905 struct elf_dyn_relocs
*dyn_relocs
;
2908 struct elf_arm_obj_tdata
2910 struct elf_obj_tdata root
;
2912 /* tls_type for each local got entry. */
2913 char *local_got_tls_type
;
2915 /* GOTPLT entries for TLS descriptors. */
2916 bfd_vma
*local_tlsdesc_gotent
;
2918 /* Information for local symbols that need entries in .iplt. */
2919 struct arm_local_iplt_info
**local_iplt
;
2921 /* Zero to warn when linking objects with incompatible enum sizes. */
2922 int no_enum_size_warning
;
2924 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2925 int no_wchar_size_warning
;
2928 #define elf_arm_tdata(bfd) \
2929 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2931 #define elf32_arm_local_got_tls_type(bfd) \
2932 (elf_arm_tdata (bfd)->local_got_tls_type)
2934 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2935 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2937 #define elf32_arm_local_iplt(bfd) \
2938 (elf_arm_tdata (bfd)->local_iplt)
2940 #define is_arm_elf(bfd) \
2941 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2942 && elf_tdata (bfd) != NULL \
2943 && elf_object_id (bfd) == ARM_ELF_DATA)
2946 elf32_arm_mkobject (bfd
*abfd
)
2948 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
2952 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2954 /* Arm ELF linker hash entry. */
2955 struct elf32_arm_link_hash_entry
2957 struct elf_link_hash_entry root
;
2959 /* Track dynamic relocs copied for this symbol. */
2960 struct elf_dyn_relocs
*dyn_relocs
;
2962 /* ARM-specific PLT information. */
2963 struct arm_plt_info plt
;
2965 #define GOT_UNKNOWN 0
2966 #define GOT_NORMAL 1
2967 #define GOT_TLS_GD 2
2968 #define GOT_TLS_IE 4
2969 #define GOT_TLS_GDESC 8
2970 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2971 unsigned int tls_type
: 8;
2973 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2974 unsigned int is_iplt
: 1;
2976 unsigned int unused
: 23;
2978 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2979 starting at the end of the jump table. */
2980 bfd_vma tlsdesc_got
;
2982 /* The symbol marking the real symbol location for exported thumb
2983 symbols with Arm stubs. */
2984 struct elf_link_hash_entry
*export_glue
;
2986 /* A pointer to the most recently used stub hash entry against this
2988 struct elf32_arm_stub_hash_entry
*stub_cache
;
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3007 /* Array to keep track of which stub sections have been created, and
3008 information on stub grouping. */
3011 /* This is the section to which stubs in the group will be
3014 /* The stub section. */
3018 #define elf32_arm_compute_jump_table_size(htab) \
3019 ((htab)->next_tls_desc_index * 4)
3021 /* ARM ELF linker hash table. */
3022 struct elf32_arm_link_hash_table
3024 /* The main hash table. */
3025 struct elf_link_hash_table root
;
3027 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3028 bfd_size_type thumb_glue_size
;
3030 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3031 bfd_size_type arm_glue_size
;
3033 /* The size in bytes of section containing the ARMv4 BX veneers. */
3034 bfd_size_type bx_glue_size
;
3036 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3037 veneer has been populated. */
3038 bfd_vma bx_glue_offset
[15];
3040 /* The size in bytes of the section containing glue for VFP11 erratum
3042 bfd_size_type vfp11_erratum_glue_size
;
3044 /* The size in bytes of the section containing glue for STM32L4XX erratum
3046 bfd_size_type stm32l4xx_erratum_glue_size
;
3048 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3049 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3050 elf32_arm_write_section(). */
3051 struct a8_erratum_fix
*a8_erratum_fixes
;
3052 unsigned int num_a8_erratum_fixes
;
3054 /* An arbitrary input BFD chosen to hold the glue sections. */
3055 bfd
* bfd_of_glue_owner
;
3057 /* Nonzero to output a BE8 image. */
3060 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3061 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3064 /* The relocation to use for R_ARM_TARGET2 relocations. */
3067 /* 0 = Ignore R_ARM_V4BX.
3068 1 = Convert BX to MOV PC.
3069 2 = Generate v4 interworing stubs. */
3072 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3075 /* Whether we should fix the ARM1176 BLX immediate issue. */
3078 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3081 /* What sort of code sequences we should look for which may trigger the
3082 VFP11 denorm erratum. */
3083 bfd_arm_vfp11_fix vfp11_fix
;
3085 /* Global counter for the number of fixes we have emitted. */
3086 int num_vfp11_fixes
;
3088 /* What sort of code sequences we should look for which may trigger the
3089 STM32L4XX erratum. */
3090 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3092 /* Global counter for the number of fixes we have emitted. */
3093 int num_stm32l4xx_fixes
;
3095 /* Nonzero to force PIC branch veneers. */
3098 /* The number of bytes in the initial entry in the PLT. */
3099 bfd_size_type plt_header_size
;
3101 /* The number of bytes in the subsequent PLT etries. */
3102 bfd_size_type plt_entry_size
;
3104 /* True if the target system is VxWorks. */
3107 /* True if the target system is Symbian OS. */
3110 /* True if the target system is Native Client. */
3113 /* True if the target uses REL relocations. */
3116 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3117 bfd_vma next_tls_desc_index
;
3119 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3120 bfd_vma num_tls_desc
;
3122 /* Short-cuts to get to dynamic linker sections. */
3126 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3129 /* The offset into splt of the PLT entry for the TLS descriptor
3130 resolver. Special values are 0, if not necessary (or not found
3131 to be necessary yet), and -1 if needed but not determined
3133 bfd_vma dt_tlsdesc_plt
;
3135 /* The offset into sgot of the GOT entry used by the PLT entry
3137 bfd_vma dt_tlsdesc_got
;
3139 /* Offset in .plt section of tls_arm_trampoline. */
3140 bfd_vma tls_trampoline
;
3142 /* Data for R_ARM_TLS_LDM32 relocations. */
3145 bfd_signed_vma refcount
;
3149 /* Small local sym cache. */
3150 struct sym_cache sym_cache
;
3152 /* For convenience in allocate_dynrelocs. */
3155 /* The amount of space used by the reserved portion of the sgotplt
3156 section, plus whatever space is used by the jump slots. */
3157 bfd_vma sgotplt_jump_table_size
;
3159 /* The stub hash table. */
3160 struct bfd_hash_table stub_hash_table
;
3162 /* Linker stub bfd. */
3165 /* Linker call-backs. */
3166 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3168 void (*layout_sections_again
) (void);
3170 /* Array to keep track of which stub sections have been created, and
3171 information on stub grouping. */
3172 struct map_stub
*stub_group
;
3174 /* Number of elements in stub_group. */
3175 unsigned int top_id
;
3177 /* Assorted information used by elf32_arm_size_stubs. */
3178 unsigned int bfd_count
;
3179 unsigned int top_index
;
3180 asection
**input_list
;
/* Count trailing zero bits in MASK.  Result is undefined for MASK == 0
   when the compiler builtin is used (matching __builtin_ctz); the generic
   fallback returns the bit width in that case.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}
/* Count the number of set bits in MASK.  */
static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i, sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3219 /* Create an entry in an ARM ELF linker hash table. */
3221 static struct bfd_hash_entry
*
3222 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3223 struct bfd_hash_table
* table
,
3224 const char * string
)
3226 struct elf32_arm_link_hash_entry
* ret
=
3227 (struct elf32_arm_link_hash_entry
*) entry
;
3229 /* Allocate the structure if it has not already been allocated by a
3232 ret
= (struct elf32_arm_link_hash_entry
*)
3233 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3235 return (struct bfd_hash_entry
*) ret
;
3237 /* Call the allocation method of the superclass. */
3238 ret
= ((struct elf32_arm_link_hash_entry
*)
3239 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3243 ret
->dyn_relocs
= NULL
;
3244 ret
->tls_type
= GOT_UNKNOWN
;
3245 ret
->tlsdesc_got
= (bfd_vma
) -1;
3246 ret
->plt
.thumb_refcount
= 0;
3247 ret
->plt
.maybe_thumb_refcount
= 0;
3248 ret
->plt
.noncall_refcount
= 0;
3249 ret
->plt
.got_offset
= -1;
3250 ret
->is_iplt
= FALSE
;
3251 ret
->export_glue
= NULL
;
3253 ret
->stub_cache
= NULL
;
3256 return (struct bfd_hash_entry
*) ret
;
3259 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3263 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3265 if (elf_local_got_refcounts (abfd
) == NULL
)
3267 bfd_size_type num_syms
;
3271 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3272 size
= num_syms
* (sizeof (bfd_signed_vma
)
3273 + sizeof (struct arm_local_iplt_info
*)
3276 data
= bfd_zalloc (abfd
, size
);
3280 elf_local_got_refcounts (abfd
) = (bfd_signed_vma
*) data
;
3281 data
+= num_syms
* sizeof (bfd_signed_vma
);
3283 elf32_arm_local_iplt (abfd
) = (struct arm_local_iplt_info
**) data
;
3284 data
+= num_syms
* sizeof (struct arm_local_iplt_info
*);
3286 elf32_arm_local_tlsdesc_gotent (abfd
) = (bfd_vma
*) data
;
3287 data
+= num_syms
* sizeof (bfd_vma
);
3289 elf32_arm_local_got_tls_type (abfd
) = data
;
3294 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3295 to input bfd ABFD. Create the information if it doesn't already exist.
3296 Return null if an allocation fails. */
3298 static struct arm_local_iplt_info
*
3299 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3301 struct arm_local_iplt_info
**ptr
;
3303 if (!elf32_arm_allocate_local_sym_info (abfd
))
3306 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3307 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3309 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3313 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3314 in ABFD's symbol table. If the symbol is global, H points to its
3315 hash table entry, otherwise H is null.
3317 Return true if the symbol does have PLT information. When returning
3318 true, point *ROOT_PLT at the target-independent reference count/offset
3319 union and *ARM_PLT at the ARM-specific information. */
3322 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_entry
*h
,
3323 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3324 struct arm_plt_info
**arm_plt
)
3326 struct arm_local_iplt_info
*local_iplt
;
3330 *root_plt
= &h
->root
.plt
;
3335 if (elf32_arm_local_iplt (abfd
) == NULL
)
3338 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3339 if (local_iplt
== NULL
)
3342 *root_plt
= &local_iplt
->root
;
3343 *arm_plt
= &local_iplt
->arm
;
3347 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3351 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3352 struct arm_plt_info
*arm_plt
)
3354 struct elf32_arm_link_hash_table
*htab
;
3356 htab
= elf32_arm_hash_table (info
);
3357 return (arm_plt
->thumb_refcount
!= 0
3358 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0));
3361 /* Return a pointer to the head of the dynamic reloc list that should
3362 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3363 ABFD's symbol table. Return null if an error occurs. */
3365 static struct elf_dyn_relocs
**
3366 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3367 Elf_Internal_Sym
*isym
)
3369 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3371 struct arm_local_iplt_info
*local_iplt
;
3373 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3374 if (local_iplt
== NULL
)
3376 return &local_iplt
->dyn_relocs
;
3380 /* Track dynamic relocs needed for local syms too.
3381 We really need local syms available to do this
3386 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3390 vpp
= &elf_section_data (s
)->local_dynrel
;
3391 return (struct elf_dyn_relocs
**) vpp
;
3395 /* Initialize an entry in the stub hash table. */
3397 static struct bfd_hash_entry
*
3398 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3399 struct bfd_hash_table
*table
,
3402 /* Allocate the structure if it has not already been allocated by a
3406 entry
= (struct bfd_hash_entry
*)
3407 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3412 /* Call the allocation method of the superclass. */
3413 entry
= bfd_hash_newfunc (entry
, table
, string
);
3416 struct elf32_arm_stub_hash_entry
*eh
;
3418 /* Initialize the local fields. */
3419 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3420 eh
->stub_sec
= NULL
;
3421 eh
->stub_offset
= 0;
3422 eh
->source_value
= 0;
3423 eh
->target_value
= 0;
3424 eh
->target_section
= NULL
;
3426 eh
->stub_type
= arm_stub_none
;
3428 eh
->stub_template
= NULL
;
3429 eh
->stub_template_size
= 0;
3432 eh
->output_name
= NULL
;
3438 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3439 shortcuts to them in our hash table. */
3442 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3444 struct elf32_arm_link_hash_table
*htab
;
3446 htab
= elf32_arm_hash_table (info
);
3450 /* BPABI objects never have a GOT, or associated sections. */
3451 if (htab
->symbian_p
)
3454 if (! _bfd_elf_create_got_section (dynobj
, info
))
3460 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3463 create_ifunc_sections (struct bfd_link_info
*info
)
3465 struct elf32_arm_link_hash_table
*htab
;
3466 const struct elf_backend_data
*bed
;
3471 htab
= elf32_arm_hash_table (info
);
3472 dynobj
= htab
->root
.dynobj
;
3473 bed
= get_elf_backend_data (dynobj
);
3474 flags
= bed
->dynamic_sec_flags
;
3476 if (htab
->root
.iplt
== NULL
)
3478 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3479 flags
| SEC_READONLY
| SEC_CODE
);
3481 || !bfd_set_section_alignment (dynobj
, s
, bed
->plt_alignment
))
3483 htab
->root
.iplt
= s
;
3486 if (htab
->root
.irelplt
== NULL
)
3488 s
= bfd_make_section_anyway_with_flags (dynobj
,
3489 RELOC_SECTION (htab
, ".iplt"),
3490 flags
| SEC_READONLY
);
3492 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3494 htab
->root
.irelplt
= s
;
3497 if (htab
->root
.igotplt
== NULL
)
3499 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3501 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3503 htab
->root
.igotplt
= s
;
3508 /* Determine if we're dealing with a Thumb only architecture. */
3511 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3514 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3515 Tag_CPU_arch_profile
);
3518 return profile
== 'M';
3520 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3522 if (arch
== TAG_CPU_ARCH_V6_M
3523 || arch
== TAG_CPU_ARCH_V6S_M
3524 || arch
== TAG_CPU_ARCH_V7E_M
3525 || arch
== TAG_CPU_ARCH_V8M_BASE
3526 || arch
== TAG_CPU_ARCH_V8M_MAIN
)
3532 /* Determine if we're dealing with a Thumb-2 object. */
3535 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3537 int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3539 return arch
== TAG_CPU_ARCH_V6T2
|| arch
>= TAG_CPU_ARCH_V7
;
3542 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3543 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3547 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3549 struct elf32_arm_link_hash_table
*htab
;
3551 htab
= elf32_arm_hash_table (info
);
3555 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3558 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3561 htab
->sdynbss
= bfd_get_linker_section (dynobj
, ".dynbss");
3562 if (!bfd_link_pic (info
))
3563 htab
->srelbss
= bfd_get_linker_section (dynobj
,
3564 RELOC_SECTION (htab
, ".bss"));
3566 if (htab
->vxworks_p
)
3568 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3571 if (bfd_link_pic (info
))
3573 htab
->plt_header_size
= 0;
3574 htab
->plt_entry_size
3575 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3579 htab
->plt_header_size
3580 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3581 htab
->plt_entry_size
3582 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
3585 if (elf_elfheader (dynobj
))
3586 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
3591 Test for thumb only architectures. Note - we cannot just call
3592 using_thumb_only() as the attributes in the output bfd have not been
3593 initialised at this point, so instead we use the input bfd. */
3594 bfd
* saved_obfd
= htab
->obfd
;
3596 htab
->obfd
= dynobj
;
3597 if (using_thumb_only (htab
))
3599 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
3600 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
3602 htab
->obfd
= saved_obfd
;
3605 if (!htab
->root
.splt
3606 || !htab
->root
.srelplt
3608 || (!bfd_link_pic (info
) && !htab
->srelbss
))
3614 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3617 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
3618 struct elf_link_hash_entry
*dir
,
3619 struct elf_link_hash_entry
*ind
)
3621 struct elf32_arm_link_hash_entry
*edir
, *eind
;
3623 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
3624 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
3626 if (eind
->dyn_relocs
!= NULL
)
3628 if (edir
->dyn_relocs
!= NULL
)
3630 struct elf_dyn_relocs
**pp
;
3631 struct elf_dyn_relocs
*p
;
3633 /* Add reloc counts against the indirect sym to the direct sym
3634 list. Merge any entries against the same section. */
3635 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
; )
3637 struct elf_dyn_relocs
*q
;
3639 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
3640 if (q
->sec
== p
->sec
)
3642 q
->pc_count
+= p
->pc_count
;
3643 q
->count
+= p
->count
;
3650 *pp
= edir
->dyn_relocs
;
3653 edir
->dyn_relocs
= eind
->dyn_relocs
;
3654 eind
->dyn_relocs
= NULL
;
3657 if (ind
->root
.type
== bfd_link_hash_indirect
)
3659 /* Copy over PLT info. */
3660 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
3661 eind
->plt
.thumb_refcount
= 0;
3662 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
3663 eind
->plt
.maybe_thumb_refcount
= 0;
3664 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
3665 eind
->plt
.noncall_refcount
= 0;
3667 /* We should only allocate a function to .iplt once the final
3668 symbol information is known. */
3669 BFD_ASSERT (!eind
->is_iplt
);
3671 if (dir
->got
.refcount
<= 0)
3673 edir
->tls_type
= eind
->tls_type
;
3674 eind
->tls_type
= GOT_UNKNOWN
;
3678 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
3681 /* Destroy an ARM elf linker hash table. */
3684 elf32_arm_link_hash_table_free (bfd
*obfd
)
3686 struct elf32_arm_link_hash_table
*ret
3687 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
3689 bfd_hash_table_free (&ret
->stub_hash_table
);
3690 _bfd_elf_link_hash_table_free (obfd
);
3693 /* Create an ARM elf linker hash table. */
3695 static struct bfd_link_hash_table
*
3696 elf32_arm_link_hash_table_create (bfd
*abfd
)
3698 struct elf32_arm_link_hash_table
*ret
;
3699 bfd_size_type amt
= sizeof (struct elf32_arm_link_hash_table
);
3701 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
3705 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
3706 elf32_arm_link_hash_newfunc
,
3707 sizeof (struct elf32_arm_link_hash_entry
),
3714 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
3715 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
3716 #ifdef FOUR_WORD_PLT
3717 ret
->plt_header_size
= 16;
3718 ret
->plt_entry_size
= 16;
3720 ret
->plt_header_size
= 20;
3721 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
3726 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
3727 sizeof (struct elf32_arm_stub_hash_entry
)))
3729 _bfd_elf_link_hash_table_free (abfd
);
3732 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
3734 return &ret
->root
.root
;
3737 /* Determine what kind of NOPs are available. */
3740 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
3742 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3744 return arch
== TAG_CPU_ARCH_V6T2
3745 || arch
== TAG_CPU_ARCH_V6K
3746 || arch
== TAG_CPU_ARCH_V7
3747 || arch
== TAG_CPU_ARCH_V7E_M
;
3751 arch_has_thumb2_nop (struct elf32_arm_link_hash_table
*globals
)
3753 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3755 return (arch
== TAG_CPU_ARCH_V6T2
|| arch
== TAG_CPU_ARCH_V7
3756 || arch
== TAG_CPU_ARCH_V7E_M
);
3760 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type
)
3764 case arm_stub_long_branch_thumb_only
:
3765 case arm_stub_long_branch_v4t_thumb_arm
:
3766 case arm_stub_short_branch_v4t_thumb_arm
:
3767 case arm_stub_long_branch_v4t_thumb_arm_pic
:
3768 case arm_stub_long_branch_v4t_thumb_tls_pic
:
3769 case arm_stub_long_branch_thumb_only_pic
:
3780 /* Determine the type of stub needed, if any, for a call. */
3782 static enum elf32_arm_stub_type
3783 arm_type_of_stub (struct bfd_link_info
*info
,
3784 asection
*input_sec
,
3785 const Elf_Internal_Rela
*rel
,
3786 unsigned char st_type
,
3787 enum arm_st_branch_type
*actual_branch_type
,
3788 struct elf32_arm_link_hash_entry
*hash
,
3789 bfd_vma destination
,
3795 bfd_signed_vma branch_offset
;
3796 unsigned int r_type
;
3797 struct elf32_arm_link_hash_table
* globals
;
3800 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
3802 enum arm_st_branch_type branch_type
= *actual_branch_type
;
3803 union gotplt_union
*root_plt
;
3804 struct arm_plt_info
*arm_plt
;
3806 if (branch_type
== ST_BRANCH_LONG
)
3809 globals
= elf32_arm_hash_table (info
);
3810 if (globals
== NULL
)
3813 thumb_only
= using_thumb_only (globals
);
3815 thumb2
= using_thumb2 (globals
);
3817 /* Determine where the call point is. */
3818 location
= (input_sec
->output_offset
3819 + input_sec
->output_section
->vma
3822 r_type
= ELF32_R_TYPE (rel
->r_info
);
3824 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3825 are considering a function call relocation. */
3826 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3827 || r_type
== R_ARM_THM_JUMP19
)
3828 && branch_type
== ST_BRANCH_TO_ARM
)
3829 branch_type
= ST_BRANCH_TO_THUMB
;
3831 /* For TLS call relocs, it is the caller's responsibility to provide
3832 the address of the appropriate trampoline. */
3833 if (r_type
!= R_ARM_TLS_CALL
3834 && r_type
!= R_ARM_THM_TLS_CALL
3835 && elf32_arm_get_plt_info (input_bfd
, hash
, ELF32_R_SYM (rel
->r_info
),
3836 &root_plt
, &arm_plt
)
3837 && root_plt
->offset
!= (bfd_vma
) -1)
3841 if (hash
== NULL
|| hash
->is_iplt
)
3842 splt
= globals
->root
.iplt
;
3844 splt
= globals
->root
.splt
;
3849 /* Note when dealing with PLT entries: the main PLT stub is in
3850 ARM mode, so if the branch is in Thumb mode, another
3851 Thumb->ARM stub will be inserted later just before the ARM
3852 PLT stub. We don't take this extra distance into account
3853 here, because if a long branch stub is needed, we'll add a
3854 Thumb->Arm one and branch directly to the ARM PLT entry
3855 because it avoids spreading offset corrections in several
3858 destination
= (splt
->output_section
->vma
3859 + splt
->output_offset
3860 + root_plt
->offset
);
3862 branch_type
= ST_BRANCH_TO_ARM
;
3865 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3866 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
3868 branch_offset
= (bfd_signed_vma
)(destination
- location
);
3870 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3871 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
3873 /* Handle cases where:
3874 - this call goes too far (different Thumb/Thumb2 max
3876 - it's a Thumb->Arm call and blx is not available, or it's a
3877 Thumb->Arm branch (not bl). A stub is needed in this case,
3878 but only if this call is not through a PLT entry. Indeed,
3879 PLT stubs handle mode switching already.
3882 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
3883 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
3885 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
3886 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
3888 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
3889 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
3890 && (r_type
== R_ARM_THM_JUMP19
))
3891 || (branch_type
== ST_BRANCH_TO_ARM
3892 && (((r_type
== R_ARM_THM_CALL
3893 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
3894 || (r_type
== R_ARM_THM_JUMP24
)
3895 || (r_type
== R_ARM_THM_JUMP19
))
3898 if (branch_type
== ST_BRANCH_TO_THUMB
)
3900 /* Thumb to thumb. */
3903 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
3905 ? ((globals
->use_blx
3906 && (r_type
== R_ARM_THM_CALL
))
3907 /* V5T and above. Stub starts with ARM code, so
3908 we must be able to switch mode before
3909 reaching it, which is only possible for 'bl'
3910 (ie R_ARM_THM_CALL relocation). */
3911 ? arm_stub_long_branch_any_thumb_pic
3912 /* On V4T, use Thumb code only. */
3913 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
3915 /* non-PIC stubs. */
3916 : ((globals
->use_blx
3917 && (r_type
== R_ARM_THM_CALL
))
3918 /* V5T and above. */
3919 ? arm_stub_long_branch_any_any
3921 : arm_stub_long_branch_v4t_thumb_thumb
);
3925 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
3927 ? arm_stub_long_branch_thumb_only_pic
3929 : arm_stub_long_branch_thumb_only
;
3936 && sym_sec
->owner
!= NULL
3937 && !INTERWORK_FLAG (sym_sec
->owner
))
3939 (*_bfd_error_handler
)
3940 (_("%B(%s): warning: interworking not enabled.\n"
3941 " first occurrence: %B: Thumb call to ARM"),
3942 sym_sec
->owner
, input_bfd
, name
);
3946 (bfd_link_pic (info
) | globals
->pic_veneer
)
3948 ? (r_type
== R_ARM_THM_TLS_CALL
3949 /* TLS PIC stubs. */
3950 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
3951 : arm_stub_long_branch_v4t_thumb_tls_pic
)
3952 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
3953 /* V5T PIC and above. */
3954 ? arm_stub_long_branch_any_arm_pic
3956 : arm_stub_long_branch_v4t_thumb_arm_pic
))
3958 /* non-PIC stubs. */
3959 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
3960 /* V5T and above. */
3961 ? arm_stub_long_branch_any_any
3963 : arm_stub_long_branch_v4t_thumb_arm
);
3965 /* Handle v4t short branches. */
3966 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
3967 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
3968 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
3969 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
3973 else if (r_type
== R_ARM_CALL
3974 || r_type
== R_ARM_JUMP24
3975 || r_type
== R_ARM_PLT32
3976 || r_type
== R_ARM_TLS_CALL
)
3978 if (branch_type
== ST_BRANCH_TO_THUMB
)
3983 && sym_sec
->owner
!= NULL
3984 && !INTERWORK_FLAG (sym_sec
->owner
))
3986 (*_bfd_error_handler
)
3987 (_("%B(%s): warning: interworking not enabled.\n"
3988 " first occurrence: %B: ARM call to Thumb"),
3989 sym_sec
->owner
, input_bfd
, name
);
3992 /* We have an extra 2-bytes reach because of
3993 the mode change (bit 24 (H) of BLX encoding). */
3994 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
3995 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
3996 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
3997 || (r_type
== R_ARM_JUMP24
)
3998 || (r_type
== R_ARM_PLT32
))
4000 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4002 ? ((globals
->use_blx
)
4003 /* V5T and above. */
4004 ? arm_stub_long_branch_any_thumb_pic
4006 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4008 /* non-PIC stubs. */
4009 : ((globals
->use_blx
)
4010 /* V5T and above. */
4011 ? arm_stub_long_branch_any_any
4013 : arm_stub_long_branch_v4t_arm_thumb
);
4019 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4020 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4023 (bfd_link_pic (info
) | globals
->pic_veneer
)
4025 ? (r_type
== R_ARM_TLS_CALL
4027 ? arm_stub_long_branch_any_tls_pic
4029 ? arm_stub_long_branch_arm_nacl_pic
4030 : arm_stub_long_branch_any_arm_pic
))
4031 /* non-PIC stubs. */
4033 ? arm_stub_long_branch_arm_nacl
4034 : arm_stub_long_branch_any_any
);
4039 /* If a stub is needed, record the actual destination type. */
4040 if (stub_type
!= arm_stub_none
)
4041 *actual_branch_type
= branch_type
;
4046 /* Build a name for an entry in the stub hash table. */
4049 elf32_arm_stub_name (const asection
*input_section
,
4050 const asection
*sym_sec
,
4051 const struct elf32_arm_link_hash_entry
*hash
,
4052 const Elf_Internal_Rela
*rel
,
4053 enum elf32_arm_stub_type stub_type
)
4060 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 8 + 1 + 2 + 1;
4061 stub_name
= (char *) bfd_malloc (len
);
4062 if (stub_name
!= NULL
)
4063 sprintf (stub_name
, "%08x_%s+%x_%d",
4064 input_section
->id
& 0xffffffff,
4065 hash
->root
.root
.root
.string
,
4066 (int) rel
->r_addend
& 0xffffffff,
4071 len
= 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4072 stub_name
= (char *) bfd_malloc (len
);
4073 if (stub_name
!= NULL
)
4074 sprintf (stub_name
, "%08x_%x:%x+%x_%d",
4075 input_section
->id
& 0xffffffff,
4076 sym_sec
->id
& 0xffffffff,
4077 ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
4078 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
4079 ? 0 : (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
4080 (int) rel
->r_addend
& 0xffffffff,
4087 /* Look up an entry in the stub hash. Stub entries are cached because
4088 creating the stub name takes a bit of time. */
4090 static struct elf32_arm_stub_hash_entry
*
4091 elf32_arm_get_stub_entry (const asection
*input_section
,
4092 const asection
*sym_sec
,
4093 struct elf_link_hash_entry
*hash
,
4094 const Elf_Internal_Rela
*rel
,
4095 struct elf32_arm_link_hash_table
*htab
,
4096 enum elf32_arm_stub_type stub_type
)
4098 struct elf32_arm_stub_hash_entry
*stub_entry
;
4099 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4100 const asection
*id_sec
;
4102 if ((input_section
->flags
& SEC_CODE
) == 0)
4105 /* If this input section is part of a group of sections sharing one
4106 stub section, then use the id of the first section in the group.
4107 Stub names need to include a section id, as there may well be
4108 more than one stub used to reach say, printf, and we need to
4109 distinguish between them. */
4110 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4112 if (h
!= NULL
&& h
->stub_cache
!= NULL
4113 && h
->stub_cache
->h
== h
4114 && h
->stub_cache
->id_sec
== id_sec
4115 && h
->stub_cache
->stub_type
== stub_type
)
4117 stub_entry
= h
->stub_cache
;
4123 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4124 if (stub_name
== NULL
)
4127 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4128 stub_name
, FALSE
, FALSE
);
4130 h
->stub_cache
= stub_entry
;
4138 /* Find or create a stub section. Returns a pointer to the stub section, and
4139 the section to which the stub section will be attached (in *LINK_SEC_P).
4140 LINK_SEC_P may be NULL. */
4143 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4144 struct elf32_arm_link_hash_table
*htab
)
4150 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4151 BFD_ASSERT (link_sec
!= NULL
);
4152 stub_sec
= htab
->stub_group
[section
->id
].stub_sec
;
4154 if (stub_sec
== NULL
)
4156 stub_sec
= htab
->stub_group
[link_sec
->id
].stub_sec
;
4157 if (stub_sec
== NULL
)
4163 namelen
= strlen (link_sec
->name
);
4164 len
= namelen
+ sizeof (STUB_SUFFIX
);
4165 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4169 memcpy (s_name
, link_sec
->name
, namelen
);
4170 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4171 out_sec
= link_sec
->output_section
;
4172 stub_sec
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4173 htab
->nacl_p
? 4 : 3);
4174 if (stub_sec
== NULL
)
4176 htab
->stub_group
[link_sec
->id
].stub_sec
= stub_sec
;
4178 htab
->stub_group
[section
->id
].stub_sec
= stub_sec
;
4182 *link_sec_p
= link_sec
;
4187 /* Add a new stub entry to the stub hash. Not all fields of the new
4188 stub entry are initialised. */
4190 static struct elf32_arm_stub_hash_entry
*
4191 elf32_arm_add_stub (const char *stub_name
,
4193 struct elf32_arm_link_hash_table
*htab
)
4197 struct elf32_arm_stub_hash_entry
*stub_entry
;
4199 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
);
4200 if (stub_sec
== NULL
)
4203 /* Enter this entry into the linker stub hash table. */
4204 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4206 if (stub_entry
== NULL
)
4208 if (section
== NULL
)
4210 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
4216 stub_entry
->stub_sec
= stub_sec
;
4217 stub_entry
->stub_offset
= 0;
4218 stub_entry
->id_sec
= link_sec
;
4223 /* Store an Arm insn into an output section not processed by
4224 elf32_arm_write_section. */
4227 put_arm_insn (struct elf32_arm_link_hash_table
* htab
,
4228 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4230 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4231 bfd_putl32 (val
, ptr
);
4233 bfd_putb32 (val
, ptr
);
4236 /* Store a 16-bit Thumb insn into an output section not processed by
4237 elf32_arm_write_section. */
4240 put_thumb_insn (struct elf32_arm_link_hash_table
* htab
,
4241 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4243 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4244 bfd_putl16 (val
, ptr
);
4246 bfd_putb16 (val
, ptr
);
4249 /* Store a Thumb2 insn into an output section not processed by
4250 elf32_arm_write_section. */
4253 put_thumb2_insn (struct elf32_arm_link_hash_table
* htab
,
4254 bfd
* output_bfd
, bfd_vma val
, bfd_byte
* ptr
)
4256 /* T2 instructions are 16-bit streamed. */
4257 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4259 bfd_putl16 ((val
>> 16) & 0xffff, ptr
);
4260 bfd_putl16 ((val
& 0xffff), ptr
+ 2);
4264 bfd_putb16 ((val
>> 16) & 0xffff, ptr
);
4265 bfd_putb16 ((val
& 0xffff), ptr
+ 2);
4269 /* If it's possible to change R_TYPE to a more efficient access
4270 model, return the new reloc type. */
4273 elf32_arm_tls_transition (struct bfd_link_info
*info
, int r_type
,
4274 struct elf_link_hash_entry
*h
)
4276 int is_local
= (h
== NULL
);
4278 if (bfd_link_pic (info
)
4279 || (h
&& h
->root
.type
== bfd_link_hash_undefweak
))
4282 /* We do not support relaxations for Old TLS models. */
4285 case R_ARM_TLS_GOTDESC
:
4286 case R_ARM_TLS_CALL
:
4287 case R_ARM_THM_TLS_CALL
:
4288 case R_ARM_TLS_DESCSEQ
:
4289 case R_ARM_THM_TLS_DESCSEQ
:
4290 return is_local
? R_ARM_TLS_LE32
: R_ARM_TLS_IE32
;
4296 static bfd_reloc_status_type elf32_arm_final_link_relocate
4297 (reloc_howto_type
*, bfd
*, bfd
*, asection
*, bfd_byte
*,
4298 Elf_Internal_Rela
*, bfd_vma
, struct bfd_link_info
*, asection
*,
4299 const char *, unsigned char, enum arm_st_branch_type
,
4300 struct elf_link_hash_entry
*, bfd_boolean
*, char **);
4303 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type
)
4307 case arm_stub_a8_veneer_b_cond
:
4308 case arm_stub_a8_veneer_b
:
4309 case arm_stub_a8_veneer_bl
:
4312 case arm_stub_long_branch_any_any
:
4313 case arm_stub_long_branch_v4t_arm_thumb
:
4314 case arm_stub_long_branch_thumb_only
:
4315 case arm_stub_long_branch_v4t_thumb_thumb
:
4316 case arm_stub_long_branch_v4t_thumb_arm
:
4317 case arm_stub_short_branch_v4t_thumb_arm
:
4318 case arm_stub_long_branch_any_arm_pic
:
4319 case arm_stub_long_branch_any_thumb_pic
:
4320 case arm_stub_long_branch_v4t_thumb_thumb_pic
:
4321 case arm_stub_long_branch_v4t_arm_thumb_pic
:
4322 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4323 case arm_stub_long_branch_thumb_only_pic
:
4324 case arm_stub_long_branch_any_tls_pic
:
4325 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4326 case arm_stub_a8_veneer_blx
:
4329 case arm_stub_long_branch_arm_nacl
:
4330 case arm_stub_long_branch_arm_nacl_pic
:
4334 abort (); /* Should be unreachable. */
4339 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
4343 struct elf32_arm_stub_hash_entry
*stub_entry
;
4344 struct elf32_arm_link_hash_table
*globals
;
4345 struct bfd_link_info
*info
;
4352 const insn_sequence
*template_sequence
;
4354 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
4355 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
4358 /* Massage our args to the form they really have. */
4359 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4360 info
= (struct bfd_link_info
*) in_arg
;
4362 globals
= elf32_arm_hash_table (info
);
4363 if (globals
== NULL
)
4366 stub_sec
= stub_entry
->stub_sec
;
4368 if ((globals
->fix_cortex_a8
< 0)
4369 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
4370 /* We have to do less-strictly-aligned fixes last. */
4373 /* Make a note of the offset within the stubs for this entry. */
4374 stub_entry
->stub_offset
= stub_sec
->size
;
4375 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
4377 stub_bfd
= stub_sec
->owner
;
4379 /* This is the address of the stub destination. */
4380 sym_value
= (stub_entry
->target_value
4381 + stub_entry
->target_section
->output_offset
4382 + stub_entry
->target_section
->output_section
->vma
);
4384 template_sequence
= stub_entry
->stub_template
;
4385 template_size
= stub_entry
->stub_template_size
;
4388 for (i
= 0; i
< template_size
; i
++)
4390 switch (template_sequence
[i
].type
)
4394 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
4395 if (template_sequence
[i
].reloc_addend
!= 0)
4397 /* We've borrowed the reloc_addend field to mean we should
4398 insert a condition code into this (Thumb-1 branch)
4399 instruction. See THUMB16_BCOND_INSN. */
4400 BFD_ASSERT ((data
& 0xff00) == 0xd000);
4401 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
4403 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
4409 bfd_put_16 (stub_bfd
,
4410 (template_sequence
[i
].data
>> 16) & 0xffff,
4412 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
4414 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
4416 stub_reloc_idx
[nrelocs
] = i
;
4417 stub_reloc_offset
[nrelocs
++] = size
;
4423 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
4425 /* Handle cases where the target is encoded within the
4427 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
4429 stub_reloc_idx
[nrelocs
] = i
;
4430 stub_reloc_offset
[nrelocs
++] = size
;
4436 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
4437 stub_reloc_idx
[nrelocs
] = i
;
4438 stub_reloc_offset
[nrelocs
++] = size
;
4448 stub_sec
->size
+= size
;
4450 /* Stub size has already been computed in arm_size_one_stub. Check
4452 BFD_ASSERT (size
== stub_entry
->stub_size
);
4454 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4455 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
4458 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4460 BFD_ASSERT (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
);
4462 for (i
= 0; i
< nrelocs
; i
++)
4464 Elf_Internal_Rela rel
;
4465 bfd_boolean unresolved_reloc
;
4466 char *error_message
;
4468 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
4470 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
4471 rel
.r_info
= ELF32_R_INFO (0,
4472 template_sequence
[stub_reloc_idx
[i
]].r_type
);
4475 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
4476 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4477 template should refer back to the instruction after the original
4478 branch. We use target_section as Cortex-A8 erratum workaround stubs
4479 are only generated when both source and target are in the same
4481 points_to
= stub_entry
->target_section
->output_section
->vma
4482 + stub_entry
->target_section
->output_offset
4483 + stub_entry
->source_value
;
4485 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4486 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
4487 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
4488 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
4489 stub_entry
->branch_type
,
4490 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
4498 /* Calculate the template, template size and instruction size for a stub.
4499 Return value is the instruction size. */
4502 find_stub_size_and_template (enum elf32_arm_stub_type stub_type
,
4503 const insn_sequence
**stub_template
,
4504 int *stub_template_size
)
4506 const insn_sequence
*template_sequence
= NULL
;
4507 int template_size
= 0, i
;
4510 template_sequence
= stub_definitions
[stub_type
].template_sequence
;
4512 *stub_template
= template_sequence
;
4514 template_size
= stub_definitions
[stub_type
].template_size
;
4515 if (stub_template_size
)
4516 *stub_template_size
= template_size
;
4519 for (i
= 0; i
< template_size
; i
++)
4521 switch (template_sequence
[i
].type
)
4542 /* As above, but don't actually build the stub. Just bump offset so
4543 we know stub section sizes. */
4546 arm_size_one_stub (struct bfd_hash_entry
*gen_entry
,
4547 void *in_arg ATTRIBUTE_UNUSED
)
4549 struct elf32_arm_stub_hash_entry
*stub_entry
;
4550 const insn_sequence
*template_sequence
;
4551 int template_size
, size
;
4553 /* Massage our args to the form they really have. */
4554 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4556 BFD_ASSERT((stub_entry
->stub_type
> arm_stub_none
)
4557 && stub_entry
->stub_type
< ARRAY_SIZE(stub_definitions
));
4559 size
= find_stub_size_and_template (stub_entry
->stub_type
, &template_sequence
,
4562 stub_entry
->stub_size
= size
;
4563 stub_entry
->stub_template
= template_sequence
;
4564 stub_entry
->stub_template_size
= template_size
;
4566 size
= (size
+ 7) & ~7;
4567 stub_entry
->stub_sec
->size
+= size
;
4572 /* External entry points for sizing and building linker stubs. */
4574 /* Set up various things so that we can make a list of input sections
4575 for each output section included in the link. Returns -1 on error,
4576 0 when no stubs will be needed, and 1 on success. */
4579 elf32_arm_setup_section_lists (bfd
*output_bfd
,
4580 struct bfd_link_info
*info
)
4583 unsigned int bfd_count
;
4584 unsigned int top_id
, top_index
;
4586 asection
**input_list
, **list
;
4588 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4592 if (! is_elf_hash_table (htab
))
4595 /* Count the number of input BFDs and find the top input section id. */
4596 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
4598 input_bfd
= input_bfd
->link
.next
)
4601 for (section
= input_bfd
->sections
;
4603 section
= section
->next
)
4605 if (top_id
< section
->id
)
4606 top_id
= section
->id
;
4609 htab
->bfd_count
= bfd_count
;
4611 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
4612 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
4613 if (htab
->stub_group
== NULL
)
4615 htab
->top_id
= top_id
;
4617 /* We can't use output_bfd->section_count here to find the top output
4618 section index as some sections may have been removed, and
4619 _bfd_strip_section_from_output doesn't renumber the indices. */
4620 for (section
= output_bfd
->sections
, top_index
= 0;
4622 section
= section
->next
)
4624 if (top_index
< section
->index
)
4625 top_index
= section
->index
;
4628 htab
->top_index
= top_index
;
4629 amt
= sizeof (asection
*) * (top_index
+ 1);
4630 input_list
= (asection
**) bfd_malloc (amt
);
4631 htab
->input_list
= input_list
;
4632 if (input_list
== NULL
)
4635 /* For sections we aren't interested in, mark their entries with a
4636 value we can check later. */
4637 list
= input_list
+ top_index
;
4639 *list
= bfd_abs_section_ptr
;
4640 while (list
-- != input_list
);
4642 for (section
= output_bfd
->sections
;
4644 section
= section
->next
)
4646 if ((section
->flags
& SEC_CODE
) != 0)
4647 input_list
[section
->index
] = NULL
;
4653 /* The linker repeatedly calls this function for each input section,
4654 in the order that input sections are linked into output sections.
4655 Build lists of input sections to determine groupings between which
4656 we may insert linker stubs. */
4659 elf32_arm_next_input_section (struct bfd_link_info
*info
,
4662 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4667 if (isec
->output_section
->index
<= htab
->top_index
)
4669 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
4671 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
4673 /* Steal the link_sec pointer for our list. */
4674 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4675 /* This happens to make the list in reverse order,
4676 which we reverse later. */
4677 PREV_SEC (isec
) = *list
;
4683 /* See whether we can group stub sections together. Grouping stub
4684 sections may result in fewer stubs. More importantly, we need to
4685 put all .init* and .fini* stubs at the end of the .init or
4686 .fini output sections respectively, because glibc splits the
4687 _init and _fini functions into multiple parts. Putting a stub in
4688 the middle of a function is not a good idea. */
4691 group_sections (struct elf32_arm_link_hash_table
*htab
,
4692 bfd_size_type stub_group_size
,
4693 bfd_boolean stubs_always_after_branch
)
4695 asection
**list
= htab
->input_list
;
4699 asection
*tail
= *list
;
4702 if (tail
== bfd_abs_section_ptr
)
4705 /* Reverse the list: we must avoid placing stubs at the
4706 beginning of the section because the beginning of the text
4707 section may be required for an interrupt vector in bare metal
4709 #define NEXT_SEC PREV_SEC
4711 while (tail
!= NULL
)
4713 /* Pop from tail. */
4714 asection
*item
= tail
;
4715 tail
= PREV_SEC (item
);
4718 NEXT_SEC (item
) = head
;
4722 while (head
!= NULL
)
4726 bfd_vma stub_group_start
= head
->output_offset
;
4727 bfd_vma end_of_next
;
4730 while (NEXT_SEC (curr
) != NULL
)
4732 next
= NEXT_SEC (curr
);
4733 end_of_next
= next
->output_offset
+ next
->size
;
4734 if (end_of_next
- stub_group_start
>= stub_group_size
)
4735 /* End of NEXT is too far from start, so stop. */
4737 /* Add NEXT to the group. */
4741 /* OK, the size from the start to the start of CURR is less
4742 than stub_group_size and thus can be handled by one stub
4743 section. (Or the head section is itself larger than
4744 stub_group_size, in which case we may be toast.)
4745 We should really be keeping track of the total size of
4746 stubs added here, as stubs contribute to the final output
4750 next
= NEXT_SEC (head
);
4751 /* Set up this stub group. */
4752 htab
->stub_group
[head
->id
].link_sec
= curr
;
4754 while (head
!= curr
&& (head
= next
) != NULL
);
4756 /* But wait, there's more! Input sections up to stub_group_size
4757 bytes after the stub section can be handled by it too. */
4758 if (!stubs_always_after_branch
)
4760 stub_group_start
= curr
->output_offset
+ curr
->size
;
4762 while (next
!= NULL
)
4764 end_of_next
= next
->output_offset
+ next
->size
;
4765 if (end_of_next
- stub_group_start
>= stub_group_size
)
4766 /* End of NEXT is too far from stubs, so stop. */
4768 /* Add NEXT to the stub group. */
4770 next
= NEXT_SEC (head
);
4771 htab
->stub_group
[head
->id
].link_sec
= curr
;
4777 while (list
++ != htab
->input_list
+ htab
->top_index
);
4779 free (htab
->input_list
);
4784 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4788 a8_reloc_compare (const void *a
, const void *b
)
4790 const struct a8_erratum_reloc
*ra
= (const struct a8_erratum_reloc
*) a
;
4791 const struct a8_erratum_reloc
*rb
= (const struct a8_erratum_reloc
*) b
;
4793 if (ra
->from
< rb
->from
)
4795 else if (ra
->from
> rb
->from
)
4801 static struct elf_link_hash_entry
*find_thumb_glue (struct bfd_link_info
*,
4802 const char *, char **);
4804 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4805 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4806 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4810 cortex_a8_erratum_scan (bfd
*input_bfd
,
4811 struct bfd_link_info
*info
,
4812 struct a8_erratum_fix
**a8_fixes_p
,
4813 unsigned int *num_a8_fixes_p
,
4814 unsigned int *a8_fix_table_size_p
,
4815 struct a8_erratum_reloc
*a8_relocs
,
4816 unsigned int num_a8_relocs
,
4817 unsigned prev_num_a8_fixes
,
4818 bfd_boolean
*stub_changed_p
)
4821 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4822 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
4823 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
4824 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
4829 for (section
= input_bfd
->sections
;
4831 section
= section
->next
)
4833 bfd_byte
*contents
= NULL
;
4834 struct _arm_elf_section_data
*sec_data
;
4838 if (elf_section_type (section
) != SHT_PROGBITS
4839 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
4840 || (section
->flags
& SEC_EXCLUDE
) != 0
4841 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
4842 || (section
->output_section
== bfd_abs_section_ptr
))
4845 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
4847 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
4848 contents
= elf_section_data (section
)->this_hdr
.contents
;
4849 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
4852 sec_data
= elf32_arm_section_data (section
);
4854 for (span
= 0; span
< sec_data
->mapcount
; span
++)
4856 unsigned int span_start
= sec_data
->map
[span
].vma
;
4857 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
4858 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
4860 char span_type
= sec_data
->map
[span
].type
;
4861 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
4863 if (span_type
!= 't')
4866 /* Span is entirely within a single 4KB region: skip scanning. */
4867 if (((base_vma
+ span_start
) & ~0xfff)
4868 == ((base_vma
+ span_end
) & ~0xfff))
4871 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4873 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4874 * The branch target is in the same 4KB region as the
4875 first half of the branch.
4876 * The instruction before the branch is a 32-bit
4877 length non-branch instruction. */
4878 for (i
= span_start
; i
< span_end
;)
4880 unsigned int insn
= bfd_getl16 (&contents
[i
]);
4881 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
4882 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
4884 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
4889 /* Load the rest of the insn (in manual-friendly order). */
4890 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
4892 /* Encoding T4: B<c>.W. */
4893 is_b
= (insn
& 0xf800d000) == 0xf0009000;
4894 /* Encoding T1: BL<c>.W. */
4895 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
4896 /* Encoding T2: BLX<c>.W. */
4897 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
4898 /* Encoding T3: B<c>.W (not permitted in IT block). */
4899 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
4900 && (insn
& 0x07f00000) != 0x03800000;
4903 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
4905 if (((base_vma
+ i
) & 0xfff) == 0xffe
4909 && ! last_was_branch
)
4911 bfd_signed_vma offset
= 0;
4912 bfd_boolean force_target_arm
= FALSE
;
4913 bfd_boolean force_target_thumb
= FALSE
;
4915 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4916 struct a8_erratum_reloc key
, *found
;
4917 bfd_boolean use_plt
= FALSE
;
4919 key
.from
= base_vma
+ i
;
4920 found
= (struct a8_erratum_reloc
*)
4921 bsearch (&key
, a8_relocs
, num_a8_relocs
,
4922 sizeof (struct a8_erratum_reloc
),
4927 char *error_message
= NULL
;
4928 struct elf_link_hash_entry
*entry
;
4930 /* We don't care about the error returned from this
4931 function, only if there is glue or not. */
4932 entry
= find_thumb_glue (info
, found
->sym_name
,
4936 found
->non_a8_stub
= TRUE
;
4938 /* Keep a simpler condition, for the sake of clarity. */
4939 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
4940 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
4943 if (found
->r_type
== R_ARM_THM_CALL
)
4945 if (found
->branch_type
== ST_BRANCH_TO_ARM
4947 force_target_arm
= TRUE
;
4949 force_target_thumb
= TRUE
;
4953 /* Check if we have an offending branch instruction. */
4955 if (found
&& found
->non_a8_stub
)
4956 /* We've already made a stub for this instruction, e.g.
4957 it's a long branch or a Thumb->ARM stub. Assume that
4958 stub will suffice to work around the A8 erratum (see
4959 setting of always_after_branch above). */
4963 offset
= (insn
& 0x7ff) << 1;
4964 offset
|= (insn
& 0x3f0000) >> 4;
4965 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
4966 offset
|= (insn
& 0x800) ? 0x80000 : 0;
4967 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
4968 if (offset
& 0x100000)
4969 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
4970 stub_type
= arm_stub_a8_veneer_b_cond
;
4972 else if (is_b
|| is_bl
|| is_blx
)
4974 int s
= (insn
& 0x4000000) != 0;
4975 int j1
= (insn
& 0x2000) != 0;
4976 int j2
= (insn
& 0x800) != 0;
4980 offset
= (insn
& 0x7ff) << 1;
4981 offset
|= (insn
& 0x3ff0000) >> 4;
4985 if (offset
& 0x1000000)
4986 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
4989 offset
&= ~ ((bfd_signed_vma
) 3);
4991 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
4992 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
4995 if (stub_type
!= arm_stub_none
)
4997 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
4999 /* The original instruction is a BL, but the target is
5000 an ARM instruction. If we were not making a stub,
5001 the BL would have been converted to a BLX. Use the
5002 BLX stub instead in that case. */
5003 if (htab
->use_blx
&& force_target_arm
5004 && stub_type
== arm_stub_a8_veneer_bl
)
5006 stub_type
= arm_stub_a8_veneer_blx
;
5010 /* Conversely, if the original instruction was
5011 BLX but the target is Thumb mode, use the BL
5013 else if (force_target_thumb
5014 && stub_type
== arm_stub_a8_veneer_blx
)
5016 stub_type
= arm_stub_a8_veneer_bl
;
5022 pc_for_insn
&= ~ ((bfd_vma
) 3);
5024 /* If we found a relocation, use the proper destination,
5025 not the offset in the (unrelocated) instruction.
5026 Note this is always done if we switched the stub type
5030 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5032 /* If the stub will use a Thumb-mode branch to a
5033 PLT target, redirect it to the preceding Thumb
5035 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5036 offset
-= PLT_THUMB_STUB_SIZE
;
5038 target
= pc_for_insn
+ offset
;
5040 /* The BLX stub is ARM-mode code. Adjust the offset to
5041 take the different PC value (+8 instead of +4) into
5043 if (stub_type
== arm_stub_a8_veneer_blx
)
5046 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5048 char *stub_name
= NULL
;
5050 if (num_a8_fixes
== a8_fix_table_size
)
5052 a8_fix_table_size
*= 2;
5053 a8_fixes
= (struct a8_erratum_fix
*)
5054 bfd_realloc (a8_fixes
,
5055 sizeof (struct a8_erratum_fix
)
5056 * a8_fix_table_size
);
5059 if (num_a8_fixes
< prev_num_a8_fixes
)
5061 /* If we're doing a subsequent scan,
5062 check if we've found the same fix as
5063 before, and try and reuse the stub
5065 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5066 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5067 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5071 *stub_changed_p
= TRUE
;
5077 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5078 if (stub_name
!= NULL
)
5079 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5082 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5083 a8_fixes
[num_a8_fixes
].section
= section
;
5084 a8_fixes
[num_a8_fixes
].offset
= i
;
5085 a8_fixes
[num_a8_fixes
].target_offset
=
5087 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5088 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5089 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5090 a8_fixes
[num_a8_fixes
].branch_type
=
5091 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5098 i
+= insn_32bit
? 4 : 2;
5099 last_was_32bit
= insn_32bit
;
5100 last_was_branch
= is_32bit_branch
;
5104 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5108 *a8_fixes_p
= a8_fixes
;
5109 *num_a8_fixes_p
= num_a8_fixes
;
5110 *a8_fix_table_size_p
= a8_fix_table_size
;
5115 /* Create or update a stub entry depending on whether the stub can already be
5116 found in HTAB. The stub is identified by:
5117 - its type STUB_TYPE
5118 - its source branch (note that several can share the same stub) whose
5119 section and relocation (if any) are given by SECTION and IRELA
5121 - its target symbol whose input section, hash, name, value and branch type
5122 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5125 If found, the value of the stub's target symbol is updated from SYM_VALUE
5126 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5127 TRUE and the stub entry is initialized.
5129 Returns whether the stub could be successfully created or updated, or FALSE
5130 if an error occured. */
5133 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5134 enum elf32_arm_stub_type stub_type
, asection
*section
,
5135 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5136 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5137 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5138 bfd_boolean
*new_stub
)
5140 const asection
*id_sec
;
5142 struct elf32_arm_stub_hash_entry
*stub_entry
;
5143 unsigned int r_type
;
5145 BFD_ASSERT (stub_type
!= arm_stub_none
);
5149 BFD_ASSERT (section
);
5151 /* Support for grouping stub sections. */
5152 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5154 /* Get the name of this stub. */
5155 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
, stub_type
);
5159 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5161 /* The proper stub has already been created, just update its value. */
5162 if (stub_entry
!= NULL
)
5165 stub_entry
->target_value
= sym_value
;
5169 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
);
5170 if (stub_entry
== NULL
)
5176 stub_entry
->target_value
= sym_value
;
5177 stub_entry
->target_section
= sym_sec
;
5178 stub_entry
->stub_type
= stub_type
;
5179 stub_entry
->h
= hash
;
5180 stub_entry
->branch_type
= branch_type
;
5182 if (sym_name
== NULL
)
5183 sym_name
= "unnamed";
5184 stub_entry
->output_name
= (char *)
5185 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5186 + strlen (sym_name
));
5187 if (stub_entry
->output_name
== NULL
)
5193 /* For historical reasons, use the existing names for ARM-to-Thumb and
5194 Thumb-to-ARM stubs. */
5195 r_type
= ELF32_R_TYPE (irela
->r_info
);
5196 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5197 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5198 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5199 && branch_type
== ST_BRANCH_TO_ARM
)
5200 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5201 else if ((r_type
== (unsigned int) R_ARM_CALL
5202 || r_type
== (unsigned int) R_ARM_JUMP24
)
5203 && branch_type
== ST_BRANCH_TO_THUMB
)
5204 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5206 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5212 /* Determine and set the size of the stub section for a final link.
5214 The basic idea here is to examine all the relocations looking for
5215 PC-relative calls to a target that is unreachable with a "bl"
5219 elf32_arm_size_stubs (bfd
*output_bfd
,
5221 struct bfd_link_info
*info
,
5222 bfd_signed_vma group_size
,
5223 asection
* (*add_stub_section
) (const char *, asection
*,
5226 void (*layout_sections_again
) (void))
5228 bfd_size_type stub_group_size
;
5229 bfd_boolean stubs_always_after_branch
;
5230 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5231 struct a8_erratum_fix
*a8_fixes
= NULL
;
5232 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
5233 struct a8_erratum_reloc
*a8_relocs
= NULL
;
5234 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
5239 if (htab
->fix_cortex_a8
)
5241 a8_fixes
= (struct a8_erratum_fix
*)
5242 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
5243 a8_relocs
= (struct a8_erratum_reloc
*)
5244 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
5247 /* Propagate mach to stub bfd, because it may not have been
5248 finalized when we created stub_bfd. */
5249 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
5250 bfd_get_mach (output_bfd
));
5252 /* Stash our params away. */
5253 htab
->stub_bfd
= stub_bfd
;
5254 htab
->add_stub_section
= add_stub_section
;
5255 htab
->layout_sections_again
= layout_sections_again
;
5256 stubs_always_after_branch
= group_size
< 0;
5258 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5259 as the first half of a 32-bit branch straddling two 4K pages. This is a
5260 crude way of enforcing that. */
5261 if (htab
->fix_cortex_a8
)
5262 stubs_always_after_branch
= 1;
5265 stub_group_size
= -group_size
;
5267 stub_group_size
= group_size
;
5269 if (stub_group_size
== 1)
5271 /* Default values. */
5272 /* Thumb branch range is +-4MB has to be used as the default
5273 maximum size (a given section can contain both ARM and Thumb
5274 code, so the worst case has to be taken into account).
5276 This value is 24K less than that, which allows for 2025
5277 12-byte stubs. If we exceed that, then we will fail to link.
5278 The user will have to relink with an explicit group size
5280 stub_group_size
= 4170000;
5283 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
5285 /* If we're applying the cortex A8 fix, we need to determine the
5286 program header size now, because we cannot change it later --
5287 that could alter section placements. Notice the A8 erratum fix
5288 ends up requiring the section addresses to remain unchanged
5289 modulo the page size. That's something we cannot represent
5290 inside BFD, and we don't want to force the section alignment to
5291 be the page size. */
5292 if (htab
->fix_cortex_a8
)
5293 (*htab
->layout_sections_again
) ();
5298 unsigned int bfd_indx
;
5300 bfd_boolean stub_changed
= FALSE
;
5301 unsigned prev_num_a8_fixes
= num_a8_fixes
;
5304 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
5306 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
5308 Elf_Internal_Shdr
*symtab_hdr
;
5310 Elf_Internal_Sym
*local_syms
= NULL
;
5312 if (!is_arm_elf (input_bfd
))
5317 /* We'll need the symbol table in a second. */
5318 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5319 if (symtab_hdr
->sh_info
== 0)
5322 /* Walk over each section attached to the input bfd. */
5323 for (section
= input_bfd
->sections
;
5325 section
= section
->next
)
5327 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5329 /* If there aren't any relocs, then there's nothing more
5331 if ((section
->flags
& SEC_RELOC
) == 0
5332 || section
->reloc_count
== 0
5333 || (section
->flags
& SEC_CODE
) == 0)
5336 /* If this section is a link-once section that will be
5337 discarded, then don't create any stubs. */
5338 if (section
->output_section
== NULL
5339 || section
->output_section
->owner
!= output_bfd
)
5342 /* Get the relocs. */
5344 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
5345 NULL
, info
->keep_memory
);
5346 if (internal_relocs
== NULL
)
5347 goto error_ret_free_local
;
5349 /* Now examine each relocation. */
5350 irela
= internal_relocs
;
5351 irelaend
= irela
+ section
->reloc_count
;
5352 for (; irela
< irelaend
; irela
++)
5354 unsigned int r_type
, r_indx
;
5355 enum elf32_arm_stub_type stub_type
;
5358 bfd_vma destination
;
5359 struct elf32_arm_link_hash_entry
*hash
;
5360 const char *sym_name
;
5361 unsigned char st_type
;
5362 enum arm_st_branch_type branch_type
;
5363 bfd_boolean created_stub
= FALSE
;
5365 r_type
= ELF32_R_TYPE (irela
->r_info
);
5366 r_indx
= ELF32_R_SYM (irela
->r_info
);
5368 if (r_type
>= (unsigned int) R_ARM_max
)
5370 bfd_set_error (bfd_error_bad_value
);
5371 error_ret_free_internal
:
5372 if (elf_section_data (section
)->relocs
== NULL
)
5373 free (internal_relocs
);
5375 error_ret_free_local
:
5376 if (local_syms
!= NULL
5377 && (symtab_hdr
->contents
5378 != (unsigned char *) local_syms
))
5384 if (r_indx
>= symtab_hdr
->sh_info
)
5385 hash
= elf32_arm_hash_entry
5386 (elf_sym_hashes (input_bfd
)
5387 [r_indx
- symtab_hdr
->sh_info
]);
5389 /* Only look for stubs on branch instructions, or
5390 non-relaxed TLSCALL */
5391 if ((r_type
!= (unsigned int) R_ARM_CALL
)
5392 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
5393 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
5394 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
5395 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
5396 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
5397 && (r_type
!= (unsigned int) R_ARM_PLT32
)
5398 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
5399 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5400 && r_type
== elf32_arm_tls_transition
5401 (info
, r_type
, &hash
->root
)
5402 && ((hash
? hash
->tls_type
5403 : (elf32_arm_local_got_tls_type
5404 (input_bfd
)[r_indx
]))
5405 & GOT_TLS_GDESC
) != 0))
5408 /* Now determine the call target, its name, value,
5415 if (r_type
== (unsigned int) R_ARM_TLS_CALL
5416 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5418 /* A non-relaxed TLS call. The target is the
5419 plt-resident trampoline and nothing to do
5421 BFD_ASSERT (htab
->tls_trampoline
> 0);
5422 sym_sec
= htab
->root
.splt
;
5423 sym_value
= htab
->tls_trampoline
;
5426 branch_type
= ST_BRANCH_TO_ARM
;
5430 /* It's a local symbol. */
5431 Elf_Internal_Sym
*sym
;
5433 if (local_syms
== NULL
)
5436 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5437 if (local_syms
== NULL
)
5439 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5440 symtab_hdr
->sh_info
, 0,
5442 if (local_syms
== NULL
)
5443 goto error_ret_free_internal
;
5446 sym
= local_syms
+ r_indx
;
5447 if (sym
->st_shndx
== SHN_UNDEF
)
5448 sym_sec
= bfd_und_section_ptr
;
5449 else if (sym
->st_shndx
== SHN_ABS
)
5450 sym_sec
= bfd_abs_section_ptr
;
5451 else if (sym
->st_shndx
== SHN_COMMON
)
5452 sym_sec
= bfd_com_section_ptr
;
5455 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
5458 /* This is an undefined symbol. It can never
5462 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
5463 sym_value
= sym
->st_value
;
5464 destination
= (sym_value
+ irela
->r_addend
5465 + sym_sec
->output_offset
5466 + sym_sec
->output_section
->vma
);
5467 st_type
= ELF_ST_TYPE (sym
->st_info
);
5469 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
5471 = bfd_elf_string_from_elf_section (input_bfd
,
5472 symtab_hdr
->sh_link
,
5477 /* It's an external symbol. */
5478 while (hash
->root
.root
.type
== bfd_link_hash_indirect
5479 || hash
->root
.root
.type
== bfd_link_hash_warning
)
5480 hash
= ((struct elf32_arm_link_hash_entry
*)
5481 hash
->root
.root
.u
.i
.link
);
5483 if (hash
->root
.root
.type
== bfd_link_hash_defined
5484 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
5486 sym_sec
= hash
->root
.root
.u
.def
.section
;
5487 sym_value
= hash
->root
.root
.u
.def
.value
;
5489 struct elf32_arm_link_hash_table
*globals
=
5490 elf32_arm_hash_table (info
);
5492 /* For a destination in a shared library,
5493 use the PLT stub as target address to
5494 decide whether a branch stub is
5497 && globals
->root
.splt
!= NULL
5499 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5501 sym_sec
= globals
->root
.splt
;
5502 sym_value
= hash
->root
.plt
.offset
;
5503 if (sym_sec
->output_section
!= NULL
)
5504 destination
= (sym_value
5505 + sym_sec
->output_offset
5506 + sym_sec
->output_section
->vma
);
5508 else if (sym_sec
->output_section
!= NULL
)
5509 destination
= (sym_value
+ irela
->r_addend
5510 + sym_sec
->output_offset
5511 + sym_sec
->output_section
->vma
);
5513 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
5514 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
5516 /* For a shared library, use the PLT stub as
5517 target address to decide whether a long
5518 branch stub is needed.
5519 For absolute code, they cannot be handled. */
5520 struct elf32_arm_link_hash_table
*globals
=
5521 elf32_arm_hash_table (info
);
5524 && globals
->root
.splt
!= NULL
5526 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5528 sym_sec
= globals
->root
.splt
;
5529 sym_value
= hash
->root
.plt
.offset
;
5530 if (sym_sec
->output_section
!= NULL
)
5531 destination
= (sym_value
5532 + sym_sec
->output_offset
5533 + sym_sec
->output_section
->vma
);
5540 bfd_set_error (bfd_error_bad_value
);
5541 goto error_ret_free_internal
;
5543 st_type
= hash
->root
.type
;
5545 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
5546 sym_name
= hash
->root
.root
.root
.string
;
5551 bfd_boolean new_stub
;
5553 /* Determine what (if any) linker stub is needed. */
5554 stub_type
= arm_type_of_stub (info
, section
, irela
,
5555 st_type
, &branch_type
,
5556 hash
, destination
, sym_sec
,
5557 input_bfd
, sym_name
);
5558 if (stub_type
== arm_stub_none
)
5561 /* We've either created a stub for this reloc already,
5562 or we are about to. */
5564 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
5566 (char *) sym_name
, sym_value
,
5567 branch_type
, &new_stub
);
5570 goto error_ret_free_internal
;
5574 stub_changed
= TRUE
;
5578 /* Look for relocations which might trigger Cortex-A8
5580 if (htab
->fix_cortex_a8
5581 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
5582 || r_type
== (unsigned int) R_ARM_THM_JUMP19
5583 || r_type
== (unsigned int) R_ARM_THM_CALL
5584 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
5586 bfd_vma from
= section
->output_section
->vma
5587 + section
->output_offset
5590 if ((from
& 0xfff) == 0xffe)
5592 /* Found a candidate. Note we haven't checked the
5593 destination is within 4K here: if we do so (and
5594 don't create an entry in a8_relocs) we can't tell
5595 that a branch should have been relocated when
5597 if (num_a8_relocs
== a8_reloc_table_size
)
5599 a8_reloc_table_size
*= 2;
5600 a8_relocs
= (struct a8_erratum_reloc
*)
5601 bfd_realloc (a8_relocs
,
5602 sizeof (struct a8_erratum_reloc
)
5603 * a8_reloc_table_size
);
5606 a8_relocs
[num_a8_relocs
].from
= from
;
5607 a8_relocs
[num_a8_relocs
].destination
= destination
;
5608 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
5609 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
5610 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
5611 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
5612 a8_relocs
[num_a8_relocs
].hash
= hash
;
5619 /* We're done with the internal relocs, free them. */
5620 if (elf_section_data (section
)->relocs
== NULL
)
5621 free (internal_relocs
);
5624 if (htab
->fix_cortex_a8
)
5626 /* Sort relocs which might apply to Cortex-A8 erratum. */
5627 qsort (a8_relocs
, num_a8_relocs
,
5628 sizeof (struct a8_erratum_reloc
),
5631 /* Scan for branches which might trigger Cortex-A8 erratum. */
5632 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
5633 &num_a8_fixes
, &a8_fix_table_size
,
5634 a8_relocs
, num_a8_relocs
,
5635 prev_num_a8_fixes
, &stub_changed
)
5637 goto error_ret_free_local
;
5641 if (prev_num_a8_fixes
!= num_a8_fixes
)
5642 stub_changed
= TRUE
;
5647 /* OK, we've added some stubs. Find out the new size of the
5649 for (stub_sec
= htab
->stub_bfd
->sections
;
5651 stub_sec
= stub_sec
->next
)
5653 /* Ignore non-stub sections. */
5654 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5660 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
5662 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5663 if (htab
->fix_cortex_a8
)
5664 for (i
= 0; i
< num_a8_fixes
; i
++)
5666 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
5667 a8_fixes
[i
].section
, htab
);
5669 if (stub_sec
== NULL
)
5670 goto error_ret_free_local
;
5673 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
5678 /* Ask the linker to do its stuff. */
5679 (*htab
->layout_sections_again
) ();
5682 /* Add stubs for Cortex-A8 erratum fixes now. */
5683 if (htab
->fix_cortex_a8
)
5685 for (i
= 0; i
< num_a8_fixes
; i
++)
5687 struct elf32_arm_stub_hash_entry
*stub_entry
;
5688 char *stub_name
= a8_fixes
[i
].stub_name
;
5689 asection
*section
= a8_fixes
[i
].section
;
5690 unsigned int section_id
= a8_fixes
[i
].section
->id
;
5691 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
5692 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
5693 const insn_sequence
*template_sequence
;
5694 int template_size
, size
= 0;
5696 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
5698 if (stub_entry
== NULL
)
5700 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
5706 stub_entry
->stub_sec
= stub_sec
;
5707 stub_entry
->stub_offset
= 0;
5708 stub_entry
->id_sec
= link_sec
;
5709 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
5710 stub_entry
->source_value
= a8_fixes
[i
].offset
;
5711 stub_entry
->target_section
= a8_fixes
[i
].section
;
5712 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
5713 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
5714 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
5716 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
5720 stub_entry
->stub_size
= size
;
5721 stub_entry
->stub_template
= template_sequence
;
5722 stub_entry
->stub_template_size
= template_size
;
5725 /* Stash the Cortex-A8 erratum fix array for use later in
5726 elf32_arm_write_section(). */
5727 htab
->a8_erratum_fixes
= a8_fixes
;
5728 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
5732 htab
->a8_erratum_fixes
= NULL
;
5733 htab
->num_a8_erratum_fixes
= 0;
5738 /* Build all the stubs associated with the current output file. The
5739 stubs are kept in a hash table attached to the main linker hash
5740 table. We also set up the .plt entries for statically linked PIC
5741 functions here. This function is called via arm_elf_finish in the
5745 elf32_arm_build_stubs (struct bfd_link_info
*info
)
5748 struct bfd_hash_table
*table
;
5749 struct elf32_arm_link_hash_table
*htab
;
5751 htab
= elf32_arm_hash_table (info
);
5755 for (stub_sec
= htab
->stub_bfd
->sections
;
5757 stub_sec
= stub_sec
->next
)
5761 /* Ignore non-stub sections. */
5762 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5765 /* Allocate memory to hold the linker stubs. */
5766 size
= stub_sec
->size
;
5767 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
5768 if (stub_sec
->contents
== NULL
&& size
!= 0)
5773 /* Build the stubs as directed by the stub hash table. */
5774 table
= &htab
->stub_hash_table
;
5775 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
5776 if (htab
->fix_cortex_a8
)
5778 /* Place the cortex a8 stubs last. */
5779 htab
->fix_cortex_a8
= -1;
5780 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
5786 /* Locate the Thumb encoded calling stub for NAME. */
5788 static struct elf_link_hash_entry
*
5789 find_thumb_glue (struct bfd_link_info
*link_info
,
5791 char **error_message
)
5794 struct elf_link_hash_entry
*hash
;
5795 struct elf32_arm_link_hash_table
*hash_table
;
5797 /* We need a pointer to the armelf specific hash table. */
5798 hash_table
= elf32_arm_hash_table (link_info
);
5799 if (hash_table
== NULL
)
5802 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
5803 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
5805 BFD_ASSERT (tmp_name
);
5807 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
5809 hash
= elf_link_hash_lookup
5810 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
5813 && asprintf (error_message
, _("unable to find THUMB glue '%s' for '%s'"),
5814 tmp_name
, name
) == -1)
5815 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
5822 /* Locate the ARM encoded calling stub for NAME. */
5824 static struct elf_link_hash_entry
*
5825 find_arm_glue (struct bfd_link_info
*link_info
,
5827 char **error_message
)
5830 struct elf_link_hash_entry
*myh
;
5831 struct elf32_arm_link_hash_table
*hash_table
;
5833 /* We need a pointer to the elfarm specific hash table. */
5834 hash_table
= elf32_arm_hash_table (link_info
);
5835 if (hash_table
== NULL
)
5838 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
5839 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
5841 BFD_ASSERT (tmp_name
);
5843 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
5845 myh
= elf_link_hash_lookup
5846 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
5849 && asprintf (error_message
, _("unable to find ARM glue '%s' for '%s'"),
5850 tmp_name
, name
) == -1)
5851 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
5858 /* ARM->Thumb glue (static images):
5862 ldr r12, __func_addr
5865 .word func @ behave as if you saw a ARM_32 reloc.
5872 .word func @ behave as if you saw a ARM_32 reloc.
5874 (relocatable images)
5877 ldr r12, __func_offset
5883 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5884 static const insn32 a2t1_ldr_insn
= 0xe59fc000;
5885 static const insn32 a2t2_bx_r12_insn
= 0xe12fff1c;
5886 static const insn32 a2t3_func_addr_insn
= 0x00000001;
5888 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5889 static const insn32 a2t1v5_ldr_insn
= 0xe51ff004;
5890 static const insn32 a2t2v5_func_addr_insn
= 0x00000001;
5892 #define ARM2THUMB_PIC_GLUE_SIZE 16
5893 static const insn32 a2t1p_ldr_insn
= 0xe59fc004;
5894 static const insn32 a2t2p_add_pc_insn
= 0xe08cc00f;
5895 static const insn32 a2t3p_bx_r12_insn
= 0xe12fff1c;
5897 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5901 __func_from_thumb: __func_from_thumb:
5903 nop ldr r6, __func_addr
5913 #define THUMB2ARM_GLUE_SIZE 8
5914 static const insn16 t2a1_bx_pc_insn
= 0x4778;
5915 static const insn16 t2a2_noop_insn
= 0x46c0;
5916 static const insn32 t2a3_b_insn
= 0xea000000;
5918 #define VFP11_ERRATUM_VENEER_SIZE 8
5919 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
5920 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
5922 #define ARM_BX_VENEER_SIZE 12
5923 static const insn32 armbx1_tst_insn
= 0xe3100001;
5924 static const insn32 armbx2_moveq_insn
= 0x01a0f000;
5925 static const insn32 armbx3_bx_insn
= 0xe12fff10;
5927 #ifndef ELFARM_NABI_C_INCLUDED
5929 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
5932 bfd_byte
* contents
;
5936 /* Do not include empty glue sections in the output. */
5939 s
= bfd_get_linker_section (abfd
, name
);
5941 s
->flags
|= SEC_EXCLUDE
;
5946 BFD_ASSERT (abfd
!= NULL
);
5948 s
= bfd_get_linker_section (abfd
, name
);
5949 BFD_ASSERT (s
!= NULL
);
5951 contents
= (bfd_byte
*) bfd_alloc (abfd
, size
);
5953 BFD_ASSERT (s
->size
== size
);
5954 s
->contents
= contents
;
5958 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
5960 struct elf32_arm_link_hash_table
* globals
;
5962 globals
= elf32_arm_hash_table (info
);
5963 BFD_ASSERT (globals
!= NULL
);
5965 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
5966 globals
->arm_glue_size
,
5967 ARM2THUMB_GLUE_SECTION_NAME
);
5969 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
5970 globals
->thumb_glue_size
,
5971 THUMB2ARM_GLUE_SECTION_NAME
);
5973 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
5974 globals
->vfp11_erratum_glue_size
,
5975 VFP11_ERRATUM_VENEER_SECTION_NAME
);
5977 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
5978 globals
->stm32l4xx_erratum_glue_size
,
5979 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
5981 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
5982 globals
->bx_glue_size
,
5983 ARM_BX_GLUE_SECTION_NAME
);
5988 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5989 returns the symbol identifying the stub. */
5991 static struct elf_link_hash_entry
*
5992 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
5993 struct elf_link_hash_entry
* h
)
5995 const char * name
= h
->root
.root
.string
;
5998 struct elf_link_hash_entry
* myh
;
5999 struct bfd_link_hash_entry
* bh
;
6000 struct elf32_arm_link_hash_table
* globals
;
6004 globals
= elf32_arm_hash_table (link_info
);
6005 BFD_ASSERT (globals
!= NULL
);
6006 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6008 s
= bfd_get_linker_section
6009 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
6011 BFD_ASSERT (s
!= NULL
);
6013 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
6014 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
6016 BFD_ASSERT (tmp_name
);
6018 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
6020 myh
= elf_link_hash_lookup
6021 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
6025 /* We've already seen this guy. */
6030 /* The only trick here is using hash_table->arm_glue_size as the value.
6031 Even though the section isn't allocated yet, this is where we will be
6032 putting it. The +1 on the value marks that the stub has not been
6033 output yet - not that it is a Thumb function. */
6035 val
= globals
->arm_glue_size
+ 1;
6036 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6037 tmp_name
, BSF_GLOBAL
, s
, val
,
6038 NULL
, TRUE
, FALSE
, &bh
);
6040 myh
= (struct elf_link_hash_entry
*) bh
;
6041 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6042 myh
->forced_local
= 1;
6046 if (bfd_link_pic (link_info
)
6047 || globals
->root
.is_relocatable_executable
6048 || globals
->pic_veneer
)
6049 size
= ARM2THUMB_PIC_GLUE_SIZE
;
6050 else if (globals
->use_blx
)
6051 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
6053 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
6056 globals
->arm_glue_size
+= size
;
6061 /* Allocate space for ARMv4 BX veneers. */
6064 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
6067 struct elf32_arm_link_hash_table
*globals
;
6069 struct elf_link_hash_entry
*myh
;
6070 struct bfd_link_hash_entry
*bh
;
6073 /* BX PC does not need a veneer. */
6077 globals
= elf32_arm_hash_table (link_info
);
6078 BFD_ASSERT (globals
!= NULL
);
6079 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6081 /* Check if this veneer has already been allocated. */
6082 if (globals
->bx_glue_offset
[reg
])
6085 s
= bfd_get_linker_section
6086 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
6088 BFD_ASSERT (s
!= NULL
);
6090 /* Add symbol for veneer. */
6092 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
6094 BFD_ASSERT (tmp_name
);
6096 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
6098 myh
= elf_link_hash_lookup
6099 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6101 BFD_ASSERT (myh
== NULL
);
6104 val
= globals
->bx_glue_size
;
6105 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6106 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6107 NULL
, TRUE
, FALSE
, &bh
);
6109 myh
= (struct elf_link_hash_entry
*) bh
;
6110 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6111 myh
->forced_local
= 1;
6113 s
->size
+= ARM_BX_VENEER_SIZE
;
6114 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
6115 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
6119 /* Add an entry to the code/data map for section SEC. */
6122 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
6124 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
6125 unsigned int newidx
;
6127 if (sec_data
->map
== NULL
)
6129 sec_data
->map
= (elf32_arm_section_map
*)
6130 bfd_malloc (sizeof (elf32_arm_section_map
));
6131 sec_data
->mapcount
= 0;
6132 sec_data
->mapsize
= 1;
6135 newidx
= sec_data
->mapcount
++;
6137 if (sec_data
->mapcount
> sec_data
->mapsize
)
6139 sec_data
->mapsize
*= 2;
6140 sec_data
->map
= (elf32_arm_section_map
*)
6141 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
6142 * sizeof (elf32_arm_section_map
));
6147 sec_data
->map
[newidx
].vma
= vma
;
6148 sec_data
->map
[newidx
].type
= type
;
6153 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6154 veneers are handled for now. */
6157 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
6158 elf32_vfp11_erratum_list
*branch
,
6160 asection
*branch_sec
,
6161 unsigned int offset
)
6164 struct elf32_arm_link_hash_table
*hash_table
;
6166 struct elf_link_hash_entry
*myh
;
6167 struct bfd_link_hash_entry
*bh
;
6169 struct _arm_elf_section_data
*sec_data
;
6170 elf32_vfp11_erratum_list
*newerr
;
6172 hash_table
= elf32_arm_hash_table (link_info
);
6173 BFD_ASSERT (hash_table
!= NULL
);
6174 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6176 s
= bfd_get_linker_section
6177 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
6179 sec_data
= elf32_arm_section_data (s
);
6181 BFD_ASSERT (s
!= NULL
);
6183 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6184 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6186 BFD_ASSERT (tmp_name
);
6188 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
6189 hash_table
->num_vfp11_fixes
);
6191 myh
= elf_link_hash_lookup
6192 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6194 BFD_ASSERT (myh
== NULL
);
6197 val
= hash_table
->vfp11_erratum_glue_size
;
6198 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6199 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6200 NULL
, TRUE
, FALSE
, &bh
);
6202 myh
= (struct elf_link_hash_entry
*) bh
;
6203 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6204 myh
->forced_local
= 1;
6206 /* Link veneer back to calling location. */
6207 sec_data
->erratumcount
+= 1;
6208 newerr
= (elf32_vfp11_erratum_list
*)
6209 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
6211 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
6213 newerr
->u
.v
.branch
= branch
;
6214 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
6215 branch
->u
.b
.veneer
= newerr
;
6217 newerr
->next
= sec_data
->erratumlist
;
6218 sec_data
->erratumlist
= newerr
;
6220 /* A symbol for the return from the veneer. */
6221 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
6222 hash_table
->num_vfp11_fixes
);
6224 myh
= elf_link_hash_lookup
6225 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6232 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6233 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6235 myh
= (struct elf_link_hash_entry
*) bh
;
6236 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6237 myh
->forced_local
= 1;
6241 /* Generate a mapping symbol for the veneer section, and explicitly add an
6242 entry for that symbol to the code/data map for the section. */
6243 if (hash_table
->vfp11_erratum_glue_size
== 0)
6246 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6247 ever requires this erratum fix. */
6248 _bfd_generic_link_add_one_symbol (link_info
,
6249 hash_table
->bfd_of_glue_owner
, "$a",
6250 BSF_LOCAL
, s
, 0, NULL
,
6253 myh
= (struct elf_link_hash_entry
*) bh
;
6254 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6255 myh
->forced_local
= 1;
6257 /* The elf32_arm_init_maps function only cares about symbols from input
6258 BFDs. We must make a note of this generated mapping symbol
6259 ourselves so that code byteswapping works properly in
6260 elf32_arm_write_section. */
6261 elf32_arm_section_map_add (s
, 'a', 0);
6264 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
6265 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
6266 hash_table
->num_vfp11_fixes
++;
6268 /* The offset of the veneer. */
6272 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6273 veneers need to be handled because used only in Cortex-M. */
6276 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
6277 elf32_stm32l4xx_erratum_list
*branch
,
6279 asection
*branch_sec
,
6280 unsigned int offset
,
6281 bfd_size_type veneer_size
)
6284 struct elf32_arm_link_hash_table
*hash_table
;
6286 struct elf_link_hash_entry
*myh
;
6287 struct bfd_link_hash_entry
*bh
;
6289 struct _arm_elf_section_data
*sec_data
;
6290 elf32_stm32l4xx_erratum_list
*newerr
;
6292 hash_table
= elf32_arm_hash_table (link_info
);
6293 BFD_ASSERT (hash_table
!= NULL
);
6294 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6296 s
= bfd_get_linker_section
6297 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6299 BFD_ASSERT (s
!= NULL
);
6301 sec_data
= elf32_arm_section_data (s
);
6303 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6304 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6306 BFD_ASSERT (tmp_name
);
6308 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
6309 hash_table
->num_stm32l4xx_fixes
);
6311 myh
= elf_link_hash_lookup
6312 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6314 BFD_ASSERT (myh
== NULL
);
6317 val
= hash_table
->stm32l4xx_erratum_glue_size
;
6318 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6319 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6320 NULL
, TRUE
, FALSE
, &bh
);
6322 myh
= (struct elf_link_hash_entry
*) bh
;
6323 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6324 myh
->forced_local
= 1;
6326 /* Link veneer back to calling location. */
6327 sec_data
->stm32l4xx_erratumcount
+= 1;
6328 newerr
= (elf32_stm32l4xx_erratum_list
*)
6329 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
6331 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
6333 newerr
->u
.v
.branch
= branch
;
6334 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
6335 branch
->u
.b
.veneer
= newerr
;
6337 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
6338 sec_data
->stm32l4xx_erratumlist
= newerr
;
6340 /* A symbol for the return from the veneer. */
6341 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
6342 hash_table
->num_stm32l4xx_fixes
);
6344 myh
= elf_link_hash_lookup
6345 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6352 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6353 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6355 myh
= (struct elf_link_hash_entry
*) bh
;
6356 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6357 myh
->forced_local
= 1;
6361 /* Generate a mapping symbol for the veneer section, and explicitly add an
6362 entry for that symbol to the code/data map for the section. */
6363 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
6366 /* Creates a THUMB symbol since there is no other choice. */
6367 _bfd_generic_link_add_one_symbol (link_info
,
6368 hash_table
->bfd_of_glue_owner
, "$t",
6369 BSF_LOCAL
, s
, 0, NULL
,
6372 myh
= (struct elf_link_hash_entry
*) bh
;
6373 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6374 myh
->forced_local
= 1;
6376 /* The elf32_arm_init_maps function only cares about symbols from input
6377 BFDs. We must make a note of this generated mapping symbol
6378 ourselves so that code byteswapping works properly in
6379 elf32_arm_write_section. */
6380 elf32_arm_section_map_add (s
, 't', 0);
6383 s
->size
+= veneer_size
;
6384 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
6385 hash_table
->num_stm32l4xx_fixes
++;
6387 /* The offset of the veneer. */
6391 #define ARM_GLUE_SECTION_FLAGS \
6392 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6393 | SEC_READONLY | SEC_LINKER_CREATED)
6395 /* Create a fake section for use by the ARM backend of the linker. */
6398 arm_make_glue_section (bfd
* abfd
, const char * name
)
6402 sec
= bfd_get_linker_section (abfd
, name
);
6407 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
6410 || !bfd_set_section_alignment (abfd
, sec
, 2))
6413 /* Set the gc mark to prevent the section from being removed by garbage
6414 collection, despite the fact that no relocs refer to this section. */
6420 /* Set size of .plt entries. This function is called from the
6421 linker scripts in ld/emultempl/{armelf}.em. */
6424 bfd_elf32_arm_use_long_plt (void)
6426 elf32_arm_use_long_plt_entry
= TRUE
;
6429 /* Add the glue sections to ABFD. This function is called from the
6430 linker scripts in ld/emultempl/{armelf}.em. */
6433 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
6434 struct bfd_link_info
*info
)
6436 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
6437 bfd_boolean dostm32l4xx
= globals
6438 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
6439 bfd_boolean addglue
;
6441 /* If we are only performing a partial
6442 link do not bother adding the glue. */
6443 if (bfd_link_relocatable (info
))
6446 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
6447 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
6448 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
6449 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
6455 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6458 /* Select a BFD to be used to hold the sections used by the glue code.
6459 This function is called from the linker scripts in ld/emultempl/
6463 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
6465 struct elf32_arm_link_hash_table
*globals
;
6467 /* If we are only performing a partial link
6468 do not bother getting a bfd to hold the glue. */
6469 if (bfd_link_relocatable (info
))
6472 /* Make sure we don't attach the glue sections to a dynamic object. */
6473 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
6475 globals
= elf32_arm_hash_table (info
);
6476 BFD_ASSERT (globals
!= NULL
);
6478 if (globals
->bfd_of_glue_owner
!= NULL
)
6481 /* Save the bfd for later use. */
6482 globals
->bfd_of_glue_owner
= abfd
;
6488 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
6492 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
6495 if (globals
->fix_arm1176
)
6497 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
6498 globals
->use_blx
= 1;
6502 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
6503 globals
->use_blx
= 1;
6508 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
6509 struct bfd_link_info
*link_info
)
6511 Elf_Internal_Shdr
*symtab_hdr
;
6512 Elf_Internal_Rela
*internal_relocs
= NULL
;
6513 Elf_Internal_Rela
*irel
, *irelend
;
6514 bfd_byte
*contents
= NULL
;
6517 struct elf32_arm_link_hash_table
*globals
;
6519 /* If we are only performing a partial link do not bother
6520 to construct any glue. */
6521 if (bfd_link_relocatable (link_info
))
6524 /* Here we have a bfd that is to be included on the link. We have a
6525 hook to do reloc rummaging, before section sizes are nailed down. */
6526 globals
= elf32_arm_hash_table (link_info
);
6527 BFD_ASSERT (globals
!= NULL
);
6529 check_use_blx (globals
);
6531 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
6533 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6538 /* PR 5398: If we have not decided to include any loadable sections in
6539 the output then we will not have a glue owner bfd. This is OK, it
6540 just means that there is nothing else for us to do here. */
6541 if (globals
->bfd_of_glue_owner
== NULL
)
6544 /* Rummage around all the relocs and map the glue vectors. */
6545 sec
= abfd
->sections
;
6550 for (; sec
!= NULL
; sec
= sec
->next
)
6552 if (sec
->reloc_count
== 0)
6555 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
6558 symtab_hdr
= & elf_symtab_hdr (abfd
);
6560 /* Load the relocs. */
6562 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
6564 if (internal_relocs
== NULL
)
6567 irelend
= internal_relocs
+ sec
->reloc_count
;
6568 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
6571 unsigned long r_index
;
6573 struct elf_link_hash_entry
*h
;
6575 r_type
= ELF32_R_TYPE (irel
->r_info
);
6576 r_index
= ELF32_R_SYM (irel
->r_info
);
6578 /* These are the only relocation types we care about. */
6579 if ( r_type
!= R_ARM_PC24
6580 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
6583 /* Get the section contents if we haven't done so already. */
6584 if (contents
== NULL
)
6586 /* Get cached copy if it exists. */
6587 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
6588 contents
= elf_section_data (sec
)->this_hdr
.contents
;
6591 /* Go get them off disk. */
6592 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
6597 if (r_type
== R_ARM_V4BX
)
6601 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
6602 record_arm_bx_glue (link_info
, reg
);
6606 /* If the relocation is not against a symbol it cannot concern us. */
6609 /* We don't care about local symbols. */
6610 if (r_index
< symtab_hdr
->sh_info
)
6613 /* This is an external symbol. */
6614 r_index
-= symtab_hdr
->sh_info
;
6615 h
= (struct elf_link_hash_entry
*)
6616 elf_sym_hashes (abfd
)[r_index
];
6618 /* If the relocation is against a static symbol it must be within
6619 the current section and so cannot be a cross ARM/Thumb relocation. */
6623 /* If the call will go through a PLT entry then we do not need
6625 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
6631 /* This one is a call from arm code. We need to look up
6632 the target of the call. If it is a thumb target, we
6634 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
6635 == ST_BRANCH_TO_THUMB
)
6636 record_arm_to_thumb_glue (link_info
, h
);
6644 if (contents
!= NULL
6645 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6649 if (internal_relocs
!= NULL
6650 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6651 free (internal_relocs
);
6652 internal_relocs
= NULL
;
6658 if (contents
!= NULL
6659 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6661 if (internal_relocs
!= NULL
6662 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6663 free (internal_relocs
);
6670 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6673 bfd_elf32_arm_init_maps (bfd
*abfd
)
6675 Elf_Internal_Sym
*isymbuf
;
6676 Elf_Internal_Shdr
*hdr
;
6677 unsigned int i
, localsyms
;
6679 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6680 if (! is_arm_elf (abfd
))
6683 if ((abfd
->flags
& DYNAMIC
) != 0)
6686 hdr
= & elf_symtab_hdr (abfd
);
6687 localsyms
= hdr
->sh_info
;
6689 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6690 should contain the number of local symbols, which should come before any
6691 global symbols. Mapping symbols are always local. */
6692 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
6695 /* No internal symbols read? Skip this BFD. */
6696 if (isymbuf
== NULL
)
6699 for (i
= 0; i
< localsyms
; i
++)
6701 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
6702 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
6706 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
6708 name
= bfd_elf_string_from_elf_section (abfd
,
6709 hdr
->sh_link
, isym
->st_name
);
6711 if (bfd_is_arm_special_symbol_name (name
,
6712 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
6713 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
6719 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6720 say what they wanted. */
6723 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6725 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6726 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6728 if (globals
== NULL
)
6731 if (globals
->fix_cortex_a8
== -1)
6733 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6734 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
6735 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
6736 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
6737 globals
->fix_cortex_a8
= 1;
6739 globals
->fix_cortex_a8
= 0;
6745 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6747 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6748 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6750 if (globals
== NULL
)
6752 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6753 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
6755 switch (globals
->vfp11_fix
)
6757 case BFD_ARM_VFP11_FIX_DEFAULT
:
6758 case BFD_ARM_VFP11_FIX_NONE
:
6759 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
6763 /* Give a warning, but do as the user requests anyway. */
6764 (*_bfd_error_handler
) (_("%B: warning: selected VFP11 erratum "
6765 "workaround is not necessary for target architecture"), obfd
);
6768 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
6769 /* For earlier architectures, we might need the workaround, but do not
6770 enable it by default. If users is running with broken hardware, they
6771 must enable the erratum fix explicitly. */
6772 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
6776 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6778 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6779 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6781 if (globals
== NULL
)
6784 /* We assume only Cortex-M4 may require the fix. */
6785 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
6786 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
6788 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
6789 /* Give a warning, but do as the user requests anyway. */
6790 (*_bfd_error_handler
)
6791 (_("%B: warning: selected STM32L4XX erratum "
6792 "workaround is not necessary for target architecture"), obfd
);
6796 enum bfd_arm_vfp11_pipe
6804 /* Return a VFP register number. This is encoded as RX:X for single-precision
6805 registers, or X:RX for double-precision registers, where RX is the group of
6806 four bits in the instruction encoding and X is the single extension bit.
6807 RX and X fields are specified using their lowest (starting) bit. The return
6810 0...31: single-precision registers s0...s31
6811 32...63: double-precision registers d0...d31.
6813 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6814 encounter VFP3 instructions, so we allow the full range for DP registers. */
6817 bfd_arm_vfp11_regno (unsigned int insn
, bfd_boolean is_double
, unsigned int rx
,
6821 return (((insn
>> rx
) & 0xf) | (((insn
>> x
) & 1) << 4)) + 32;
6823 return (((insn
>> rx
) & 0xf) << 1) | ((insn
>> x
) & 1);
6826 /* Set bits in *WMASK according to a register number REG as encoded by
6827 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6830 bfd_arm_vfp11_write_mask (unsigned int *wmask
, unsigned int reg
)
6835 *wmask
|= 3 << ((reg
- 32) * 2);
6838 /* Return TRUE if WMASK overwrites anything in REGS. */
6841 bfd_arm_vfp11_antidependency (unsigned int wmask
, int *regs
, int numregs
)
6845 for (i
= 0; i
< numregs
; i
++)
6847 unsigned int reg
= regs
[i
];
6849 if (reg
< 32 && (wmask
& (1 << reg
)) != 0)
6857 if ((wmask
& (3 << (reg
* 2))) != 0)
6864 /* In this function, we're interested in two things: finding input registers
6865 for VFP data-processing instructions, and finding the set of registers which
6866 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6867 hold the written set, so FLDM etc. are easy to deal with (we're only
6868 interested in 32 SP registers or 16 dp registers, due to the VFP version
6869 implemented by the chip in question). DP registers are marked by setting
6870 both SP registers in the write mask). */
6872 static enum bfd_arm_vfp11_pipe
6873 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
6876 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
6877 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
6879 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6882 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
6883 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
6885 pqrs
= ((insn
& 0x00800000) >> 20)
6886 | ((insn
& 0x00300000) >> 19)
6887 | ((insn
& 0x00000040) >> 6);
6891 case 0: /* fmac[sd]. */
6892 case 1: /* fnmac[sd]. */
6893 case 2: /* fmsc[sd]. */
6894 case 3: /* fnmsc[sd]. */
6896 bfd_arm_vfp11_write_mask (destmask
, fd
);
6898 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
6903 case 4: /* fmul[sd]. */
6904 case 5: /* fnmul[sd]. */
6905 case 6: /* fadd[sd]. */
6906 case 7: /* fsub[sd]. */
6910 case 8: /* fdiv[sd]. */
6913 bfd_arm_vfp11_write_mask (destmask
, fd
);
6914 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
6919 case 15: /* extended opcode. */
6921 unsigned int extn
= ((insn
>> 15) & 0x1e)
6922 | ((insn
>> 7) & 1);
6926 case 0: /* fcpy[sd]. */
6927 case 1: /* fabs[sd]. */
6928 case 2: /* fneg[sd]. */
6929 case 8: /* fcmp[sd]. */
6930 case 9: /* fcmpe[sd]. */
6931 case 10: /* fcmpz[sd]. */
6932 case 11: /* fcmpez[sd]. */
6933 case 16: /* fuito[sd]. */
6934 case 17: /* fsito[sd]. */
6935 case 24: /* ftoui[sd]. */
6936 case 25: /* ftouiz[sd]. */
6937 case 26: /* ftosi[sd]. */
6938 case 27: /* ftosiz[sd]. */
6939 /* These instructions will not bounce due to underflow. */
6944 case 3: /* fsqrt[sd]. */
6945 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6946 registers to cause the erratum in previous instructions. */
6947 bfd_arm_vfp11_write_mask (destmask
, fd
);
6951 case 15: /* fcvt{ds,sd}. */
6955 bfd_arm_vfp11_write_mask (destmask
, fd
);
6957 /* Only FCVTSD can underflow. */
6958 if ((insn
& 0x100) != 0)
6977 /* Two-register transfer. */
6978 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
6980 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
6982 if ((insn
& 0x100000) == 0)
6985 bfd_arm_vfp11_write_mask (destmask
, fm
);
6988 bfd_arm_vfp11_write_mask (destmask
, fm
);
6989 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
6995 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
6997 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
6998 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
7002 case 0: /* Two-reg transfer. We should catch these above. */
7005 case 2: /* fldm[sdx]. */
7009 unsigned int i
, offset
= insn
& 0xff;
7014 for (i
= fd
; i
< fd
+ offset
; i
++)
7015 bfd_arm_vfp11_write_mask (destmask
, i
);
7019 case 4: /* fld[sd]. */
7021 bfd_arm_vfp11_write_mask (destmask
, fd
);
7030 /* Single-register transfer. Note L==0. */
7031 else if ((insn
& 0x0f100e10) == 0x0e000a10)
7033 unsigned int opcode
= (insn
>> 21) & 7;
7034 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
7038 case 0: /* fmsr/fmdlr. */
7039 case 1: /* fmdhr. */
7040 /* Mark fmdhr and fmdlr as writing to the whole of the DP
7041 destination register. I don't know if this is exactly right,
7042 but it is the conservative choice. */
7043 bfd_arm_vfp11_write_mask (destmask
, fn
);
7057 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
7060 /* Look for potentially-troublesome code sequences which might trigger the
7061 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7062 (available from ARM) for details of the erratum. A short version is
7063 described in ld.texinfo. */
7066 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
7069 bfd_byte
*contents
= NULL
;
7071 int regs
[3], numregs
= 0;
7072 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7073 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
7075 if (globals
== NULL
)
7078 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7079 The states transition as follows:
7081 0 -> 1 (vector) or 0 -> 2 (scalar)
7082 A VFP FMAC-pipeline instruction has been seen. Fill
7083 regs[0]..regs[numregs-1] with its input operands. Remember this
7084 instruction in 'first_fmac'.
7087 Any instruction, except for a VFP instruction which overwrites
7092 A VFP instruction has been seen which overwrites any of regs[*].
7093 We must make a veneer! Reset state to 0 before examining next
7097 If we fail to match anything in state 2, reset to state 0 and reset
7098 the instruction pointer to the instruction after 'first_fmac'.
7100 If the VFP11 vector mode is in use, there must be at least two unrelated
7101 instructions between anti-dependent VFP11 instructions to properly avoid
7102 triggering the erratum, hence the use of the extra state 1. */
7104 /* If we are only performing a partial link do not bother
7105 to construct any glue. */
7106 if (bfd_link_relocatable (link_info
))
7109 /* Skip if this bfd does not correspond to an ELF image. */
7110 if (! is_arm_elf (abfd
))
7113 /* We should have chosen a fix type by the time we get here. */
7114 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
7116 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
7119 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7120 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7123 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7125 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
7126 struct _arm_elf_section_data
*sec_data
;
7128 /* If we don't have executable progbits, we're not interested in this
7129 section. Also skip if section is to be excluded. */
7130 if (elf_section_type (sec
) != SHT_PROGBITS
7131 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7132 || (sec
->flags
& SEC_EXCLUDE
) != 0
7133 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7134 || sec
->output_section
== bfd_abs_section_ptr
7135 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
7138 sec_data
= elf32_arm_section_data (sec
);
7140 if (sec_data
->mapcount
== 0)
7143 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7144 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7145 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7148 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7149 elf32_arm_compare_mapping
);
7151 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7153 unsigned int span_start
= sec_data
->map
[span
].vma
;
7154 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7155 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7156 char span_type
= sec_data
->map
[span
].type
;
7158 /* FIXME: Only ARM mode is supported at present. We may need to
7159 support Thumb-2 mode also at some point. */
7160 if (span_type
!= 'a')
7163 for (i
= span_start
; i
< span_end
;)
7165 unsigned int next_i
= i
+ 4;
7166 unsigned int insn
= bfd_big_endian (abfd
)
7167 ? (contents
[i
] << 24)
7168 | (contents
[i
+ 1] << 16)
7169 | (contents
[i
+ 2] << 8)
7171 : (contents
[i
+ 3] << 24)
7172 | (contents
[i
+ 2] << 16)
7173 | (contents
[i
+ 1] << 8)
7175 unsigned int writemask
= 0;
7176 enum bfd_arm_vfp11_pipe vpipe
;
7181 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
7183 /* I'm assuming the VFP11 erratum can trigger with denorm
7184 operands on either the FMAC or the DS pipeline. This might
7185 lead to slightly overenthusiastic veneer insertion. */
7186 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
7188 state
= use_vector
? 1 : 2;
7190 veneer_of_insn
= insn
;
7196 int other_regs
[3], other_numregs
;
7197 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7200 if (vpipe
!= VFP11_BAD
7201 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7211 int other_regs
[3], other_numregs
;
7212 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7215 if (vpipe
!= VFP11_BAD
7216 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7222 next_i
= first_fmac
+ 4;
7228 abort (); /* Should be unreachable. */
7233 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
7234 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7236 elf32_arm_section_data (sec
)->erratumcount
+= 1;
7238 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
7243 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
7250 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
7255 newerr
->next
= sec_data
->erratumlist
;
7256 sec_data
->erratumlist
= newerr
;
7265 if (contents
!= NULL
7266 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7274 if (contents
!= NULL
7275 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7281 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7282 after sections have been laid out, using specially-named symbols. */
7285 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
7286 struct bfd_link_info
*link_info
)
7289 struct elf32_arm_link_hash_table
*globals
;
7292 if (bfd_link_relocatable (link_info
))
7295 /* Skip if this bfd does not correspond to an ELF image. */
7296 if (! is_arm_elf (abfd
))
7299 globals
= elf32_arm_hash_table (link_info
);
7300 if (globals
== NULL
)
7303 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7304 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7306 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7308 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7309 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
7311 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7313 struct elf_link_hash_entry
*myh
;
7316 switch (errnode
->type
)
7318 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
7319 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
7320 /* Find veneer symbol. */
7321 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7322 errnode
->u
.b
.veneer
->u
.v
.id
);
7324 myh
= elf_link_hash_lookup
7325 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7328 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7329 "`%s'"), abfd
, tmp_name
);
7331 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7332 + myh
->root
.u
.def
.section
->output_offset
7333 + myh
->root
.u
.def
.value
;
7335 errnode
->u
.b
.veneer
->vma
= vma
;
7338 case VFP11_ERRATUM_ARM_VENEER
:
7339 case VFP11_ERRATUM_THUMB_VENEER
:
7340 /* Find return location. */
7341 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7344 myh
= elf_link_hash_lookup
7345 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7348 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7349 "`%s'"), abfd
, tmp_name
);
7351 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7352 + myh
->root
.u
.def
.section
->output_offset
7353 + myh
->root
.u
.def
.value
;
7355 errnode
->u
.v
.branch
->vma
= vma
;
7367 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7368 return locations after sections have been laid out, using
7369 specially-named symbols. */
7372 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
7373 struct bfd_link_info
*link_info
)
7376 struct elf32_arm_link_hash_table
*globals
;
7379 if (bfd_link_relocatable (link_info
))
7382 /* Skip if this bfd does not correspond to an ELF image. */
7383 if (! is_arm_elf (abfd
))
7386 globals
= elf32_arm_hash_table (link_info
);
7387 if (globals
== NULL
)
7390 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7391 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7393 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7395 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7396 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
7398 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7400 struct elf_link_hash_entry
*myh
;
7403 switch (errnode
->type
)
7405 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
7406 /* Find veneer symbol. */
7407 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7408 errnode
->u
.b
.veneer
->u
.v
.id
);
7410 myh
= elf_link_hash_lookup
7411 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7414 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7415 "`%s'"), abfd
, tmp_name
);
7417 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7418 + myh
->root
.u
.def
.section
->output_offset
7419 + myh
->root
.u
.def
.value
;
7421 errnode
->u
.b
.veneer
->vma
= vma
;
7424 case STM32L4XX_ERRATUM_VENEER
:
7425 /* Find return location. */
7426 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7429 myh
= elf_link_hash_lookup
7430 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7433 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7434 "`%s'"), abfd
, tmp_name
);
7436 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7437 + myh
->root
.u
.def
.section
->output_offset
7438 + myh
->root
.u
.def
.value
;
7440 errnode
->u
.v
.branch
->vma
= vma
;
7452 static inline bfd_boolean
7453 is_thumb2_ldmia (const insn32 insn
)
7455 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7456 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7457 return (insn
& 0xffd02000) == 0xe8900000;
7460 static inline bfd_boolean
7461 is_thumb2_ldmdb (const insn32 insn
)
7463 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7464 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7465 return (insn
& 0xffd02000) == 0xe9100000;
7468 static inline bfd_boolean
7469 is_thumb2_vldm (const insn32 insn
)
7471 /* A6.5 Extension register load or store instruction
7473 We look for SP 32-bit and DP 64-bit registers.
7474 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7475 <list> is consecutive 64-bit registers
7476 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7477 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7478 <list> is consecutive 32-bit registers
7479 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7480 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7481 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7483 (((insn
& 0xfe100f00) == 0xec100b00) ||
7484 ((insn
& 0xfe100f00) == 0xec100a00))
7485 && /* (IA without !). */
7486 (((((insn
<< 7) >> 28) & 0xd) == 0x4)
7487 /* (IA with !), includes VPOP (when reg number is SP). */
7488 || ((((insn
<< 7) >> 28) & 0xd) == 0x5)
7490 || ((((insn
<< 7) >> 28) & 0xd) == 0x9));
7493 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7495 - computes the number and the mode of memory accesses
7496 - decides if the replacement should be done:
7497 . replaces only if > 8-word accesses
7498 . or (testing purposes only) replaces all accesses. */
7501 stm32l4xx_need_create_replacing_stub (const insn32 insn
,
7502 bfd_arm_stm32l4xx_fix stm32l4xx_fix
)
7506 /* The field encoding the register list is the same for both LDMIA
7507 and LDMDB encodings. */
7508 if (is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
))
7509 nb_words
= popcount (insn
& 0x0000ffff);
7510 else if (is_thumb2_vldm (insn
))
7511 nb_words
= (insn
& 0xff);
7513 /* DEFAULT mode accounts for the real bug condition situation,
7514 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7516 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_DEFAULT
) ? nb_words
> 8 :
7517 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_ALL
) ? TRUE
: FALSE
;
7520 /* Look for potentially-troublesome code sequences which might trigger
7521 the STM STM32L4XX erratum. */
7524 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
7525 struct bfd_link_info
*link_info
)
7528 bfd_byte
*contents
= NULL
;
7529 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7531 if (globals
== NULL
)
7534 /* If we are only performing a partial link do not bother
7535 to construct any glue. */
7536 if (bfd_link_relocatable (link_info
))
7539 /* Skip if this bfd does not correspond to an ELF image. */
7540 if (! is_arm_elf (abfd
))
7543 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
7546 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7547 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7550 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7552 unsigned int i
, span
;
7553 struct _arm_elf_section_data
*sec_data
;
7555 /* If we don't have executable progbits, we're not interested in this
7556 section. Also skip if section is to be excluded. */
7557 if (elf_section_type (sec
) != SHT_PROGBITS
7558 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7559 || (sec
->flags
& SEC_EXCLUDE
) != 0
7560 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7561 || sec
->output_section
== bfd_abs_section_ptr
7562 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
7565 sec_data
= elf32_arm_section_data (sec
);
7567 if (sec_data
->mapcount
== 0)
7570 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7571 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7572 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7575 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7576 elf32_arm_compare_mapping
);
7578 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7580 unsigned int span_start
= sec_data
->map
[span
].vma
;
7581 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7582 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7583 char span_type
= sec_data
->map
[span
].type
;
7584 int itblock_current_pos
= 0;
7586 /* Only Thumb2 mode need be supported with this CM4 specific
7587 code, we should not encounter any arm mode eg span_type
7589 if (span_type
!= 't')
7592 for (i
= span_start
; i
< span_end
;)
7594 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
7595 bfd_boolean insn_32bit
= FALSE
;
7596 bfd_boolean is_ldm
= FALSE
;
7597 bfd_boolean is_vldm
= FALSE
;
7598 bfd_boolean is_not_last_in_it_block
= FALSE
;
7600 /* The first 16-bits of all 32-bit thumb2 instructions start
7601 with opcode[15..13]=0b111 and the encoded op1 can be anything
7602 except opcode[12..11]!=0b00.
7603 See 32-bit Thumb instruction encoding. */
7604 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
7607 /* Compute the predicate that tells if the instruction
7608 is concerned by the IT block
7609 - Creates an error if there is a ldm that is not
7610 last in the IT block thus cannot be replaced
7611 - Otherwise we can create a branch at the end of the
7612 IT block, it will be controlled naturally by IT
7613 with the proper pseudo-predicate
7614 - So the only interesting predicate is the one that
7615 tells that we are not on the last item of an IT
7617 if (itblock_current_pos
!= 0)
7618 is_not_last_in_it_block
= !!--itblock_current_pos
;
7622 /* Load the rest of the insn (in manual-friendly order). */
7623 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
7624 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
7625 is_vldm
= is_thumb2_vldm (insn
);
7627 /* Veneers are created for (v)ldm depending on
7628 option flags and memory accesses conditions; but
7629 if the instruction is not the last instruction of
7630 an IT block, we cannot create a jump there, so we
7632 if ((is_ldm
|| is_vldm
) &&
7633 stm32l4xx_need_create_replacing_stub
7634 (insn
, globals
->stm32l4xx_fix
))
7636 if (is_not_last_in_it_block
)
7638 (*_bfd_error_handler
)
7639 /* Note - overlong line used here to allow for translation. */
7641 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7642 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7643 abfd
, sec
, (long)i
);
7647 elf32_stm32l4xx_erratum_list
*newerr
=
7648 (elf32_stm32l4xx_erratum_list
*)
7650 (sizeof (elf32_stm32l4xx_erratum_list
));
7652 elf32_arm_section_data (sec
)
7653 ->stm32l4xx_erratumcount
+= 1;
7654 newerr
->u
.b
.insn
= insn
;
7655 /* We create only thumb branches. */
7657 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
7658 record_stm32l4xx_erratum_veneer
7659 (link_info
, newerr
, abfd
, sec
,
7662 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
7663 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
7665 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7666 sec_data
->stm32l4xx_erratumlist
= newerr
;
7673 IT blocks are only encoded in T1
7674 Encoding T1: IT{x{y{z}}} <firstcond>
7675 1 0 1 1 - 1 1 1 1 - firstcond - mask
7676 if mask = '0000' then see 'related encodings'
7677 We don't deal with UNPREDICTABLE, just ignore these.
7678 There can be no nested IT blocks so an IT block
7679 is naturally a new one for which it is worth
7680 computing its size. */
7681 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00) &&
7682 ((insn
& 0x000f) != 0x0000);
7683 /* If we have a new IT block we compute its size. */
7686 /* Compute the number of instructions controlled
7687 by the IT block, it will be used to decide
7688 whether we are inside an IT block or not. */
7689 unsigned int mask
= insn
& 0x000f;
7690 itblock_current_pos
= 4 - ctz (mask
);
7694 i
+= insn_32bit
? 4 : 2;
7698 if (contents
!= NULL
7699 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7707 if (contents
!= NULL
7708 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7714 /* Set target relocation values needed during linking. */
7717 bfd_elf32_arm_set_target_relocs (struct bfd
*output_bfd
,
7718 struct bfd_link_info
*link_info
,
7720 char * target2_type
,
7723 bfd_arm_vfp11_fix vfp11_fix
,
7724 bfd_arm_stm32l4xx_fix stm32l4xx_fix
,
7725 int no_enum_warn
, int no_wchar_warn
,
7726 int pic_veneer
, int fix_cortex_a8
,
7729 struct elf32_arm_link_hash_table
*globals
;
7731 globals
= elf32_arm_hash_table (link_info
);
7732 if (globals
== NULL
)
7735 globals
->target1_is_rel
= target1_is_rel
;
7736 if (strcmp (target2_type
, "rel") == 0)
7737 globals
->target2_reloc
= R_ARM_REL32
;
7738 else if (strcmp (target2_type
, "abs") == 0)
7739 globals
->target2_reloc
= R_ARM_ABS32
;
7740 else if (strcmp (target2_type
, "got-rel") == 0)
7741 globals
->target2_reloc
= R_ARM_GOT_PREL
;
7744 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7747 globals
->fix_v4bx
= fix_v4bx
;
7748 globals
->use_blx
|= use_blx
;
7749 globals
->vfp11_fix
= vfp11_fix
;
7750 globals
->stm32l4xx_fix
= stm32l4xx_fix
;
7751 globals
->pic_veneer
= pic_veneer
;
7752 globals
->fix_cortex_a8
= fix_cortex_a8
;
7753 globals
->fix_arm1176
= fix_arm1176
;
7755 BFD_ASSERT (is_arm_elf (output_bfd
));
7756 elf_arm_tdata (output_bfd
)->no_enum_size_warning
= no_enum_warn
;
7757 elf_arm_tdata (output_bfd
)->no_wchar_size_warning
= no_wchar_warn
;
7760 /* Replace the target offset of a Thumb bl or b.w instruction. */
7763 insert_thumb_branch (bfd
*abfd
, long int offset
, bfd_byte
*insn
)
7769 BFD_ASSERT ((offset
& 1) == 0);
7771 upper
= bfd_get_16 (abfd
, insn
);
7772 lower
= bfd_get_16 (abfd
, insn
+ 2);
7773 reloc_sign
= (offset
< 0) ? 1 : 0;
7774 upper
= (upper
& ~(bfd_vma
) 0x7ff)
7775 | ((offset
>> 12) & 0x3ff)
7776 | (reloc_sign
<< 10);
7777 lower
= (lower
& ~(bfd_vma
) 0x2fff)
7778 | (((!((offset
>> 23) & 1)) ^ reloc_sign
) << 13)
7779 | (((!((offset
>> 22) & 1)) ^ reloc_sign
) << 11)
7780 | ((offset
>> 1) & 0x7ff);
7781 bfd_put_16 (abfd
, upper
, insn
);
7782 bfd_put_16 (abfd
, lower
, insn
+ 2);
7785 /* Thumb code calling an ARM function. */
7788 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
7792 asection
* input_section
,
7793 bfd_byte
* hit_data
,
7796 bfd_signed_vma addend
,
7798 char **error_message
)
7802 long int ret_offset
;
7803 struct elf_link_hash_entry
* myh
;
7804 struct elf32_arm_link_hash_table
* globals
;
7806 myh
= find_thumb_glue (info
, name
, error_message
);
7810 globals
= elf32_arm_hash_table (info
);
7811 BFD_ASSERT (globals
!= NULL
);
7812 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7814 my_offset
= myh
->root
.u
.def
.value
;
7816 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
7817 THUMB2ARM_GLUE_SECTION_NAME
);
7819 BFD_ASSERT (s
!= NULL
);
7820 BFD_ASSERT (s
->contents
!= NULL
);
7821 BFD_ASSERT (s
->output_section
!= NULL
);
7823 if ((my_offset
& 0x01) == 0x01)
7826 && sym_sec
->owner
!= NULL
7827 && !INTERWORK_FLAG (sym_sec
->owner
))
7829 (*_bfd_error_handler
)
7830 (_("%B(%s): warning: interworking not enabled.\n"
7831 " first occurrence: %B: Thumb call to ARM"),
7832 sym_sec
->owner
, input_bfd
, name
);
7838 myh
->root
.u
.def
.value
= my_offset
;
7840 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
7841 s
->contents
+ my_offset
);
7843 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
7844 s
->contents
+ my_offset
+ 2);
7847 /* Address of destination of the stub. */
7848 ((bfd_signed_vma
) val
)
7850 /* Offset from the start of the current section
7851 to the start of the stubs. */
7853 /* Offset of the start of this stub from the start of the stubs. */
7855 /* Address of the start of the current section. */
7856 + s
->output_section
->vma
)
7857 /* The branch instruction is 4 bytes into the stub. */
7859 /* ARM branches work from the pc of the instruction + 8. */
7862 put_arm_insn (globals
, output_bfd
,
7863 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
7864 s
->contents
+ my_offset
+ 4);
7867 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
7869 /* Now go back and fix up the original BL insn to point to here. */
7871 /* Address of where the stub is located. */
7872 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
7873 /* Address of where the BL is located. */
7874 - (input_section
->output_section
->vma
+ input_section
->output_offset
7876 /* Addend in the relocation. */
7878 /* Biassing for PC-relative addressing. */
7881 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
7886 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7888 static struct elf_link_hash_entry
*
7889 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
7896 char ** error_message
)
7899 long int ret_offset
;
7900 struct elf_link_hash_entry
* myh
;
7901 struct elf32_arm_link_hash_table
* globals
;
7903 myh
= find_arm_glue (info
, name
, error_message
);
7907 globals
= elf32_arm_hash_table (info
);
7908 BFD_ASSERT (globals
!= NULL
);
7909 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7911 my_offset
= myh
->root
.u
.def
.value
;
7913 if ((my_offset
& 0x01) == 0x01)
7916 && sym_sec
->owner
!= NULL
7917 && !INTERWORK_FLAG (sym_sec
->owner
))
7919 (*_bfd_error_handler
)
7920 (_("%B(%s): warning: interworking not enabled.\n"
7921 " first occurrence: %B: arm call to thumb"),
7922 sym_sec
->owner
, input_bfd
, name
);
7926 myh
->root
.u
.def
.value
= my_offset
;
7928 if (bfd_link_pic (info
)
7929 || globals
->root
.is_relocatable_executable
7930 || globals
->pic_veneer
)
7932 /* For relocatable objects we can't use absolute addresses,
7933 so construct the address from a relative offset. */
7934 /* TODO: If the offset is small it's probably worth
7935 constructing the address with adds. */
7936 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
7937 s
->contents
+ my_offset
);
7938 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
7939 s
->contents
+ my_offset
+ 4);
7940 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
7941 s
->contents
+ my_offset
+ 8);
7942 /* Adjust the offset by 4 for the position of the add,
7943 and 8 for the pipeline offset. */
7944 ret_offset
= (val
- (s
->output_offset
7945 + s
->output_section
->vma
7948 bfd_put_32 (output_bfd
, ret_offset
,
7949 s
->contents
+ my_offset
+ 12);
7951 else if (globals
->use_blx
)
7953 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
7954 s
->contents
+ my_offset
);
7956 /* It's a thumb address. Add the low order bit. */
7957 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
7958 s
->contents
+ my_offset
+ 4);
7962 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
7963 s
->contents
+ my_offset
);
7965 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
7966 s
->contents
+ my_offset
+ 4);
7968 /* It's a thumb address. Add the low order bit. */
7969 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
7970 s
->contents
+ my_offset
+ 8);
7976 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
7981 /* Arm code calling a Thumb function. */
7984 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
7988 asection
* input_section
,
7989 bfd_byte
* hit_data
,
7992 bfd_signed_vma addend
,
7994 char **error_message
)
7996 unsigned long int tmp
;
7999 long int ret_offset
;
8000 struct elf_link_hash_entry
* myh
;
8001 struct elf32_arm_link_hash_table
* globals
;
8003 globals
= elf32_arm_hash_table (info
);
8004 BFD_ASSERT (globals
!= NULL
);
8005 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8007 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8008 ARM2THUMB_GLUE_SECTION_NAME
);
8009 BFD_ASSERT (s
!= NULL
);
8010 BFD_ASSERT (s
->contents
!= NULL
);
8011 BFD_ASSERT (s
->output_section
!= NULL
);
8013 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
8014 sym_sec
, val
, s
, error_message
);
8018 my_offset
= myh
->root
.u
.def
.value
;
8019 tmp
= bfd_get_32 (input_bfd
, hit_data
);
8020 tmp
= tmp
& 0xFF000000;
8022 /* Somehow these are both 4 too far, so subtract 8. */
8023 ret_offset
= (s
->output_offset
8025 + s
->output_section
->vma
8026 - (input_section
->output_offset
8027 + input_section
->output_section
->vma
8031 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
8033 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
8038 /* Populate Arm stub for an exported Thumb function. */
8041 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
8043 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
8045 struct elf_link_hash_entry
* myh
;
8046 struct elf32_arm_link_hash_entry
*eh
;
8047 struct elf32_arm_link_hash_table
* globals
;
8050 char *error_message
;
8052 eh
= elf32_arm_hash_entry (h
);
8053 /* Allocate stubs for exported Thumb functions on v4t. */
8054 if (eh
->export_glue
== NULL
)
8057 globals
= elf32_arm_hash_table (info
);
8058 BFD_ASSERT (globals
!= NULL
);
8059 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8061 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8062 ARM2THUMB_GLUE_SECTION_NAME
);
8063 BFD_ASSERT (s
!= NULL
);
8064 BFD_ASSERT (s
->contents
!= NULL
);
8065 BFD_ASSERT (s
->output_section
!= NULL
);
8067 sec
= eh
->export_glue
->root
.u
.def
.section
;
8069 BFD_ASSERT (sec
->output_section
!= NULL
);
8071 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
8072 + sec
->output_section
->vma
;
8074 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
8075 h
->root
.u
.def
.section
->owner
,
8076 globals
->obfd
, sec
, val
, s
,
8082 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
8085 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
8090 struct elf32_arm_link_hash_table
*globals
;
8092 globals
= elf32_arm_hash_table (info
);
8093 BFD_ASSERT (globals
!= NULL
);
8094 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8096 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8097 ARM_BX_GLUE_SECTION_NAME
);
8098 BFD_ASSERT (s
!= NULL
);
8099 BFD_ASSERT (s
->contents
!= NULL
);
8100 BFD_ASSERT (s
->output_section
!= NULL
);
8102 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
8104 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
8106 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
8108 p
= s
->contents
+ glue_addr
;
8109 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
8110 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
8111 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
8112 globals
->bx_glue_offset
[reg
] |= 1;
8115 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
8118 /* Generate Arm stubs for exported Thumb symbols. */
8120 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
8121 struct bfd_link_info
*link_info
)
8123 struct elf32_arm_link_hash_table
* globals
;
8125 if (link_info
== NULL
)
8126 /* Ignore this if we are not called by the ELF backend linker. */
8129 globals
= elf32_arm_hash_table (link_info
);
8130 if (globals
== NULL
)
8133 /* If blx is available then exported Thumb symbols are OK and there is
8135 if (globals
->use_blx
)
8138 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
8142 /* Reserve space for COUNT dynamic relocations in relocation selection
8146 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8147 bfd_size_type count
)
8149 struct elf32_arm_link_hash_table
*htab
;
8151 htab
= elf32_arm_hash_table (info
);
8152 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
8155 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8158 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8159 dynamic, the relocations should go in SRELOC, otherwise they should
8160 go in the special .rel.iplt section. */
8163 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8164 bfd_size_type count
)
8166 struct elf32_arm_link_hash_table
*htab
;
8168 htab
= elf32_arm_hash_table (info
);
8169 if (!htab
->root
.dynamic_sections_created
)
8170 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
8173 BFD_ASSERT (sreloc
!= NULL
);
8174 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8178 /* Add relocation REL to the end of relocation section SRELOC. */
8181 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
8182 asection
*sreloc
, Elf_Internal_Rela
*rel
)
8185 struct elf32_arm_link_hash_table
*htab
;
8187 htab
= elf32_arm_hash_table (info
);
8188 if (!htab
->root
.dynamic_sections_created
8189 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
8190 sreloc
= htab
->root
.irelplt
;
8193 loc
= sreloc
->contents
;
8194 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
8195 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
8197 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
8200 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8201 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8205 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
8206 bfd_boolean is_iplt_entry
,
8207 union gotplt_union
*root_plt
,
8208 struct arm_plt_info
*arm_plt
)
8210 struct elf32_arm_link_hash_table
*htab
;
8214 htab
= elf32_arm_hash_table (info
);
8218 splt
= htab
->root
.iplt
;
8219 sgotplt
= htab
->root
.igotplt
;
8221 /* NaCl uses a special first entry in .iplt too. */
8222 if (htab
->nacl_p
&& splt
->size
== 0)
8223 splt
->size
+= htab
->plt_header_size
;
8225 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8226 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
8230 splt
= htab
->root
.splt
;
8231 sgotplt
= htab
->root
.sgotplt
;
8233 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8234 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
8236 /* If this is the first .plt entry, make room for the special
8238 if (splt
->size
== 0)
8239 splt
->size
+= htab
->plt_header_size
;
8241 htab
->next_tls_desc_index
++;
8244 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8245 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8246 splt
->size
+= PLT_THUMB_STUB_SIZE
;
8247 root_plt
->offset
= splt
->size
;
8248 splt
->size
+= htab
->plt_entry_size
;
8250 if (!htab
->symbian_p
)
8252 /* We also need to make an entry in the .got.plt section, which
8253 will be placed in the .got section by the linker script. */
8255 arm_plt
->got_offset
= sgotplt
->size
;
8257 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
8263 arm_movw_immediate (bfd_vma value
)
8265 return (value
& 0x00000fff) | ((value
& 0x0000f000) << 4);
8269 arm_movt_immediate (bfd_vma value
)
8271 return ((value
& 0x0fff0000) >> 16) | ((value
& 0xf0000000) >> 12);
8274 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8275 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8276 Otherwise, DYNINDX is the index of the symbol in the dynamic
8277 symbol table and SYM_VALUE is undefined.
8279 ROOT_PLT points to the offset of the PLT entry from the start of its
8280 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8281 bookkeeping information.
8283 Returns FALSE if there was a problem. */
8286 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
8287 union gotplt_union
*root_plt
,
8288 struct arm_plt_info
*arm_plt
,
8289 int dynindx
, bfd_vma sym_value
)
8291 struct elf32_arm_link_hash_table
*htab
;
8297 Elf_Internal_Rela rel
;
8298 bfd_vma plt_header_size
;
8299 bfd_vma got_header_size
;
8301 htab
= elf32_arm_hash_table (info
);
8303 /* Pick the appropriate sections and sizes. */
8306 splt
= htab
->root
.iplt
;
8307 sgot
= htab
->root
.igotplt
;
8308 srel
= htab
->root
.irelplt
;
8310 /* There are no reserved entries in .igot.plt, and no special
8311 first entry in .iplt. */
8312 got_header_size
= 0;
8313 plt_header_size
= 0;
8317 splt
= htab
->root
.splt
;
8318 sgot
= htab
->root
.sgotplt
;
8319 srel
= htab
->root
.srelplt
;
8321 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
8322 plt_header_size
= htab
->plt_header_size
;
8324 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
8326 /* Fill in the entry in the procedure linkage table. */
8327 if (htab
->symbian_p
)
8329 BFD_ASSERT (dynindx
>= 0);
8330 put_arm_insn (htab
, output_bfd
,
8331 elf32_arm_symbian_plt_entry
[0],
8332 splt
->contents
+ root_plt
->offset
);
8333 bfd_put_32 (output_bfd
,
8334 elf32_arm_symbian_plt_entry
[1],
8335 splt
->contents
+ root_plt
->offset
+ 4);
8337 /* Fill in the entry in the .rel.plt section. */
8338 rel
.r_offset
= (splt
->output_section
->vma
8339 + splt
->output_offset
8340 + root_plt
->offset
+ 4);
8341 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
8343 /* Get the index in the procedure linkage table which
8344 corresponds to this symbol. This is the index of this symbol
8345 in all the symbols for which we are making plt entries. The
8346 first entry in the procedure linkage table is reserved. */
8347 plt_index
= ((root_plt
->offset
- plt_header_size
)
8348 / htab
->plt_entry_size
);
8352 bfd_vma got_offset
, got_address
, plt_address
;
8353 bfd_vma got_displacement
, initial_got_entry
;
8356 BFD_ASSERT (sgot
!= NULL
);
8358 /* Get the offset into the .(i)got.plt table of the entry that
8359 corresponds to this function. */
8360 got_offset
= (arm_plt
->got_offset
& -2);
8362 /* Get the index in the procedure linkage table which
8363 corresponds to this symbol. This is the index of this symbol
8364 in all the symbols for which we are making plt entries.
8365 After the reserved .got.plt entries, all symbols appear in
8366 the same order as in .plt. */
8367 plt_index
= (got_offset
- got_header_size
) / 4;
8369 /* Calculate the address of the GOT entry. */
8370 got_address
= (sgot
->output_section
->vma
8371 + sgot
->output_offset
8374 /* ...and the address of the PLT entry. */
8375 plt_address
= (splt
->output_section
->vma
8376 + splt
->output_offset
8377 + root_plt
->offset
);
8379 ptr
= splt
->contents
+ root_plt
->offset
;
8380 if (htab
->vxworks_p
&& bfd_link_pic (info
))
8385 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8387 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
8389 val
|= got_address
- sgot
->output_section
->vma
;
8391 val
|= plt_index
* RELOC_SIZE (htab
);
8392 if (i
== 2 || i
== 5)
8393 bfd_put_32 (output_bfd
, val
, ptr
);
8395 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8398 else if (htab
->vxworks_p
)
8403 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8405 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
8409 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
8411 val
|= plt_index
* RELOC_SIZE (htab
);
8412 if (i
== 2 || i
== 5)
8413 bfd_put_32 (output_bfd
, val
, ptr
);
8415 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8418 loc
= (htab
->srelplt2
->contents
8419 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
8421 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8422 referencing the GOT for this PLT entry. */
8423 rel
.r_offset
= plt_address
+ 8;
8424 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
8425 rel
.r_addend
= got_offset
;
8426 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8427 loc
+= RELOC_SIZE (htab
);
8429 /* Create the R_ARM_ABS32 relocation referencing the
8430 beginning of the PLT for this GOT entry. */
8431 rel
.r_offset
= got_address
;
8432 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
8434 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8436 else if (htab
->nacl_p
)
8438 /* Calculate the displacement between the PLT slot and the
8439 common tail that's part of the special initial PLT slot. */
8440 int32_t tail_displacement
8441 = ((splt
->output_section
->vma
+ splt
->output_offset
8442 + ARM_NACL_PLT_TAIL_OFFSET
)
8443 - (plt_address
+ htab
->plt_entry_size
+ 4));
8444 BFD_ASSERT ((tail_displacement
& 3) == 0);
8445 tail_displacement
>>= 2;
8447 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
8448 || (-tail_displacement
& 0xff000000) == 0);
8450 /* Calculate the displacement between the PLT slot and the entry
8451 in the GOT. The offset accounts for the value produced by
8452 adding to pc in the penultimate instruction of the PLT stub. */
8453 got_displacement
= (got_address
8454 - (plt_address
+ htab
->plt_entry_size
));
8456 /* NaCl does not support interworking at all. */
8457 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
8459 put_arm_insn (htab
, output_bfd
,
8460 elf32_arm_nacl_plt_entry
[0]
8461 | arm_movw_immediate (got_displacement
),
8463 put_arm_insn (htab
, output_bfd
,
8464 elf32_arm_nacl_plt_entry
[1]
8465 | arm_movt_immediate (got_displacement
),
8467 put_arm_insn (htab
, output_bfd
,
8468 elf32_arm_nacl_plt_entry
[2],
8470 put_arm_insn (htab
, output_bfd
,
8471 elf32_arm_nacl_plt_entry
[3]
8472 | (tail_displacement
& 0x00ffffff),
8475 else if (using_thumb_only (htab
))
8477 /* PR ld/16017: Generate thumb only PLT entries. */
8478 if (!using_thumb2 (htab
))
8480 /* FIXME: We ought to be able to generate thumb-1 PLT
8482 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8487 /* Calculate the displacement between the PLT slot and the entry in
8488 the GOT. The 12-byte offset accounts for the value produced by
8489 adding to pc in the 3rd instruction of the PLT stub. */
8490 got_displacement
= got_address
- (plt_address
+ 12);
8492 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8493 instead of 'put_thumb_insn'. */
8494 put_arm_insn (htab
, output_bfd
,
8495 elf32_thumb2_plt_entry
[0]
8496 | ((got_displacement
& 0x000000ff) << 16)
8497 | ((got_displacement
& 0x00000700) << 20)
8498 | ((got_displacement
& 0x00000800) >> 1)
8499 | ((got_displacement
& 0x0000f000) >> 12),
8501 put_arm_insn (htab
, output_bfd
,
8502 elf32_thumb2_plt_entry
[1]
8503 | ((got_displacement
& 0x00ff0000) )
8504 | ((got_displacement
& 0x07000000) << 4)
8505 | ((got_displacement
& 0x08000000) >> 17)
8506 | ((got_displacement
& 0xf0000000) >> 28),
8508 put_arm_insn (htab
, output_bfd
,
8509 elf32_thumb2_plt_entry
[2],
8511 put_arm_insn (htab
, output_bfd
,
8512 elf32_thumb2_plt_entry
[3],
8517 /* Calculate the displacement between the PLT slot and the
8518 entry in the GOT. The eight-byte offset accounts for the
8519 value produced by adding to pc in the first instruction
8521 got_displacement
= got_address
- (plt_address
+ 8);
8523 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8525 put_thumb_insn (htab
, output_bfd
,
8526 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
8527 put_thumb_insn (htab
, output_bfd
,
8528 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
8531 if (!elf32_arm_use_long_plt_entry
)
8533 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
8535 put_arm_insn (htab
, output_bfd
,
8536 elf32_arm_plt_entry_short
[0]
8537 | ((got_displacement
& 0x0ff00000) >> 20),
8539 put_arm_insn (htab
, output_bfd
,
8540 elf32_arm_plt_entry_short
[1]
8541 | ((got_displacement
& 0x000ff000) >> 12),
8543 put_arm_insn (htab
, output_bfd
,
8544 elf32_arm_plt_entry_short
[2]
8545 | (got_displacement
& 0x00000fff),
8547 #ifdef FOUR_WORD_PLT
8548 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
8553 put_arm_insn (htab
, output_bfd
,
8554 elf32_arm_plt_entry_long
[0]
8555 | ((got_displacement
& 0xf0000000) >> 28),
8557 put_arm_insn (htab
, output_bfd
,
8558 elf32_arm_plt_entry_long
[1]
8559 | ((got_displacement
& 0x0ff00000) >> 20),
8561 put_arm_insn (htab
, output_bfd
,
8562 elf32_arm_plt_entry_long
[2]
8563 | ((got_displacement
& 0x000ff000) >> 12),
8565 put_arm_insn (htab
, output_bfd
,
8566 elf32_arm_plt_entry_long
[3]
8567 | (got_displacement
& 0x00000fff),
8572 /* Fill in the entry in the .rel(a).(i)plt section. */
8573 rel
.r_offset
= got_address
;
8577 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8578 The dynamic linker or static executable then calls SYM_VALUE
8579 to determine the correct run-time value of the .igot.plt entry. */
8580 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
8581 initial_got_entry
= sym_value
;
8585 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
8586 initial_got_entry
= (splt
->output_section
->vma
8587 + splt
->output_offset
);
8590 /* Fill in the entry in the global offset table. */
8591 bfd_put_32 (output_bfd
, initial_got_entry
,
8592 sgot
->contents
+ got_offset
);
8596 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
8599 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
8600 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8606 /* Some relocations map to different relocations depending on the
8607 target. Return the real relocation. */
8610 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
8616 if (globals
->target1_is_rel
)
8622 return globals
->target2_reloc
;
8629 /* Return the base VMA address which should be subtracted from real addresses
8630 when resolving @dtpoff relocation.
8631 This is PT_TLS segment p_vaddr. */
8634 dtpoff_base (struct bfd_link_info
*info
)
8636 /* If tls_sec is NULL, we should have signalled an error already. */
8637 if (elf_hash_table (info
)->tls_sec
== NULL
)
8639 return elf_hash_table (info
)->tls_sec
->vma
;
8642 /* Return the relocation value for @tpoff relocation
8643 if STT_TLS virtual address is ADDRESS. */
8646 tpoff (struct bfd_link_info
*info
, bfd_vma address
)
8648 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
8651 /* If tls_sec is NULL, we should have signalled an error already. */
8652 if (htab
->tls_sec
== NULL
)
8654 base
= align_power ((bfd_vma
) TCB_SIZE
, htab
->tls_sec
->alignment_power
);
8655 return address
- htab
->tls_sec
->vma
+ base
;
8658 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8659 VALUE is the relocation value. */
8661 static bfd_reloc_status_type
8662 elf32_arm_abs12_reloc (bfd
*abfd
, void *data
, bfd_vma value
)
8665 return bfd_reloc_overflow
;
8667 value
|= bfd_get_32 (abfd
, data
) & 0xfffff000;
8668 bfd_put_32 (abfd
, value
, data
);
8669 return bfd_reloc_ok
;
8672 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8673 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8674 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8676 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8677 is to then call final_link_relocate. Return other values in the
8680 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8681 the pre-relaxed code. It would be nice if the relocs were updated
8682 to match the optimization. */
8684 static bfd_reloc_status_type
8685 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
8686 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
8687 Elf_Internal_Rela
*rel
, unsigned long is_local
)
8691 switch (ELF32_R_TYPE (rel
->r_info
))
8694 return bfd_reloc_notsupported
;
8696 case R_ARM_TLS_GOTDESC
:
8701 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
8703 insn
-= 5; /* THUMB */
8705 insn
-= 8; /* ARM */
8707 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
8708 return bfd_reloc_continue
;
8710 case R_ARM_THM_TLS_DESCSEQ
:
8712 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
8713 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
8717 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8719 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
8723 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8726 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
8728 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
8732 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8735 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
8736 contents
+ rel
->r_offset
);
8740 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
8741 /* It's a 32 bit instruction, fetch the rest of it for
8742 error generation. */
8744 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
8745 (*_bfd_error_handler
)
8746 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8747 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
8748 return bfd_reloc_notsupported
;
8752 case R_ARM_TLS_DESCSEQ
:
8754 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
8755 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8759 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
8760 contents
+ rel
->r_offset
);
8762 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8766 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
8769 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
8770 contents
+ rel
->r_offset
);
8772 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
8776 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
8779 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
8780 contents
+ rel
->r_offset
);
8784 (*_bfd_error_handler
)
8785 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
8786 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
8787 return bfd_reloc_notsupported
;
8791 case R_ARM_TLS_CALL
:
8792 /* GD->IE relaxation, turn the instruction into 'nop' or
8793 'ldr r0, [pc,r0]' */
8794 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
8795 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
8798 case R_ARM_THM_TLS_CALL
:
8799 /* GD->IE relaxation. */
8801 /* add r0,pc; ldr r0, [r0] */
8803 else if (arch_has_thumb2_nop (globals
))
8810 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
8811 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
8814 return bfd_reloc_ok
;
8817 /* For a given value of n, calculate the value of G_n as required to
8818 deal with group relocations. We return it in the form of an
8819 encoded constant-and-rotation, together with the final residual. If n is
8820 specified as less than zero, then final_residual is filled with the
8821 input value and no further action is performed. */
8824 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
8828 bfd_vma encoded_g_n
= 0;
8829 bfd_vma residual
= value
; /* Also known as Y_n. */
8831 for (current_n
= 0; current_n
<= n
; current_n
++)
8835 /* Calculate which part of the value to mask. */
8842 /* Determine the most significant bit in the residual and
8843 align the resulting value to a 2-bit boundary. */
8844 for (msb
= 30; msb
>= 0; msb
-= 2)
8845 if (residual
& (3 << msb
))
8848 /* The desired shift is now (msb - 6), or zero, whichever
8855 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8856 g_n
= residual
& (0xff << shift
);
8857 encoded_g_n
= (g_n
>> shift
)
8858 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
8860 /* Calculate the residual for the next time around. */
8864 *final_residual
= residual
;
8869 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8870 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8873 identify_add_or_sub (bfd_vma insn
)
8875 int opcode
= insn
& 0x1e00000;
8877 if (opcode
== 1 << 23) /* ADD */
8880 if (opcode
== 1 << 22) /* SUB */
8886 /* Perform a relocation as part of a final link. */
8888 static bfd_reloc_status_type
8889 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
8892 asection
* input_section
,
8893 bfd_byte
* contents
,
8894 Elf_Internal_Rela
* rel
,
8896 struct bfd_link_info
* info
,
8898 const char * sym_name
,
8899 unsigned char st_type
,
8900 enum arm_st_branch_type branch_type
,
8901 struct elf_link_hash_entry
* h
,
8902 bfd_boolean
* unresolved_reloc_p
,
8903 char ** error_message
)
8905 unsigned long r_type
= howto
->type
;
8906 unsigned long r_symndx
;
8907 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
8908 bfd_vma
* local_got_offsets
;
8909 bfd_vma
* local_tlsdesc_gotents
;
8912 asection
* sreloc
= NULL
;
8915 bfd_signed_vma signed_addend
;
8916 unsigned char dynreloc_st_type
;
8917 bfd_vma dynreloc_value
;
8918 struct elf32_arm_link_hash_table
* globals
;
8919 struct elf32_arm_link_hash_entry
*eh
;
8920 union gotplt_union
*root_plt
;
8921 struct arm_plt_info
*arm_plt
;
8923 bfd_vma gotplt_offset
;
8924 bfd_boolean has_iplt_entry
;
8926 globals
= elf32_arm_hash_table (info
);
8927 if (globals
== NULL
)
8928 return bfd_reloc_notsupported
;
8930 BFD_ASSERT (is_arm_elf (input_bfd
));
8932 /* Some relocation types map to different relocations depending on the
8933 target. We pick the right one here. */
8934 r_type
= arm_real_reloc_type (globals
, r_type
);
8936 /* It is possible to have linker relaxations on some TLS access
8937 models. Update our information here. */
8938 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
8940 if (r_type
!= howto
->type
)
8941 howto
= elf32_arm_howto_from_type (r_type
);
8943 eh
= (struct elf32_arm_link_hash_entry
*) h
;
8944 sgot
= globals
->root
.sgot
;
8945 local_got_offsets
= elf_local_got_offsets (input_bfd
);
8946 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
8948 if (globals
->root
.dynamic_sections_created
)
8949 srelgot
= globals
->root
.srelgot
;
8953 r_symndx
= ELF32_R_SYM (rel
->r_info
);
8955 if (globals
->use_rel
)
8957 addend
= bfd_get_32 (input_bfd
, hit_data
) & howto
->src_mask
;
8959 if (addend
& ((howto
->src_mask
+ 1) >> 1))
8962 signed_addend
&= ~ howto
->src_mask
;
8963 signed_addend
|= addend
;
8966 signed_addend
= addend
;
8969 addend
= signed_addend
= rel
->r_addend
;
8971 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
8972 are resolving a function call relocation. */
8973 if (using_thumb_only (globals
)
8974 && (r_type
== R_ARM_THM_CALL
8975 || r_type
== R_ARM_THM_JUMP24
)
8976 && branch_type
== ST_BRANCH_TO_ARM
)
8977 branch_type
= ST_BRANCH_TO_THUMB
;
8979 /* Record the symbol information that should be used in dynamic
8981 dynreloc_st_type
= st_type
;
8982 dynreloc_value
= value
;
8983 if (branch_type
== ST_BRANCH_TO_THUMB
)
8984 dynreloc_value
|= 1;
8986 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8987 VALUE appropriately for relocations that we resolve at link time. */
8988 has_iplt_entry
= FALSE
;
8989 if (elf32_arm_get_plt_info (input_bfd
, eh
, r_symndx
, &root_plt
, &arm_plt
)
8990 && root_plt
->offset
!= (bfd_vma
) -1)
8992 plt_offset
= root_plt
->offset
;
8993 gotplt_offset
= arm_plt
->got_offset
;
8995 if (h
== NULL
|| eh
->is_iplt
)
8997 has_iplt_entry
= TRUE
;
8998 splt
= globals
->root
.iplt
;
9000 /* Populate .iplt entries here, because not all of them will
9001 be seen by finish_dynamic_symbol. The lower bit is set if
9002 we have already populated the entry. */
9007 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
9008 -1, dynreloc_value
))
9009 root_plt
->offset
|= 1;
9011 return bfd_reloc_notsupported
;
9014 /* Static relocations always resolve to the .iplt entry. */
9016 value
= (splt
->output_section
->vma
9017 + splt
->output_offset
9019 branch_type
= ST_BRANCH_TO_ARM
;
9021 /* If there are non-call relocations that resolve to the .iplt
9022 entry, then all dynamic ones must too. */
9023 if (arm_plt
->noncall_refcount
!= 0)
9025 dynreloc_st_type
= st_type
;
9026 dynreloc_value
= value
;
9030 /* We populate the .plt entry in finish_dynamic_symbol. */
9031 splt
= globals
->root
.splt
;
9036 plt_offset
= (bfd_vma
) -1;
9037 gotplt_offset
= (bfd_vma
) -1;
9043 /* We don't need to find a value for this symbol. It's just a
9045 *unresolved_reloc_p
= FALSE
;
9046 return bfd_reloc_ok
;
9049 if (!globals
->vxworks_p
)
9050 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9054 case R_ARM_ABS32_NOI
:
9056 case R_ARM_REL32_NOI
:
9062 /* Handle relocations which should use the PLT entry. ABS32/REL32
9063 will use the symbol's value, which may point to a PLT entry, but we
9064 don't need to handle that here. If we created a PLT entry, all
9065 branches in this object should go to it, except if the PLT is too
9066 far away, in which case a long branch stub should be inserted. */
9067 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
9068 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
9069 && r_type
!= R_ARM_CALL
9070 && r_type
!= R_ARM_JUMP24
9071 && r_type
!= R_ARM_PLT32
)
9072 && plt_offset
!= (bfd_vma
) -1)
9074 /* If we've created a .plt section, and assigned a PLT entry
9075 to this function, it must either be a STT_GNU_IFUNC reference
9076 or not be known to bind locally. In other cases, we should
9077 have cleared the PLT entry by now. */
9078 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
9080 value
= (splt
->output_section
->vma
9081 + splt
->output_offset
9083 *unresolved_reloc_p
= FALSE
;
9084 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9085 contents
, rel
->r_offset
, value
,
9089 /* When generating a shared object or relocatable executable, these
9090 relocations are copied into the output file to be resolved at
9092 if ((bfd_link_pic (info
)
9093 || globals
->root
.is_relocatable_executable
)
9094 && (input_section
->flags
& SEC_ALLOC
)
9095 && !(globals
->vxworks_p
9096 && strcmp (input_section
->output_section
->name
,
9098 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
9099 || !SYMBOL_CALLS_LOCAL (info
, h
))
9100 && !(input_bfd
== globals
->stub_bfd
9101 && strstr (input_section
->name
, STUB_SUFFIX
))
9103 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
9104 || h
->root
.type
!= bfd_link_hash_undefweak
)
9105 && r_type
!= R_ARM_PC24
9106 && r_type
!= R_ARM_CALL
9107 && r_type
!= R_ARM_JUMP24
9108 && r_type
!= R_ARM_PREL31
9109 && r_type
!= R_ARM_PLT32
)
9111 Elf_Internal_Rela outrel
;
9112 bfd_boolean skip
, relocate
;
9114 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
9117 char *v
= _("shared object");
9119 if (bfd_link_executable (info
))
9120 v
= _("PIE executable");
9122 (*_bfd_error_handler
)
9123 (_("%B: relocation %s against external or undefined symbol `%s'"
9124 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
9125 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
9126 return bfd_reloc_notsupported
;
9129 *unresolved_reloc_p
= FALSE
;
9131 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
9133 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
9134 ! globals
->use_rel
);
9137 return bfd_reloc_notsupported
;
9143 outrel
.r_addend
= addend
;
9145 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
9147 if (outrel
.r_offset
== (bfd_vma
) -1)
9149 else if (outrel
.r_offset
== (bfd_vma
) -2)
9150 skip
= TRUE
, relocate
= TRUE
;
9151 outrel
.r_offset
+= (input_section
->output_section
->vma
9152 + input_section
->output_offset
);
9155 memset (&outrel
, 0, sizeof outrel
);
9158 && (!bfd_link_pic (info
)
9159 || !SYMBOLIC_BIND (info
, h
)
9160 || !h
->def_regular
))
9161 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
9166 /* This symbol is local, or marked to become local. */
9167 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
);
9168 if (globals
->symbian_p
)
9172 /* On Symbian OS, the data segment and text segement
9173 can be relocated independently. Therefore, we
9174 must indicate the segment to which this
9175 relocation is relative. The BPABI allows us to
9176 use any symbol in the right segment; we just use
9177 the section symbol as it is convenient. (We
9178 cannot use the symbol given by "h" directly as it
9179 will not appear in the dynamic symbol table.)
9181 Note that the dynamic linker ignores the section
9182 symbol value, so we don't subtract osec->vma
9183 from the emitted reloc addend. */
9185 osec
= sym_sec
->output_section
;
9187 osec
= input_section
->output_section
;
9188 symbol
= elf_section_data (osec
)->dynindx
;
9191 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
9193 if ((osec
->flags
& SEC_READONLY
) == 0
9194 && htab
->data_index_section
!= NULL
)
9195 osec
= htab
->data_index_section
;
9197 osec
= htab
->text_index_section
;
9198 symbol
= elf_section_data (osec
)->dynindx
;
9200 BFD_ASSERT (symbol
!= 0);
9203 /* On SVR4-ish systems, the dynamic loader cannot
9204 relocate the text and data segments independently,
9205 so the symbol does not matter. */
9207 if (dynreloc_st_type
== STT_GNU_IFUNC
)
9208 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9209 to the .iplt entry. Instead, every non-call reference
9210 must use an R_ARM_IRELATIVE relocation to obtain the
9211 correct run-time address. */
9212 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
9214 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
9215 if (globals
->use_rel
)
9218 outrel
.r_addend
+= dynreloc_value
;
9221 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
9223 /* If this reloc is against an external symbol, we do not want to
9224 fiddle with the addend. Otherwise, we need to include the symbol
9225 value so that it becomes an addend for the dynamic reloc. */
9227 return bfd_reloc_ok
;
9229 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9230 contents
, rel
->r_offset
,
9231 dynreloc_value
, (bfd_vma
) 0);
9233 else switch (r_type
)
9236 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9238 case R_ARM_XPC25
: /* Arm BLX instruction. */
9241 case R_ARM_PC24
: /* Arm B/BL instruction. */
9244 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
9246 if (r_type
== R_ARM_XPC25
)
9248 /* Check for Arm calling Arm function. */
9249 /* FIXME: Should we translate the instruction into a BL
9250 instruction instead ? */
9251 if (branch_type
!= ST_BRANCH_TO_THUMB
)
9252 (*_bfd_error_handler
)
9253 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9255 h
? h
->root
.root
.string
: "(local)");
9257 else if (r_type
== R_ARM_PC24
)
9259 /* Check for Arm calling Thumb function. */
9260 if (branch_type
== ST_BRANCH_TO_THUMB
)
9262 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
9263 output_bfd
, input_section
,
9264 hit_data
, sym_sec
, rel
->r_offset
,
9265 signed_addend
, value
,
9267 return bfd_reloc_ok
;
9269 return bfd_reloc_dangerous
;
9273 /* Check if a stub has to be inserted because the
9274 destination is too far or we are changing mode. */
9275 if ( r_type
== R_ARM_CALL
9276 || r_type
== R_ARM_JUMP24
9277 || r_type
== R_ARM_PLT32
)
9279 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9280 struct elf32_arm_link_hash_entry
*hash
;
9282 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9283 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9284 st_type
, &branch_type
,
9285 hash
, value
, sym_sec
,
9286 input_bfd
, sym_name
);
9288 if (stub_type
!= arm_stub_none
)
9290 /* The target is out of reach, so redirect the
9291 branch to the local stub for this function. */
9292 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9297 if (stub_entry
!= NULL
)
9298 value
= (stub_entry
->stub_offset
9299 + stub_entry
->stub_sec
->output_offset
9300 + stub_entry
->stub_sec
->output_section
->vma
);
9302 if (plt_offset
!= (bfd_vma
) -1)
9303 *unresolved_reloc_p
= FALSE
;
9308 /* If the call goes through a PLT entry, make sure to
9309 check distance to the right destination address. */
9310 if (plt_offset
!= (bfd_vma
) -1)
9312 value
= (splt
->output_section
->vma
9313 + splt
->output_offset
9315 *unresolved_reloc_p
= FALSE
;
9316 /* The PLT entry is in ARM mode, regardless of the
9318 branch_type
= ST_BRANCH_TO_ARM
;
9323 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9325 S is the address of the symbol in the relocation.
9326 P is address of the instruction being relocated.
9327 A is the addend (extracted from the instruction) in bytes.
9329 S is held in 'value'.
9330 P is the base address of the section containing the
9331 instruction plus the offset of the reloc into that
9333 (input_section->output_section->vma +
9334 input_section->output_offset +
9336 A is the addend, converted into bytes, ie:
9339 Note: None of these operations have knowledge of the pipeline
9340 size of the processor, thus it is up to the assembler to
9341 encode this information into the addend. */
9342 value
-= (input_section
->output_section
->vma
9343 + input_section
->output_offset
);
9344 value
-= rel
->r_offset
;
9345 if (globals
->use_rel
)
9346 value
+= (signed_addend
<< howto
->size
);
9348 /* RELA addends do not have to be adjusted by howto->size. */
9349 value
+= signed_addend
;
9351 signed_addend
= value
;
9352 signed_addend
>>= howto
->rightshift
;
9354 /* A branch to an undefined weak symbol is turned into a jump to
9355 the next instruction unless a PLT entry will be created.
9356 Do the same for local undefined symbols (but not for STN_UNDEF).
9357 The jump to the next instruction is optimized as a NOP depending
9358 on the architecture. */
9359 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
9360 && plt_offset
== (bfd_vma
) -1)
9361 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
9363 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
9365 if (arch_has_arm_nop (globals
))
9366 value
|= 0x0320f000;
9368 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9372 /* Perform a signed range check. */
9373 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
9374 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
9375 return bfd_reloc_overflow
;
9377 addend
= (value
& 2);
9379 value
= (signed_addend
& howto
->dst_mask
)
9380 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9382 if (r_type
== R_ARM_CALL
)
9384 /* Set the H bit in the BLX instruction. */
9385 if (branch_type
== ST_BRANCH_TO_THUMB
)
9390 value
&= ~(bfd_vma
)(1 << 24);
9393 /* Select the correct instruction (BL or BLX). */
9394 /* Only if we are not handling a BL to a stub. In this
9395 case, mode switching is performed by the stub. */
9396 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
9398 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
9400 value
&= ~(bfd_vma
)(1 << 28);
9410 if (branch_type
== ST_BRANCH_TO_THUMB
)
9414 case R_ARM_ABS32_NOI
:
9420 if (branch_type
== ST_BRANCH_TO_THUMB
)
9422 value
-= (input_section
->output_section
->vma
9423 + input_section
->output_offset
+ rel
->r_offset
);
9426 case R_ARM_REL32_NOI
:
9428 value
-= (input_section
->output_section
->vma
9429 + input_section
->output_offset
+ rel
->r_offset
);
9433 value
-= (input_section
->output_section
->vma
9434 + input_section
->output_offset
+ rel
->r_offset
);
9435 value
+= signed_addend
;
9436 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
9438 /* Check for overflow. */
9439 if ((value
^ (value
>> 1)) & (1 << 30))
9440 return bfd_reloc_overflow
;
9442 value
&= 0x7fffffff;
9443 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
9444 if (branch_type
== ST_BRANCH_TO_THUMB
)
9449 bfd_put_32 (input_bfd
, value
, hit_data
);
9450 return bfd_reloc_ok
;
9453 /* PR 16202: Refetch the addend using the correct size. */
9454 if (globals
->use_rel
)
9455 addend
= bfd_get_8 (input_bfd
, hit_data
);
9458 /* There is no way to tell whether the user intended to use a signed or
9459 unsigned addend. When checking for overflow we accept either,
9460 as specified by the AAELF. */
9461 if ((long) value
> 0xff || (long) value
< -0x80)
9462 return bfd_reloc_overflow
;
9464 bfd_put_8 (input_bfd
, value
, hit_data
);
9465 return bfd_reloc_ok
;
9468 /* PR 16202: Refetch the addend using the correct size. */
9469 if (globals
->use_rel
)
9470 addend
= bfd_get_16 (input_bfd
, hit_data
);
9473 /* See comment for R_ARM_ABS8. */
9474 if ((long) value
> 0xffff || (long) value
< -0x8000)
9475 return bfd_reloc_overflow
;
9477 bfd_put_16 (input_bfd
, value
, hit_data
);
9478 return bfd_reloc_ok
;
9480 case R_ARM_THM_ABS5
:
9481 /* Support ldr and str instructions for the thumb. */
9482 if (globals
->use_rel
)
9484 /* Need to refetch addend. */
9485 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9486 /* ??? Need to determine shift amount from operand size. */
9487 addend
>>= howto
->rightshift
;
9491 /* ??? Isn't value unsigned? */
9492 if ((long) value
> 0x1f || (long) value
< -0x10)
9493 return bfd_reloc_overflow
;
9495 /* ??? Value needs to be properly shifted into place first. */
9496 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
9497 bfd_put_16 (input_bfd
, value
, hit_data
);
9498 return bfd_reloc_ok
;
9500 case R_ARM_THM_ALU_PREL_11_0
:
9501 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9504 bfd_signed_vma relocation
;
9506 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9507 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9509 if (globals
->use_rel
)
9511 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
9512 | ((insn
& (1 << 26)) >> 15);
9513 if (insn
& 0xf00000)
9514 signed_addend
= -signed_addend
;
9517 relocation
= value
+ signed_addend
;
9518 relocation
-= Pa (input_section
->output_section
->vma
9519 + input_section
->output_offset
9524 if (value
>= 0x1000)
9525 return bfd_reloc_overflow
;
9527 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
9528 | ((value
& 0x700) << 4)
9529 | ((value
& 0x800) << 15);
9533 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9534 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9536 return bfd_reloc_ok
;
9540 /* PR 10073: This reloc is not generated by the GNU toolchain,
9541 but it is supported for compatibility with third party libraries
9542 generated by other compilers, specifically the ARM/IAR. */
9545 bfd_signed_vma relocation
;
9547 insn
= bfd_get_16 (input_bfd
, hit_data
);
9549 if (globals
->use_rel
)
9550 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
9552 relocation
= value
+ addend
;
9553 relocation
-= Pa (input_section
->output_section
->vma
9554 + input_section
->output_offset
9559 /* We do not check for overflow of this reloc. Although strictly
9560 speaking this is incorrect, it appears to be necessary in order
9561 to work with IAR generated relocs. Since GCC and GAS do not
9562 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9563 a problem for them. */
9566 insn
= (insn
& 0xff00) | (value
>> 2);
9568 bfd_put_16 (input_bfd
, insn
, hit_data
);
9570 return bfd_reloc_ok
;
9573 case R_ARM_THM_PC12
:
9574 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9577 bfd_signed_vma relocation
;
9579 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9580 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9582 if (globals
->use_rel
)
9584 signed_addend
= insn
& 0xfff;
9585 if (!(insn
& (1 << 23)))
9586 signed_addend
= -signed_addend
;
9589 relocation
= value
+ signed_addend
;
9590 relocation
-= Pa (input_section
->output_section
->vma
9591 + input_section
->output_offset
9596 if (value
>= 0x1000)
9597 return bfd_reloc_overflow
;
9599 insn
= (insn
& 0xff7ff000) | value
;
9600 if (relocation
>= 0)
9603 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9604 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9606 return bfd_reloc_ok
;
9609 case R_ARM_THM_XPC22
:
9610 case R_ARM_THM_CALL
:
9611 case R_ARM_THM_JUMP24
:
9612 /* Thumb BL (branch long instruction). */
9616 bfd_boolean overflow
= FALSE
;
9617 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9618 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9619 bfd_signed_vma reloc_signed_max
;
9620 bfd_signed_vma reloc_signed_min
;
9622 bfd_signed_vma signed_check
;
9624 const int thumb2
= using_thumb2 (globals
);
9626 /* A branch to an undefined weak symbol is turned into a jump to
9627 the next instruction unless a PLT entry will be created.
9628 The jump to the next instruction is optimized as a NOP.W for
9629 Thumb-2 enabled architectures. */
9630 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
9631 && plt_offset
== (bfd_vma
) -1)
9633 if (arch_has_thumb2_nop (globals
))
9635 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
9636 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
9640 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
9641 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
9643 return bfd_reloc_ok
;
9646 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9647 with Thumb-1) involving the J1 and J2 bits. */
9648 if (globals
->use_rel
)
9650 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
9651 bfd_vma upper
= upper_insn
& 0x3ff;
9652 bfd_vma lower
= lower_insn
& 0x7ff;
9653 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
9654 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
9655 bfd_vma i1
= j1
^ s
? 0 : 1;
9656 bfd_vma i2
= j2
^ s
? 0 : 1;
9658 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
9660 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
9662 signed_addend
= addend
;
9665 if (r_type
== R_ARM_THM_XPC22
)
9667 /* Check for Thumb to Thumb call. */
9668 /* FIXME: Should we translate the instruction into a BL
9669 instruction instead ? */
9670 if (branch_type
== ST_BRANCH_TO_THUMB
)
9671 (*_bfd_error_handler
)
9672 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9674 h
? h
->root
.root
.string
: "(local)");
9678 /* If it is not a call to Thumb, assume call to Arm.
9679 If it is a call relative to a section name, then it is not a
9680 function call at all, but rather a long jump. Calls through
9681 the PLT do not require stubs. */
9682 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
9684 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
9686 /* Convert BL to BLX. */
9687 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9689 else if (( r_type
!= R_ARM_THM_CALL
)
9690 && (r_type
!= R_ARM_THM_JUMP24
))
9692 if (elf32_thumb_to_arm_stub
9693 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
9694 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
9696 return bfd_reloc_ok
;
9698 return bfd_reloc_dangerous
;
9701 else if (branch_type
== ST_BRANCH_TO_THUMB
9703 && r_type
== R_ARM_THM_CALL
)
9705 /* Make sure this is a BL. */
9706 lower_insn
|= 0x1800;
9710 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9711 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
9713 /* Check if a stub has to be inserted because the destination
9715 struct elf32_arm_stub_hash_entry
*stub_entry
;
9716 struct elf32_arm_link_hash_entry
*hash
;
9718 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9720 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9721 st_type
, &branch_type
,
9722 hash
, value
, sym_sec
,
9723 input_bfd
, sym_name
);
9725 if (stub_type
!= arm_stub_none
)
9727 /* The target is out of reach or we are changing modes, so
9728 redirect the branch to the local stub for this
9730 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9734 if (stub_entry
!= NULL
)
9736 value
= (stub_entry
->stub_offset
9737 + stub_entry
->stub_sec
->output_offset
9738 + stub_entry
->stub_sec
->output_section
->vma
);
9740 if (plt_offset
!= (bfd_vma
) -1)
9741 *unresolved_reloc_p
= FALSE
;
9744 /* If this call becomes a call to Arm, force BLX. */
9745 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
9748 && !arm_stub_is_thumb (stub_entry
->stub_type
))
9749 || branch_type
!= ST_BRANCH_TO_THUMB
)
9750 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9755 /* Handle calls via the PLT. */
9756 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
9758 value
= (splt
->output_section
->vma
9759 + splt
->output_offset
9762 if (globals
->use_blx
9763 && r_type
== R_ARM_THM_CALL
9764 && ! using_thumb_only (globals
))
9766 /* If the Thumb BLX instruction is available, convert
9767 the BL to a BLX instruction to call the ARM-mode
9769 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9770 branch_type
= ST_BRANCH_TO_ARM
;
9774 if (! using_thumb_only (globals
))
9775 /* Target the Thumb stub before the ARM PLT entry. */
9776 value
-= PLT_THUMB_STUB_SIZE
;
9777 branch_type
= ST_BRANCH_TO_THUMB
;
9779 *unresolved_reloc_p
= FALSE
;
9782 relocation
= value
+ signed_addend
;
9784 relocation
-= (input_section
->output_section
->vma
9785 + input_section
->output_offset
9788 check
= relocation
>> howto
->rightshift
;
9790 /* If this is a signed value, the rightshift just dropped
9791 leading 1 bits (assuming twos complement). */
9792 if ((bfd_signed_vma
) relocation
>= 0)
9793 signed_check
= check
;
9795 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
9797 /* Calculate the permissible maximum and minimum values for
9798 this relocation according to whether we're relocating for
9800 bitsize
= howto
->bitsize
;
9803 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
9804 reloc_signed_min
= ~reloc_signed_max
;
9806 /* Assumes two's complement. */
9807 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9810 if ((lower_insn
& 0x5000) == 0x4000)
9811 /* For a BLX instruction, make sure that the relocation is rounded up
9812 to a word boundary. This follows the semantics of the instruction
9813 which specifies that bit 1 of the target address will come from bit
9814 1 of the base address. */
9815 relocation
= (relocation
+ 2) & ~ 3;
9817 /* Put RELOCATION back into the insn. Assumes two's complement.
9818 We use the Thumb-2 encoding, which is safe even if dealing with
9819 a Thumb-1 instruction by virtue of our overflow check above. */
9820 reloc_sign
= (signed_check
< 0) ? 1 : 0;
9821 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
9822 | ((relocation
>> 12) & 0x3ff)
9823 | (reloc_sign
<< 10);
9824 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
9825 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
9826 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
9827 | ((relocation
>> 1) & 0x7ff);
9829 /* Put the relocated value back in the object file: */
9830 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
9831 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
9833 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
9837 case R_ARM_THM_JUMP19
:
9838 /* Thumb32 conditional branch instruction. */
9841 bfd_boolean overflow
= FALSE
;
9842 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9843 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9844 bfd_signed_vma reloc_signed_max
= 0xffffe;
9845 bfd_signed_vma reloc_signed_min
= -0x100000;
9846 bfd_signed_vma signed_check
;
9847 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9848 struct elf32_arm_stub_hash_entry
*stub_entry
;
9849 struct elf32_arm_link_hash_entry
*hash
;
9851 /* Need to refetch the addend, reconstruct the top three bits,
9852 and squish the two 11 bit pieces together. */
9853 if (globals
->use_rel
)
9855 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
9856 bfd_vma upper
= (upper_insn
& 0x003f);
9857 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
9858 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
9859 bfd_vma lower
= (lower_insn
& 0x07ff);
9864 upper
-= 0x0100; /* Sign extend. */
9866 addend
= (upper
<< 12) | (lower
<< 1);
9867 signed_addend
= addend
;
9870 /* Handle calls via the PLT. */
9871 if (plt_offset
!= (bfd_vma
) -1)
9873 value
= (splt
->output_section
->vma
9874 + splt
->output_offset
9876 /* Target the Thumb stub before the ARM PLT entry. */
9877 value
-= PLT_THUMB_STUB_SIZE
;
9878 *unresolved_reloc_p
= FALSE
;
9881 hash
= (struct elf32_arm_link_hash_entry
*)h
;
9883 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9884 st_type
, &branch_type
,
9885 hash
, value
, sym_sec
,
9886 input_bfd
, sym_name
);
9887 if (stub_type
!= arm_stub_none
)
9889 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9893 if (stub_entry
!= NULL
)
9895 value
= (stub_entry
->stub_offset
9896 + stub_entry
->stub_sec
->output_offset
9897 + stub_entry
->stub_sec
->output_section
->vma
);
9901 relocation
= value
+ signed_addend
;
9902 relocation
-= (input_section
->output_section
->vma
9903 + input_section
->output_offset
9905 signed_check
= (bfd_signed_vma
) relocation
;
9907 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9910 /* Put RELOCATION back into the insn. */
9912 bfd_vma S
= (relocation
& 0x00100000) >> 20;
9913 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
9914 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
9915 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
9916 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
9918 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
9919 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
9922 /* Put the relocated value back in the object file: */
9923 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
9924 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
9926 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
9929 case R_ARM_THM_JUMP11
:
9930 case R_ARM_THM_JUMP8
:
9931 case R_ARM_THM_JUMP6
:
9932 /* Thumb B (branch) instruction). */
9934 bfd_signed_vma relocation
;
9935 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
9936 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
9937 bfd_signed_vma signed_check
;
9939 /* CZB cannot jump backward. */
9940 if (r_type
== R_ARM_THM_JUMP6
)
9941 reloc_signed_min
= 0;
9943 if (globals
->use_rel
)
9945 /* Need to refetch addend. */
9946 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9947 if (addend
& ((howto
->src_mask
+ 1) >> 1))
9950 signed_addend
&= ~ howto
->src_mask
;
9951 signed_addend
|= addend
;
9954 signed_addend
= addend
;
9955 /* The value in the insn has been right shifted. We need to
9956 undo this, so that we can perform the address calculation
9957 in terms of bytes. */
9958 signed_addend
<<= howto
->rightshift
;
9960 relocation
= value
+ signed_addend
;
9962 relocation
-= (input_section
->output_section
->vma
9963 + input_section
->output_offset
9966 relocation
>>= howto
->rightshift
;
9967 signed_check
= relocation
;
9969 if (r_type
== R_ARM_THM_JUMP6
)
9970 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
9972 relocation
&= howto
->dst_mask
;
9973 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9975 bfd_put_16 (input_bfd
, relocation
, hit_data
);
9977 /* Assumes two's complement. */
9978 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9979 return bfd_reloc_overflow
;
9981 return bfd_reloc_ok
;
9984 case R_ARM_ALU_PCREL7_0
:
9985 case R_ARM_ALU_PCREL15_8
:
9986 case R_ARM_ALU_PCREL23_15
:
9991 insn
= bfd_get_32 (input_bfd
, hit_data
);
9992 if (globals
->use_rel
)
9994 /* Extract the addend. */
9995 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
9996 signed_addend
= addend
;
9998 relocation
= value
+ signed_addend
;
10000 relocation
-= (input_section
->output_section
->vma
10001 + input_section
->output_offset
10003 insn
= (insn
& ~0xfff)
10004 | ((howto
->bitpos
<< 7) & 0xf00)
10005 | ((relocation
>> howto
->bitpos
) & 0xff);
10006 bfd_put_32 (input_bfd
, value
, hit_data
);
10008 return bfd_reloc_ok
;
10010 case R_ARM_GNU_VTINHERIT
:
10011 case R_ARM_GNU_VTENTRY
:
10012 return bfd_reloc_ok
;
10014 case R_ARM_GOTOFF32
:
10015 /* Relocation is relative to the start of the
10016 global offset table. */
10018 BFD_ASSERT (sgot
!= NULL
);
10020 return bfd_reloc_notsupported
;
10022 /* If we are addressing a Thumb function, we need to adjust the
10023 address by one, so that attempts to call the function pointer will
10024 correctly interpret it as Thumb code. */
10025 if (branch_type
== ST_BRANCH_TO_THUMB
)
10028 /* Note that sgot->output_offset is not involved in this
10029 calculation. We always want the start of .got. If we
10030 define _GLOBAL_OFFSET_TABLE in a different way, as is
10031 permitted by the ABI, we might have to change this
10033 value
-= sgot
->output_section
->vma
;
10034 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10035 contents
, rel
->r_offset
, value
,
10039 /* Use global offset table as symbol value. */
10040 BFD_ASSERT (sgot
!= NULL
);
10043 return bfd_reloc_notsupported
;
10045 *unresolved_reloc_p
= FALSE
;
10046 value
= sgot
->output_section
->vma
;
10047 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10048 contents
, rel
->r_offset
, value
,
10052 case R_ARM_GOT_PREL
:
10053 /* Relocation is to the entry for this symbol in the
10054 global offset table. */
10056 return bfd_reloc_notsupported
;
10058 if (dynreloc_st_type
== STT_GNU_IFUNC
10059 && plt_offset
!= (bfd_vma
) -1
10060 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
10062 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10063 symbol, and the relocation resolves directly to the runtime
10064 target rather than to the .iplt entry. This means that any
10065 .got entry would be the same value as the .igot.plt entry,
10066 so there's no point creating both. */
10067 sgot
= globals
->root
.igotplt
;
10068 value
= sgot
->output_offset
+ gotplt_offset
;
10070 else if (h
!= NULL
)
10074 off
= h
->got
.offset
;
10075 BFD_ASSERT (off
!= (bfd_vma
) -1);
10076 if ((off
& 1) != 0)
10078 /* We have already processed one GOT relocation against
10081 if (globals
->root
.dynamic_sections_created
10082 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10083 *unresolved_reloc_p
= FALSE
;
10087 Elf_Internal_Rela outrel
;
10089 if (h
->dynindx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10091 /* If the symbol doesn't resolve locally in a static
10092 object, we have an undefined reference. If the
10093 symbol doesn't resolve locally in a dynamic object,
10094 it should be resolved by the dynamic linker. */
10095 if (globals
->root
.dynamic_sections_created
)
10097 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
10098 *unresolved_reloc_p
= FALSE
;
10102 outrel
.r_addend
= 0;
10106 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10107 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10108 else if (bfd_link_pic (info
) &&
10109 (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10110 || h
->root
.type
!= bfd_link_hash_undefweak
))
10111 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10114 outrel
.r_addend
= dynreloc_value
;
10117 /* The GOT entry is initialized to zero by default.
10118 See if we should install a different value. */
10119 if (outrel
.r_addend
!= 0
10120 && (outrel
.r_info
== 0 || globals
->use_rel
))
10122 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10123 sgot
->contents
+ off
);
10124 outrel
.r_addend
= 0;
10127 if (outrel
.r_info
!= 0)
10129 outrel
.r_offset
= (sgot
->output_section
->vma
10130 + sgot
->output_offset
10132 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10134 h
->got
.offset
|= 1;
10136 value
= sgot
->output_offset
+ off
;
10142 BFD_ASSERT (local_got_offsets
!= NULL
&&
10143 local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
10145 off
= local_got_offsets
[r_symndx
];
10147 /* The offset must always be a multiple of 4. We use the
10148 least significant bit to record whether we have already
10149 generated the necessary reloc. */
10150 if ((off
& 1) != 0)
10154 if (globals
->use_rel
)
10155 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
10157 if (bfd_link_pic (info
) || dynreloc_st_type
== STT_GNU_IFUNC
)
10159 Elf_Internal_Rela outrel
;
10161 outrel
.r_addend
= addend
+ dynreloc_value
;
10162 outrel
.r_offset
= (sgot
->output_section
->vma
10163 + sgot
->output_offset
10165 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10166 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10168 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10169 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10172 local_got_offsets
[r_symndx
] |= 1;
10175 value
= sgot
->output_offset
+ off
;
10177 if (r_type
!= R_ARM_GOT32
)
10178 value
+= sgot
->output_section
->vma
;
10180 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10181 contents
, rel
->r_offset
, value
,
10184 case R_ARM_TLS_LDO32
:
10185 value
= value
- dtpoff_base (info
);
10187 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10188 contents
, rel
->r_offset
, value
,
10191 case R_ARM_TLS_LDM32
:
10198 off
= globals
->tls_ldm_got
.offset
;
10200 if ((off
& 1) != 0)
10204 /* If we don't know the module number, create a relocation
10206 if (bfd_link_pic (info
))
10208 Elf_Internal_Rela outrel
;
10210 if (srelgot
== NULL
)
10213 outrel
.r_addend
= 0;
10214 outrel
.r_offset
= (sgot
->output_section
->vma
10215 + sgot
->output_offset
+ off
);
10216 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
10218 if (globals
->use_rel
)
10219 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10220 sgot
->contents
+ off
);
10222 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10225 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
10227 globals
->tls_ldm_got
.offset
|= 1;
10230 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
10231 - (input_section
->output_section
->vma
+ input_section
->output_offset
+ rel
->r_offset
);
10233 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10234 contents
, rel
->r_offset
, value
,
10238 case R_ARM_TLS_CALL
:
10239 case R_ARM_THM_TLS_CALL
:
10240 case R_ARM_TLS_GD32
:
10241 case R_ARM_TLS_IE32
:
10242 case R_ARM_TLS_GOTDESC
:
10243 case R_ARM_TLS_DESCSEQ
:
10244 case R_ARM_THM_TLS_DESCSEQ
:
10246 bfd_vma off
, offplt
;
10250 BFD_ASSERT (sgot
!= NULL
);
10255 dyn
= globals
->root
.dynamic_sections_created
;
10256 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
10257 bfd_link_pic (info
),
10259 && (!bfd_link_pic (info
)
10260 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
10262 *unresolved_reloc_p
= FALSE
;
10265 off
= h
->got
.offset
;
10266 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
10267 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
10271 BFD_ASSERT (local_got_offsets
!= NULL
);
10272 off
= local_got_offsets
[r_symndx
];
10273 offplt
= local_tlsdesc_gotents
[r_symndx
];
10274 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
10277 /* Linker relaxations happen from one of the
10278 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10279 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
10280 tls_type
= GOT_TLS_IE
;
10282 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
10284 if ((off
& 1) != 0)
10288 bfd_boolean need_relocs
= FALSE
;
10289 Elf_Internal_Rela outrel
;
10292 /* The GOT entries have not been initialized yet. Do it
10293 now, and emit any relocations. If both an IE GOT and a
10294 GD GOT are necessary, we emit the GD first. */
10296 if ((bfd_link_pic (info
) || indx
!= 0)
10298 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10299 || h
->root
.type
!= bfd_link_hash_undefweak
))
10301 need_relocs
= TRUE
;
10302 BFD_ASSERT (srelgot
!= NULL
);
10305 if (tls_type
& GOT_TLS_GDESC
)
10309 /* We should have relaxed, unless this is an undefined
10311 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
10312 || bfd_link_pic (info
));
10313 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
10314 <= globals
->root
.sgotplt
->size
);
10316 outrel
.r_addend
= 0;
10317 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
10318 + globals
->root
.sgotplt
->output_offset
10320 + globals
->sgotplt_jump_table_size
);
10322 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
10323 sreloc
= globals
->root
.srelplt
;
10324 loc
= sreloc
->contents
;
10325 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
10326 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
10327 <= sreloc
->contents
+ sreloc
->size
);
10329 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
10331 /* For globals, the first word in the relocation gets
10332 the relocation index and the top bit set, or zero,
10333 if we're binding now. For locals, it gets the
10334 symbol's offset in the tls section. */
10335 bfd_put_32 (output_bfd
,
10336 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
10337 : info
->flags
& DF_BIND_NOW
? 0
10338 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
10339 globals
->root
.sgotplt
->contents
+ offplt
10340 + globals
->sgotplt_jump_table_size
);
10342 /* Second word in the relocation is always zero. */
10343 bfd_put_32 (output_bfd
, 0,
10344 globals
->root
.sgotplt
->contents
+ offplt
10345 + globals
->sgotplt_jump_table_size
+ 4);
10347 if (tls_type
& GOT_TLS_GD
)
10351 outrel
.r_addend
= 0;
10352 outrel
.r_offset
= (sgot
->output_section
->vma
10353 + sgot
->output_offset
10355 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
10357 if (globals
->use_rel
)
10358 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10359 sgot
->contents
+ cur_off
);
10361 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10364 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10365 sgot
->contents
+ cur_off
+ 4);
10368 outrel
.r_addend
= 0;
10369 outrel
.r_info
= ELF32_R_INFO (indx
,
10370 R_ARM_TLS_DTPOFF32
);
10371 outrel
.r_offset
+= 4;
10373 if (globals
->use_rel
)
10374 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10375 sgot
->contents
+ cur_off
+ 4);
10377 elf32_arm_add_dynreloc (output_bfd
, info
,
10383 /* If we are not emitting relocations for a
10384 general dynamic reference, then we must be in a
10385 static link or an executable link with the
10386 symbol binding locally. Mark it as belonging
10387 to module 1, the executable. */
10388 bfd_put_32 (output_bfd
, 1,
10389 sgot
->contents
+ cur_off
);
10390 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10391 sgot
->contents
+ cur_off
+ 4);
10397 if (tls_type
& GOT_TLS_IE
)
10402 outrel
.r_addend
= value
- dtpoff_base (info
);
10404 outrel
.r_addend
= 0;
10405 outrel
.r_offset
= (sgot
->output_section
->vma
10406 + sgot
->output_offset
10408 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
10410 if (globals
->use_rel
)
10411 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10412 sgot
->contents
+ cur_off
);
10414 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10417 bfd_put_32 (output_bfd
, tpoff (info
, value
),
10418 sgot
->contents
+ cur_off
);
10423 h
->got
.offset
|= 1;
10425 local_got_offsets
[r_symndx
] |= 1;
10428 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
)
10430 else if (tls_type
& GOT_TLS_GDESC
)
10433 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
10434 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
10436 bfd_signed_vma offset
;
10437 /* TLS stubs are arm mode. The original symbol is a
10438 data object, so branch_type is bogus. */
10439 branch_type
= ST_BRANCH_TO_ARM
;
10440 enum elf32_arm_stub_type stub_type
10441 = arm_type_of_stub (info
, input_section
, rel
,
10442 st_type
, &branch_type
,
10443 (struct elf32_arm_link_hash_entry
*)h
,
10444 globals
->tls_trampoline
, globals
->root
.splt
,
10445 input_bfd
, sym_name
);
10447 if (stub_type
!= arm_stub_none
)
10449 struct elf32_arm_stub_hash_entry
*stub_entry
10450 = elf32_arm_get_stub_entry
10451 (input_section
, globals
->root
.splt
, 0, rel
,
10452 globals
, stub_type
);
10453 offset
= (stub_entry
->stub_offset
10454 + stub_entry
->stub_sec
->output_offset
10455 + stub_entry
->stub_sec
->output_section
->vma
);
10458 offset
= (globals
->root
.splt
->output_section
->vma
10459 + globals
->root
.splt
->output_offset
10460 + globals
->tls_trampoline
);
10462 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
)
10464 unsigned long inst
;
10466 offset
-= (input_section
->output_section
->vma
10467 + input_section
->output_offset
10468 + rel
->r_offset
+ 8);
10470 inst
= offset
>> 2;
10471 inst
&= 0x00ffffff;
10472 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
10476 /* Thumb blx encodes the offset in a complicated
10478 unsigned upper_insn
, lower_insn
;
10481 offset
-= (input_section
->output_section
->vma
10482 + input_section
->output_offset
10483 + rel
->r_offset
+ 4);
10485 if (stub_type
!= arm_stub_none
10486 && arm_stub_is_thumb (stub_type
))
10488 lower_insn
= 0xd000;
10492 lower_insn
= 0xc000;
10493 /* Round up the offset to a word boundary. */
10494 offset
= (offset
+ 2) & ~2;
10498 upper_insn
= (0xf000
10499 | ((offset
>> 12) & 0x3ff)
10501 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
10502 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
10503 | ((offset
>> 1) & 0x7ff);
10504 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10505 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10506 return bfd_reloc_ok
;
10509 /* These relocations needs special care, as besides the fact
10510 they point somewhere in .gotplt, the addend must be
10511 adjusted accordingly depending on the type of instruction
10513 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
10515 unsigned long data
, insn
;
10518 data
= bfd_get_32 (input_bfd
, hit_data
);
10524 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
10525 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10526 insn
= (insn
<< 16)
10527 | bfd_get_16 (input_bfd
,
10528 contents
+ rel
->r_offset
- data
+ 2);
10529 if ((insn
& 0xf800c000) == 0xf000c000)
10532 else if ((insn
& 0xffffff00) == 0x4400)
10537 (*_bfd_error_handler
)
10538 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10539 input_bfd
, input_section
,
10540 (unsigned long)rel
->r_offset
, insn
);
10541 return bfd_reloc_notsupported
;
10546 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
10548 switch (insn
>> 24)
10550 case 0xeb: /* bl */
10551 case 0xfa: /* blx */
10555 case 0xe0: /* add */
10560 (*_bfd_error_handler
)
10561 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10562 input_bfd
, input_section
,
10563 (unsigned long)rel
->r_offset
, insn
);
10564 return bfd_reloc_notsupported
;
10568 value
+= ((globals
->root
.sgotplt
->output_section
->vma
10569 + globals
->root
.sgotplt
->output_offset
+ off
)
10570 - (input_section
->output_section
->vma
10571 + input_section
->output_offset
10573 + globals
->sgotplt_jump_table_size
);
10576 value
= ((globals
->root
.sgot
->output_section
->vma
10577 + globals
->root
.sgot
->output_offset
+ off
)
10578 - (input_section
->output_section
->vma
10579 + input_section
->output_offset
+ rel
->r_offset
));
10581 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10582 contents
, rel
->r_offset
, value
,
10586 case R_ARM_TLS_LE32
:
10587 if (bfd_link_dll (info
))
10589 (*_bfd_error_handler
)
10590 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10591 input_bfd
, input_section
,
10592 (long) rel
->r_offset
, howto
->name
);
10593 return bfd_reloc_notsupported
;
10596 value
= tpoff (info
, value
);
10598 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10599 contents
, rel
->r_offset
, value
,
10603 if (globals
->fix_v4bx
)
10605 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10607 /* Ensure that we have a BX instruction. */
10608 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
10610 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
10612 /* Branch to veneer. */
10614 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
10615 glue_addr
-= input_section
->output_section
->vma
10616 + input_section
->output_offset
10617 + rel
->r_offset
+ 8;
10618 insn
= (insn
& 0xf0000000) | 0x0a000000
10619 | ((glue_addr
>> 2) & 0x00ffffff);
10623 /* Preserve Rm (lowest four bits) and the condition code
10624 (highest four bits). Other bits encode MOV PC,Rm. */
10625 insn
= (insn
& 0xf000000f) | 0x01a0f000;
10628 bfd_put_32 (input_bfd
, insn
, hit_data
);
10630 return bfd_reloc_ok
;
10632 case R_ARM_MOVW_ABS_NC
:
10633 case R_ARM_MOVT_ABS
:
10634 case R_ARM_MOVW_PREL_NC
:
10635 case R_ARM_MOVT_PREL
:
10636 /* Until we properly support segment-base-relative addressing then
10637 we assume the segment base to be zero, as for the group relocations.
10638 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10639 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10640 case R_ARM_MOVW_BREL_NC
:
10641 case R_ARM_MOVW_BREL
:
10642 case R_ARM_MOVT_BREL
:
10644 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10646 if (globals
->use_rel
)
10648 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
10649 signed_addend
= (addend
^ 0x8000) - 0x8000;
10652 value
+= signed_addend
;
10654 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
10655 value
-= (input_section
->output_section
->vma
10656 + input_section
->output_offset
+ rel
->r_offset
);
10658 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
10659 return bfd_reloc_overflow
;
10661 if (branch_type
== ST_BRANCH_TO_THUMB
)
10664 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
10665 || r_type
== R_ARM_MOVT_BREL
)
10668 insn
&= 0xfff0f000;
10669 insn
|= value
& 0xfff;
10670 insn
|= (value
& 0xf000) << 4;
10671 bfd_put_32 (input_bfd
, insn
, hit_data
);
10673 return bfd_reloc_ok
;
10675 case R_ARM_THM_MOVW_ABS_NC
:
10676 case R_ARM_THM_MOVT_ABS
:
10677 case R_ARM_THM_MOVW_PREL_NC
:
10678 case R_ARM_THM_MOVT_PREL
:
10679 /* Until we properly support segment-base-relative addressing then
10680 we assume the segment base to be zero, as for the above relocations.
10681 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10682 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10683 as R_ARM_THM_MOVT_ABS. */
10684 case R_ARM_THM_MOVW_BREL_NC
:
10685 case R_ARM_THM_MOVW_BREL
:
10686 case R_ARM_THM_MOVT_BREL
:
10690 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
10691 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
10693 if (globals
->use_rel
)
10695 addend
= ((insn
>> 4) & 0xf000)
10696 | ((insn
>> 15) & 0x0800)
10697 | ((insn
>> 4) & 0x0700)
10699 signed_addend
= (addend
^ 0x8000) - 0x8000;
10702 value
+= signed_addend
;
10704 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
10705 value
-= (input_section
->output_section
->vma
10706 + input_section
->output_offset
+ rel
->r_offset
);
10708 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
10709 return bfd_reloc_overflow
;
10711 if (branch_type
== ST_BRANCH_TO_THUMB
)
10714 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
10715 || r_type
== R_ARM_THM_MOVT_BREL
)
10718 insn
&= 0xfbf08f00;
10719 insn
|= (value
& 0xf000) << 4;
10720 insn
|= (value
& 0x0800) << 15;
10721 insn
|= (value
& 0x0700) << 4;
10722 insn
|= (value
& 0x00ff);
10724 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10725 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10727 return bfd_reloc_ok
;
10729 case R_ARM_ALU_PC_G0_NC
:
10730 case R_ARM_ALU_PC_G1_NC
:
10731 case R_ARM_ALU_PC_G0
:
10732 case R_ARM_ALU_PC_G1
:
10733 case R_ARM_ALU_PC_G2
:
10734 case R_ARM_ALU_SB_G0_NC
:
10735 case R_ARM_ALU_SB_G1_NC
:
10736 case R_ARM_ALU_SB_G0
:
10737 case R_ARM_ALU_SB_G1
:
10738 case R_ARM_ALU_SB_G2
:
10740 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10741 bfd_vma pc
= input_section
->output_section
->vma
10742 + input_section
->output_offset
+ rel
->r_offset
;
10743 /* sb is the origin of the *segment* containing the symbol. */
10744 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10747 bfd_signed_vma signed_value
;
10750 /* Determine which group of bits to select. */
10753 case R_ARM_ALU_PC_G0_NC
:
10754 case R_ARM_ALU_PC_G0
:
10755 case R_ARM_ALU_SB_G0_NC
:
10756 case R_ARM_ALU_SB_G0
:
10760 case R_ARM_ALU_PC_G1_NC
:
10761 case R_ARM_ALU_PC_G1
:
10762 case R_ARM_ALU_SB_G1_NC
:
10763 case R_ARM_ALU_SB_G1
:
10767 case R_ARM_ALU_PC_G2
:
10768 case R_ARM_ALU_SB_G2
:
10776 /* If REL, extract the addend from the insn. If RELA, it will
10777 have already been fetched for us. */
10778 if (globals
->use_rel
)
10781 bfd_vma constant
= insn
& 0xff;
10782 bfd_vma rotation
= (insn
& 0xf00) >> 8;
10785 signed_addend
= constant
;
10788 /* Compensate for the fact that in the instruction, the
10789 rotation is stored in multiples of 2 bits. */
10792 /* Rotate "constant" right by "rotation" bits. */
10793 signed_addend
= (constant
>> rotation
) |
10794 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
10797 /* Determine if the instruction is an ADD or a SUB.
10798 (For REL, this determines the sign of the addend.) */
10799 negative
= identify_add_or_sub (insn
);
10802 (*_bfd_error_handler
)
10803 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10804 input_bfd
, input_section
,
10805 (long) rel
->r_offset
, howto
->name
);
10806 return bfd_reloc_overflow
;
10809 signed_addend
*= negative
;
10812 /* Compute the value (X) to go in the place. */
10813 if (r_type
== R_ARM_ALU_PC_G0_NC
10814 || r_type
== R_ARM_ALU_PC_G1_NC
10815 || r_type
== R_ARM_ALU_PC_G0
10816 || r_type
== R_ARM_ALU_PC_G1
10817 || r_type
== R_ARM_ALU_PC_G2
)
10819 signed_value
= value
- pc
+ signed_addend
;
10821 /* Section base relative. */
10822 signed_value
= value
- sb
+ signed_addend
;
10824 /* If the target symbol is a Thumb function, then set the
10825 Thumb bit in the address. */
10826 if (branch_type
== ST_BRANCH_TO_THUMB
)
10829 /* Calculate the value of the relevant G_n, in encoded
10830 constant-with-rotation format. */
10831 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10834 /* Check for overflow if required. */
10835 if ((r_type
== R_ARM_ALU_PC_G0
10836 || r_type
== R_ARM_ALU_PC_G1
10837 || r_type
== R_ARM_ALU_PC_G2
10838 || r_type
== R_ARM_ALU_SB_G0
10839 || r_type
== R_ARM_ALU_SB_G1
10840 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
10842 (*_bfd_error_handler
)
10843 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10844 input_bfd
, input_section
,
10845 (long) rel
->r_offset
, signed_value
< 0 ? - signed_value
: signed_value
,
10847 return bfd_reloc_overflow
;
10850 /* Mask out the value and the ADD/SUB part of the opcode; take care
10851 not to destroy the S bit. */
10852 insn
&= 0xff1ff000;
10854 /* Set the opcode according to whether the value to go in the
10855 place is negative. */
10856 if (signed_value
< 0)
10861 /* Encode the offset. */
10864 bfd_put_32 (input_bfd
, insn
, hit_data
);
10866 return bfd_reloc_ok
;
10868 case R_ARM_LDR_PC_G0
:
10869 case R_ARM_LDR_PC_G1
:
10870 case R_ARM_LDR_PC_G2
:
10871 case R_ARM_LDR_SB_G0
:
10872 case R_ARM_LDR_SB_G1
:
10873 case R_ARM_LDR_SB_G2
:
10875 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10876 bfd_vma pc
= input_section
->output_section
->vma
10877 + input_section
->output_offset
+ rel
->r_offset
;
10878 /* sb is the origin of the *segment* containing the symbol. */
10879 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10881 bfd_signed_vma signed_value
;
10884 /* Determine which groups of bits to calculate. */
10887 case R_ARM_LDR_PC_G0
:
10888 case R_ARM_LDR_SB_G0
:
10892 case R_ARM_LDR_PC_G1
:
10893 case R_ARM_LDR_SB_G1
:
10897 case R_ARM_LDR_PC_G2
:
10898 case R_ARM_LDR_SB_G2
:
10906 /* If REL, extract the addend from the insn. If RELA, it will
10907 have already been fetched for us. */
10908 if (globals
->use_rel
)
10910 int negative
= (insn
& (1 << 23)) ? 1 : -1;
10911 signed_addend
= negative
* (insn
& 0xfff);
10914 /* Compute the value (X) to go in the place. */
10915 if (r_type
== R_ARM_LDR_PC_G0
10916 || r_type
== R_ARM_LDR_PC_G1
10917 || r_type
== R_ARM_LDR_PC_G2
)
10919 signed_value
= value
- pc
+ signed_addend
;
10921 /* Section base relative. */
10922 signed_value
= value
- sb
+ signed_addend
;
10924 /* Calculate the value of the relevant G_{n-1} to obtain
10925 the residual at that stage. */
10926 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10927 group
- 1, &residual
);
10929 /* Check for overflow. */
10930 if (residual
>= 0x1000)
10932 (*_bfd_error_handler
)
10933 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10934 input_bfd
, input_section
,
10935 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
10936 return bfd_reloc_overflow
;
10939 /* Mask out the value and U bit. */
10940 insn
&= 0xff7ff000;
10942 /* Set the U bit if the value to go in the place is non-negative. */
10943 if (signed_value
>= 0)
10946 /* Encode the offset. */
10949 bfd_put_32 (input_bfd
, insn
, hit_data
);
10951 return bfd_reloc_ok
;
10953 case R_ARM_LDRS_PC_G0
:
10954 case R_ARM_LDRS_PC_G1
:
10955 case R_ARM_LDRS_PC_G2
:
10956 case R_ARM_LDRS_SB_G0
:
10957 case R_ARM_LDRS_SB_G1
:
10958 case R_ARM_LDRS_SB_G2
:
10960 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10961 bfd_vma pc
= input_section
->output_section
->vma
10962 + input_section
->output_offset
+ rel
->r_offset
;
10963 /* sb is the origin of the *segment* containing the symbol. */
10964 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10966 bfd_signed_vma signed_value
;
10969 /* Determine which groups of bits to calculate. */
10972 case R_ARM_LDRS_PC_G0
:
10973 case R_ARM_LDRS_SB_G0
:
10977 case R_ARM_LDRS_PC_G1
:
10978 case R_ARM_LDRS_SB_G1
:
10982 case R_ARM_LDRS_PC_G2
:
10983 case R_ARM_LDRS_SB_G2
:
10991 /* If REL, extract the addend from the insn. If RELA, it will
10992 have already been fetched for us. */
10993 if (globals
->use_rel
)
10995 int negative
= (insn
& (1 << 23)) ? 1 : -1;
10996 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
10999 /* Compute the value (X) to go in the place. */
11000 if (r_type
== R_ARM_LDRS_PC_G0
11001 || r_type
== R_ARM_LDRS_PC_G1
11002 || r_type
== R_ARM_LDRS_PC_G2
)
11004 signed_value
= value
- pc
+ signed_addend
;
11006 /* Section base relative. */
11007 signed_value
= value
- sb
+ signed_addend
;
11009 /* Calculate the value of the relevant G_{n-1} to obtain
11010 the residual at that stage. */
11011 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11012 group
- 1, &residual
);
11014 /* Check for overflow. */
11015 if (residual
>= 0x100)
11017 (*_bfd_error_handler
)
11018 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11019 input_bfd
, input_section
,
11020 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11021 return bfd_reloc_overflow
;
11024 /* Mask out the value and U bit. */
11025 insn
&= 0xff7ff0f0;
11027 /* Set the U bit if the value to go in the place is non-negative. */
11028 if (signed_value
>= 0)
11031 /* Encode the offset. */
11032 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
11034 bfd_put_32 (input_bfd
, insn
, hit_data
);
11036 return bfd_reloc_ok
;
11038 case R_ARM_LDC_PC_G0
:
11039 case R_ARM_LDC_PC_G1
:
11040 case R_ARM_LDC_PC_G2
:
11041 case R_ARM_LDC_SB_G0
:
11042 case R_ARM_LDC_SB_G1
:
11043 case R_ARM_LDC_SB_G2
:
11045 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11046 bfd_vma pc
= input_section
->output_section
->vma
11047 + input_section
->output_offset
+ rel
->r_offset
;
11048 /* sb is the origin of the *segment* containing the symbol. */
11049 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11051 bfd_signed_vma signed_value
;
11054 /* Determine which groups of bits to calculate. */
11057 case R_ARM_LDC_PC_G0
:
11058 case R_ARM_LDC_SB_G0
:
11062 case R_ARM_LDC_PC_G1
:
11063 case R_ARM_LDC_SB_G1
:
11067 case R_ARM_LDC_PC_G2
:
11068 case R_ARM_LDC_SB_G2
:
11076 /* If REL, extract the addend from the insn. If RELA, it will
11077 have already been fetched for us. */
11078 if (globals
->use_rel
)
11080 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11081 signed_addend
= negative
* ((insn
& 0xff) << 2);
11084 /* Compute the value (X) to go in the place. */
11085 if (r_type
== R_ARM_LDC_PC_G0
11086 || r_type
== R_ARM_LDC_PC_G1
11087 || r_type
== R_ARM_LDC_PC_G2
)
11089 signed_value
= value
- pc
+ signed_addend
;
11091 /* Section base relative. */
11092 signed_value
= value
- sb
+ signed_addend
;
11094 /* Calculate the value of the relevant G_{n-1} to obtain
11095 the residual at that stage. */
11096 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11097 group
- 1, &residual
);
11099 /* Check for overflow. (The absolute value to go in the place must be
11100 divisible by four and, after having been divided by four, must
11101 fit in eight bits.) */
11102 if ((residual
& 0x3) != 0 || residual
>= 0x400)
11104 (*_bfd_error_handler
)
11105 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11106 input_bfd
, input_section
,
11107 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11108 return bfd_reloc_overflow
;
11111 /* Mask out the value and U bit. */
11112 insn
&= 0xff7fff00;
11114 /* Set the U bit if the value to go in the place is non-negative. */
11115 if (signed_value
>= 0)
11118 /* Encode the offset. */
11119 insn
|= residual
>> 2;
11121 bfd_put_32 (input_bfd
, insn
, hit_data
);
11123 return bfd_reloc_ok
;
11125 case R_ARM_THM_ALU_ABS_G0_NC
:
11126 case R_ARM_THM_ALU_ABS_G1_NC
:
11127 case R_ARM_THM_ALU_ABS_G2_NC
:
11128 case R_ARM_THM_ALU_ABS_G3_NC
:
11130 const int shift_array
[4] = {0, 8, 16, 24};
11131 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
11132 bfd_vma addr
= value
;
11133 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
11135 /* Compute address. */
11136 if (globals
->use_rel
)
11137 signed_addend
= insn
& 0xff;
11138 addr
+= signed_addend
;
11139 if (branch_type
== ST_BRANCH_TO_THUMB
)
11141 /* Clean imm8 insn. */
11143 /* And update with correct part of address. */
11144 insn
|= (addr
>> shift
) & 0xff;
11146 bfd_put_16 (input_bfd
, insn
, hit_data
);
11149 *unresolved_reloc_p
= FALSE
;
11150 return bfd_reloc_ok
;
11153 return bfd_reloc_notsupported
;
11157 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
/* NOTE(review): this text is a damaged extraction of bfd/elf32-arm.c —
   identifiers are split across lines and some original lines (braces,
   declarations, the Thumb addend re-shift) are missing, as the embedded
   line numbers show.  Restore from upstream binutils before compiling;
   comments below annotate only the logic that is visible here.  */
11159 arm_add_to_rel (bfd
* abfd
,
11160 bfd_byte
* address
,
11161 reloc_howto_type
* howto
,
11162 bfd_signed_vma increment
)
11164 bfd_signed_vma addend
;
/* Thumb BL/B.W branches encode their offset split across two 16-bit
   instruction halves; they need field-by-field handling.  */
11166 if (howto
->type
== R_ARM_THM_CALL
11167 || howto
->type
== R_ARM_THM_JUMP24
)
11169 int upper_insn
, lower_insn
;
11172 upper_insn
= bfd_get_16 (abfd
, address
);
11173 lower_insn
= bfd_get_16 (abfd
, address
+ 2);
/* Extract the 11-bit immediate from each halfword and recombine:
   upper bits become [22:12], lower bits [11:1] of the offset.  */
11174 upper
= upper_insn
& 0x7ff;
11175 lower
= lower_insn
& 0x7ff;
11177 addend
= (upper
<< 12) | (lower
<< 1);
11178 addend
+= increment
;
/* Repack the adjusted offset into the two instruction halves,
   preserving the opcode bits (0xf800) of each.  */
11181 upper_insn
= (upper_insn
& 0xf800) | ((addend
>> 11) & 0x7ff);
11182 lower_insn
= (lower_insn
& 0xf800) | (addend
& 0x7ff);
11184 bfd_put_16 (abfd
, (bfd_vma
) upper_insn
, address
);
11185 bfd_put_16 (abfd
, (bfd_vma
) lower_insn
, address
+ 2);
/* All other reloc types: the addend lives in a contiguous bit-field
   of a single 32-bit word, described by the howto masks.  */
11191 contents
= bfd_get_32 (abfd
, address
);
11193 /* Get the (signed) value from the instruction. */
11194 addend
= contents
& howto
->src_mask
;
/* If the top bit of the field is set, sign-extend by OR-ing in all
   the bits above src_mask.  */
11195 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11197 bfd_signed_vma mask
;
11200 mask
&= ~ howto
->src_mask
;
11204 /* Add in the increment, (which is a byte value). */
11205 switch (howto
->type
)
11208 addend
+= increment
;
/* For the branch-style relocs the stored field is a word offset:
   scale up by the howto size before adding, then shift the
   undesired low bits back out.  */
11215 addend
<<= howto
->size
;
11216 addend
+= increment
;
11218 /* Should we check for overflow here ? */
11220 /* Drop any undesired bits. */
11221 addend
>>= howto
->rightshift
;
/* Splice the new addend into the destination field only.  */
11225 contents
= (contents
& ~ howto
->dst_mask
) | (addend
& howto
->dst_mask
);
11227 bfd_put_32 (abfd
, contents
, address
);
/* Nonzero if R_TYPE is any ARM TLS relocation, including the GNU
   TLS-descriptor dialect handled by IS_ARM_TLS_GNU_RELOC below.  */
11231 #define IS_ARM_TLS_RELOC(R_TYPE) \
11232 ((R_TYPE) == R_ARM_TLS_GD32 \
11233 || (R_TYPE) == R_ARM_TLS_LDO32 \
11234 || (R_TYPE) == R_ARM_TLS_LDM32 \
11235 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
11236 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
11237 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
11238 || (R_TYPE) == R_ARM_TLS_LE32 \
11239 || (R_TYPE) == R_ARM_TLS_IE32 \
11240 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11242 /* Specific set of relocations for the gnu tls dialect. */
11243 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
11244 ((R_TYPE) == R_ARM_TLS_GOTDESC \
11245 || (R_TYPE) == R_ARM_TLS_CALL \
11246 || (R_TYPE) == R_ARM_THM_TLS_CALL \
11247 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
11248 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11250 /* Relocate an ARM ELF section. */
/* NOTE(review): damaged extraction — identifiers split across lines and
   many original lines elided (braces, some declarations, several
   statements; see gaps in the embedded numbering).  Restore from
   upstream binutils before compiling.  The comments added below only
   describe what the visible fragments establish.  */
11253 elf32_arm_relocate_section (bfd
* output_bfd
,
11254 struct bfd_link_info
* info
,
11256 asection
* input_section
,
11257 bfd_byte
* contents
,
11258 Elf_Internal_Rela
* relocs
,
11259 Elf_Internal_Sym
* local_syms
,
11260 asection
** local_sections
)
11262 Elf_Internal_Shdr
*symtab_hdr
;
11263 struct elf_link_hash_entry
**sym_hashes
;
11264 Elf_Internal_Rela
*rel
;
11265 Elf_Internal_Rela
*relend
;
11267 struct elf32_arm_link_hash_table
* globals
;
11269 globals
= elf32_arm_hash_table (info
);
11270 if (globals
== NULL
)
11273 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
11274 sym_hashes
= elf_sym_hashes (input_bfd
);
/* Iterate over every relocation applied to this section.  */
11277 relend
= relocs
+ input_section
->reloc_count
;
11278 for (; rel
< relend
; rel
++)
11281 reloc_howto_type
* howto
;
11282 unsigned long r_symndx
;
11283 Elf_Internal_Sym
* sym
;
11285 struct elf_link_hash_entry
* h
;
11286 bfd_vma relocation
;
11287 bfd_reloc_status_type r
;
11290 bfd_boolean unresolved_reloc
= FALSE
;
11291 char *error_message
= NULL
;
/* Decode symbol index and reloc type; map any target-specific
   reloc number to the canonical one first.  */
11293 r_symndx
= ELF32_R_SYM (rel
->r_info
);
11294 r_type
= ELF32_R_TYPE (rel
->r_info
);
11295 r_type
= arm_real_reloc_type (globals
, r_type
);
11297 if ( r_type
== R_ARM_GNU_VTENTRY
11298 || r_type
== R_ARM_GNU_VTINHERIT
)
11301 bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
11302 howto
= bfd_reloc
.howto
;
/* Symbol indices below sh_info refer to local symbols.  */
11308 if (r_symndx
< symtab_hdr
->sh_info
)
11310 sym
= local_syms
+ r_symndx
;
11311 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
11312 sec
= local_sections
[r_symndx
];
11314 /* An object file might have a reference to a local
11315 undefined symbol. This is a daft object file, but we
11316 should at least do something about it. V4BX & NONE
11317 relocations do not use the symbol and are explicitly
11318 allowed to use the undefined symbol, so allow those.
11319 Likewise for relocations against STN_UNDEF. */
11320 if (r_type
!= R_ARM_V4BX
11321 && r_type
!= R_ARM_NONE
11322 && r_symndx
!= STN_UNDEF
11323 && bfd_is_und_section (sec
)
11324 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
11326 if (!info
->callbacks
->undefined_symbol
11327 (info
, bfd_elf_string_from_elf_section
11328 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
11329 input_bfd
, input_section
,
11330 rel
->r_offset
, TRUE
))
/* REL targets: the addend is stored in the section contents, so
   SEC_MERGE sections need it extracted, adjusted for merging, and
   written back — field layout differs per reloc type.  */
11334 if (globals
->use_rel
)
11336 relocation
= (sec
->output_section
->vma
11337 + sec
->output_offset
11339 if (!bfd_link_relocatable (info
)
11340 && (sec
->flags
& SEC_MERGE
)
11341 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11344 bfd_vma addend
, value
;
11348 case R_ARM_MOVW_ABS_NC
:
11349 case R_ARM_MOVT_ABS
:
11350 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
/* MOVW/MOVT split the 16-bit immediate into imm4:imm12;
   reassemble then sign-extend from 16 bits.  */
11351 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
11352 addend
= (addend
^ 0x8000) - 0x8000;
11355 case R_ARM_THM_MOVW_ABS_NC
:
11356 case R_ARM_THM_MOVT_ABS
:
11357 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
11359 value
|= bfd_get_16 (input_bfd
,
11360 contents
+ rel
->r_offset
+ 2);
/* Thumb MOVW/MOVT scatter the immediate over imm4:i:imm3:imm8;
   gather the pieces then sign-extend from 16 bits.  */
11361 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
11362 | ((value
& 0x04000000) >> 15);
11363 addend
= (addend
^ 0x8000) - 0x8000;
/* Default: only simple contiguous fields can be handled.  */
11367 if (howto
->rightshift
11368 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
11370 (*_bfd_error_handler
)
11371 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11372 input_bfd
, input_section
,
11373 (long) rel
->r_offset
, howto
->name
);
11377 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
11379 /* Get the (signed) value from the instruction. */
11380 addend
= value
& howto
->src_mask
;
11381 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11383 bfd_signed_vma mask
;
11386 mask
&= ~ howto
->src_mask
;
/* Let the generic merge code relocate the addend, then convert
   back to an output-section-relative value.  */
11394 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
11396 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
11398 /* Cases here must match those in the preceding
11399 switch statement. */
11402 case R_ARM_MOVW_ABS_NC
:
11403 case R_ARM_MOVT_ABS
:
11404 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
11405 | (addend
& 0xfff);
11406 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
11409 case R_ARM_THM_MOVW_ABS_NC
:
11410 case R_ARM_THM_MOVT_ABS
:
11411 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
11412 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
11413 bfd_put_16 (input_bfd
, value
>> 16,
11414 contents
+ rel
->r_offset
);
11415 bfd_put_16 (input_bfd
, value
,
11416 contents
+ rel
->r_offset
+ 2);
11420 value
= (value
& ~ howto
->dst_mask
)
11421 | (addend
& howto
->dst_mask
);
11422 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
/* RELA targets: the generic helper resolves local symbols.  */
11428 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
/* Global symbol: resolved through the ELF linker hash table.  */
11432 bfd_boolean warned
, ignored
;
11434 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
11435 r_symndx
, symtab_hdr
, sym_hashes
,
11436 h
, sec
, relocation
,
11437 unresolved_reloc
, warned
, ignored
);
11439 sym_type
= h
->type
;
11442 if (sec
!= NULL
&& discarded_section (sec
))
11443 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
11444 rel
, 1, relend
, howto
, 0, contents
);
11446 if (bfd_link_relocatable (info
))
11448 /* This is a relocatable link. We don't have to change
11449 anything, unless the reloc is against a section symbol,
11450 in which case we have to adjust according to where the
11451 section symbol winds up in the output section. */
11452 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11454 if (globals
->use_rel
)
11455 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
11456 howto
, (bfd_signed_vma
) sec
->output_offset
);
11458 rel
->r_addend
+= sec
->output_offset
;
/* Pick a human-readable symbol (or section) name for diagnostics.  */
11464 name
= h
->root
.root
.string
;
11467 name
= (bfd_elf_string_from_elf_section
11468 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
11469 if (name
== NULL
|| *name
== '\0')
11470 name
= bfd_section_name (input_bfd
, sec
);
/* Diagnose TLS relocations applied to non-TLS symbols and
   vice versa.  */
11473 if (r_symndx
!= STN_UNDEF
11474 && r_type
!= R_ARM_NONE
11476 || h
->root
.type
== bfd_link_hash_defined
11477 || h
->root
.type
== bfd_link_hash_defweak
)
11478 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
11480 (*_bfd_error_handler
)
11481 ((sym_type
== STT_TLS
11482 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11483 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11486 (long) rel
->r_offset
,
11491 /* We call elf32_arm_final_link_relocate unless we're completely
11492 done, i.e., the relaxation produced the final output we want,
11493 and we won't let anybody mess with it. Also, we have to do
11494 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11495 both in relaxed and non-relaxed cases. */
11496 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
11497 || (IS_ARM_TLS_GNU_RELOC (r_type
)
11498 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
11499 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
11502 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
11503 contents
, rel
, h
== NULL
);
11504 /* This may have been marked unresolved because it came from
11505 a shared library. But we've just dealt with that. */
11506 unresolved_reloc
= 0;
11509 r
= bfd_reloc_continue
;
11511 if (r
== bfd_reloc_continue
)
11513 unsigned char branch_type
=
11514 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
11515 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
11517 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
11518 input_section
, contents
, rel
,
11519 relocation
, info
, sec
, name
,
11520 sym_type
, branch_type
, h
,
11525 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11526 because such sections are not SEC_ALLOC and thus ld.so will
11527 not process them. */
11528 if (unresolved_reloc
11529 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
11531 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
11532 rel
->r_offset
) != (bfd_vma
) -1)
11534 (*_bfd_error_handler
)
11535 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11538 (long) rel
->r_offset
,
11540 h
->root
.root
.string
);
/* Map any relocation failure to the appropriate linker callback.  */
11544 if (r
!= bfd_reloc_ok
)
11548 case bfd_reloc_overflow
:
11549 /* If the overflowing reloc was to an undefined symbol,
11550 we have already printed one error message and there
11551 is no point complaining again. */
11553 h
->root
.type
!= bfd_link_hash_undefined
)
11554 && (!((*info
->callbacks
->reloc_overflow
)
11555 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
11556 (bfd_vma
) 0, input_bfd
, input_section
,
11561 case bfd_reloc_undefined
:
11562 if (!((*info
->callbacks
->undefined_symbol
)
11563 (info
, name
, input_bfd
, input_section
,
11564 rel
->r_offset
, TRUE
)))
11568 case bfd_reloc_outofrange
:
11569 error_message
= _("out of range");
11572 case bfd_reloc_notsupported
:
11573 error_message
= _("unsupported relocation");
11576 case bfd_reloc_dangerous
:
11577 /* error_message should already be set. */
11581 error_message
= _("unknown error");
11582 /* Fall through. */
11585 BFD_ASSERT (error_message
!= NULL
);
11586 if (!((*info
->callbacks
->reloc_dangerous
)
11587 (info
, error_message
, input_bfd
, input_section
,
11598 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11599 adds the edit to the start of the list. (The list must be built in order of
11600 ascending TINDEX: the function's callers are primarily responsible for
11601 maintaining that condition). */
/* NOTE(review): damaged extraction — some original lines (braces, the
   if/else structure around the head/tail insertion) are elided here;
   restore from upstream binutils before compiling.  */
11604 add_unwind_table_edit (arm_unwind_table_edit
**head
,
11605 arm_unwind_table_edit
**tail
,
11606 arm_unwind_edit_type type
,
11607 asection
*linked_section
,
11608 unsigned int tindex
)
/* Allocate the new node with xmalloc — aborts on OOM, so no NULL
   check is needed here.  */
11610 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
11611 xmalloc (sizeof (arm_unwind_table_edit
));
11613 new_edit
->type
= type
;
11614 new_edit
->linked_section
= linked_section
;
11615 new_edit
->index
= tindex
;
/* Non-zero TINDEX: append at the tail of the list.  */
11619 new_edit
->next
= NULL
;
11622 (*tail
)->next
= new_edit
;
11624 (*tail
) = new_edit
;
11627 (*head
) = new_edit
;
/* Zero TINDEX: push onto the front of the list instead.  */
11631 new_edit
->next
= *head
;
11640 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
11642 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
/* NOTE(review): damaged extraction — the declaration of out_sec and the
   function braces are elided; restore from upstream binutils.  */
11644 adjust_exidx_size(asection
*exidx_sec
, int adjust
)
/* Remember the pre-adjustment size the first time through, so later
   passes can still see the original (raw) size.  */
11648 if (!exidx_sec
->rawsize
)
11649 exidx_sec
->rawsize
= exidx_sec
->size
;
11651 bfd_set_section_size (exidx_sec
->owner
, exidx_sec
, exidx_sec
->size
+ adjust
);
11652 out_sec
= exidx_sec
->output_section
;
11653 /* Adjust size of output section. */
11654 bfd_set_section_size (out_sec
->owner
, out_sec
, out_sec
->size
+adjust
);
11657 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11659 insert_cantunwind_after(asection
*text_sec
, asection
*exidx_sec
)
11661 struct _arm_elf_section_data
*exidx_arm_data
;
/* Queue an INSERT_EXIDX_CANTUNWIND_AT_END edit on EXIDX_SEC's edit
   list; UINT_MAX keeps it ordered after every real table index.  */
11663 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
11664 add_unwind_table_edit (
11665 &exidx_arm_data
->u
.exidx
.unwind_edit_list
,
11666 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
11667 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
/* The marker needs one extra relocation and 8 more bytes (one
   EXIDX entry) in the section.  */
11669 exidx_arm_data
->additional_reloc_count
++;
11671 adjust_exidx_size(exidx_sec
, 8);
11674 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11675 made to those tables, such that:
11677 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11678 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11679 codes which have been inlined into the index).
11681 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11683 The edits are applied when the tables are written
11684 (in elf32_arm_write_section). */
/* NOTE(review): damaged extraction — braces, several declarations
   (inp, sec, ibfd, j, elide, unwind_type) and some statements are
   elided, per the gaps in the embedded numbering; restore from
   upstream binutils before compiling.  */
11687 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
11688 unsigned int num_text_sections
,
11689 struct bfd_link_info
*info
,
11690 bfd_boolean merge_exidx_entries
)
11693 unsigned int last_second_word
= 0, i
;
11694 asection
*last_exidx_sec
= NULL
;
11695 asection
*last_text_sec
= NULL
;
11696 int last_unwind_type
= -1;
11698 /* Walk over all EXIDX sections, and create backlinks from the corresponding
11700 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
11704 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
11706 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
11707 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
11709 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
11712 if (elf_sec
->linked_to
)
11714 Elf_Internal_Shdr
*linked_hdr
11715 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
11716 struct _arm_elf_section_data
*linked_sec_arm_data
11717 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
11719 if (linked_sec_arm_data
== NULL
)
11722 /* Link this .ARM.exidx section back from the text section it
11724 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
11729 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
11730 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
11731 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
11733 for (i
= 0; i
< num_text_sections
; i
++)
11735 asection
*sec
= text_section_order
[i
];
11736 asection
*exidx_sec
;
11737 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
11738 struct _arm_elf_section_data
*exidx_arm_data
;
11739 bfd_byte
*contents
= NULL
;
11740 int deleted_exidx_bytes
= 0;
11742 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
11743 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
11744 Elf_Internal_Shdr
*hdr
;
11747 if (arm_data
== NULL
)
11750 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
11751 if (exidx_sec
== NULL
)
11753 /* Section has no unwind data. */
11754 if (last_unwind_type
== 0 || !last_exidx_sec
)
11757 /* Ignore zero sized sections. */
11758 if (sec
->size
== 0)
11761 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
11762 last_unwind_type
= 0;
11766 /* Skip /DISCARD/ sections. */
11767 if (bfd_is_abs_section (exidx_sec
->output_section
))
11770 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
11771 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
11774 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
11775 if (exidx_arm_data
== NULL
)
11778 ibfd
= exidx_sec
->owner
;
/* Use the cached contents when present, otherwise read the section
   into a freshly malloc'd buffer (freed below).  */
11780 if (hdr
->contents
!= NULL
)
11781 contents
= hdr
->contents
;
11782 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
11786 if (last_unwind_type
> 0)
11788 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
11789 /* Add cantunwind if first unwind item does not match section
11791 if (first_word
!= sec
->vma
)
11793 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
11794 last_unwind_type
= 0;
/* Scan the table; each EXIDX entry is 8 bytes, and the second word
   classifies it.  */
11798 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
11800 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
11804 /* An EXIDX_CANTUNWIND entry. */
11805 if (second_word
== 1)
11807 if (last_unwind_type
== 0)
11811 /* Inlined unwinding data. Merge if equal to previous. */
11812 else if ((second_word
& 0x80000000) != 0)
11814 if (merge_exidx_entries
11815 && last_second_word
== second_word
&& last_unwind_type
== 1)
11818 last_second_word
= second_word
;
11820 /* Normal table entry. In theory we could merge these too,
11821 but duplicate entries are likely to be much less common. */
/* Record a deletion edit for each elided entry; the actual bytes
   are removed later in elf32_arm_write_section.  */
11825 if (elide
&& !bfd_link_relocatable (info
))
11827 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
11828 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
11830 deleted_exidx_bytes
+= 8;
11833 last_unwind_type
= unwind_type
;
11836 /* Free contents if we allocated it ourselves. */
11837 if (contents
!= hdr
->contents
)
11840 /* Record edits to be applied later (in elf32_arm_write_section). */
11841 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
11842 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
11844 if (deleted_exidx_bytes
> 0)
11845 adjust_exidx_size(exidx_sec
, -deleted_exidx_bytes
);
11847 last_exidx_sec
= exidx_sec
;
11848 last_text_sec
= sec
;
11851 /* Add terminating CANTUNWIND entry. */
11852 if (!bfd_link_relocatable (info
) && last_exidx_sec
11853 && last_unwind_type
!= 0)
11854 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
11860 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
11861 bfd
*ibfd
, const char *name
)
11863 asection
*sec
, *osec
;
11865 sec
= bfd_get_linker_section (ibfd
, name
);
11866 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
11869 osec
= sec
->output_section
;
11870 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
11873 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
11874 sec
->output_offset
, sec
->size
))
11881 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
11883 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
11884 asection
*sec
, *osec
;
11886 if (globals
== NULL
)
11889 /* Invoke the regular ELF backend linker to do all the work. */
11890 if (!bfd_elf_final_link (abfd
, info
))
11893 /* Process stub sections (eg BE8 encoding, ...). */
11894 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
11896 for (i
=0; i
<htab
->top_id
; i
++)
11898 sec
= htab
->stub_group
[i
].stub_sec
;
11899 /* Only process it once, in its link_sec slot. */
11900 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
11902 osec
= sec
->output_section
;
11903 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
11904 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
11905 sec
->output_offset
, sec
->size
))
11910 /* Write out any glue sections now that we have created all the
11912 if (globals
->bfd_of_glue_owner
!= NULL
)
11914 if (! elf32_arm_output_glue_section (info
, abfd
,
11915 globals
->bfd_of_glue_owner
,
11916 ARM2THUMB_GLUE_SECTION_NAME
))
11919 if (! elf32_arm_output_glue_section (info
, abfd
,
11920 globals
->bfd_of_glue_owner
,
11921 THUMB2ARM_GLUE_SECTION_NAME
))
11924 if (! elf32_arm_output_glue_section (info
, abfd
,
11925 globals
->bfd_of_glue_owner
,
11926 VFP11_ERRATUM_VENEER_SECTION_NAME
))
11929 if (! elf32_arm_output_glue_section (info
, abfd
,
11930 globals
->bfd_of_glue_owner
,
11931 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
11934 if (! elf32_arm_output_glue_section (info
, abfd
,
11935 globals
->bfd_of_glue_owner
,
11936 ARM_BX_GLUE_SECTION_NAME
))
11943 /* Return a best guess for the machine number based on the attributes. */
11945 static unsigned int
11946 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
11948 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
11952 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
11953 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
11954 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
11956 case TAG_CPU_ARCH_V5TE
:
11960 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
11961 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
11965 if (strcmp (name
, "IWMMXT2") == 0)
11966 return bfd_mach_arm_iWMMXt2
;
11968 if (strcmp (name
, "IWMMXT") == 0)
11969 return bfd_mach_arm_iWMMXt
;
11971 if (strcmp (name
, "XSCALE") == 0)
11975 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
11976 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
11979 case 1: return bfd_mach_arm_iWMMXt
;
11980 case 2: return bfd_mach_arm_iWMMXt2
;
11981 default: return bfd_mach_arm_XScale
;
11986 return bfd_mach_arm_5TE
;
11990 return bfd_mach_arm_unknown
;
11994 /* Set the right machine number. */
11997 elf32_arm_object_p (bfd
*abfd
)
12001 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
12003 if (mach
== bfd_mach_arm_unknown
)
12005 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
12006 mach
= bfd_mach_arm_ep9312
;
12008 mach
= bfd_arm_get_mach_from_attributes (abfd
);
12011 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
12015 /* Function to keep ARM specific flags in the ELF header. */
12018 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
12020 if (elf_flags_init (abfd
)
12021 && elf_elfheader (abfd
)->e_flags
!= flags
)
12023 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
12025 if (flags
& EF_ARM_INTERWORK
)
12026 (*_bfd_error_handler
)
12027 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12031 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12037 elf_elfheader (abfd
)->e_flags
= flags
;
12038 elf_flags_init (abfd
) = TRUE
;
12044 /* Copy backend specific data from one object module to another. */
12047 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
12050 flagword out_flags
;
12052 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
12055 in_flags
= elf_elfheader (ibfd
)->e_flags
;
12056 out_flags
= elf_elfheader (obfd
)->e_flags
;
12058 if (elf_flags_init (obfd
)
12059 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
12060 && in_flags
!= out_flags
)
12062 /* Cannot mix APCS26 and APCS32 code. */
12063 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
12066 /* Cannot mix float APCS and non-float APCS code. */
12067 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
12070 /* If the src and dest have different interworking flags
12071 then turn off the interworking bit. */
12072 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
12074 if (out_flags
& EF_ARM_INTERWORK
)
12076 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12079 in_flags
&= ~EF_ARM_INTERWORK
;
12082 /* Likewise for PIC, though don't warn for this case. */
12083 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
12084 in_flags
&= ~EF_ARM_PIC
;
12087 elf_elfheader (obfd
)->e_flags
= in_flags
;
12088 elf_flags_init (obfd
) = TRUE
;
12090 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
12120 /* Determine whether an object attribute tag takes an integer, a
12124 elf32_arm_obj_attrs_arg_type (int tag
)
12126 if (tag
== Tag_compatibility
)
12127 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
12128 else if (tag
== Tag_nodefaults
)
12129 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
12130 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
12131 return ATTR_TYPE_FLAG_STR_VAL
;
12133 return ATTR_TYPE_FLAG_INT_VAL
;
12135 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
12138 /* The ABI defines that Tag_conformance should be emitted first, and that
12139 Tag_nodefaults should be second (if either is defined). This sets those
12140 two positions, and bumps up the position of all the remaining tags to
12143 elf32_arm_obj_attrs_order (int num
)
12145 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
12146 return Tag_conformance
;
12147 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
12148 return Tag_nodefaults
;
12149 if ((num
- 2) < Tag_nodefaults
)
12151 if ((num
- 1) < Tag_conformance
)
12156 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12158 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
12160 if ((tag
& 127) < 64)
12163 (_("%B: Unknown mandatory EABI object attribute %d"),
12165 bfd_set_error (bfd_error_bad_value
);
12171 (_("Warning: %B: Unknown EABI object attribute %d"),
12177 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12178 Returns -1 if no architecture could be read. */
12181 get_secondary_compatible_arch (bfd
*abfd
)
12183 obj_attribute
*attr
=
12184 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12186 /* Note: the tag and its argument below are uleb128 values, though
12187 currently-defined values fit in one byte for each. */
12189 && attr
->s
[0] == Tag_CPU_arch
12190 && (attr
->s
[1] & 128) != 128
12191 && attr
->s
[2] == 0)
12194 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12198 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12199 The tag is removed if ARCH is -1. */
12202 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
12204 obj_attribute
*attr
=
12205 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12213 /* Note: the tag and its argument below are uleb128 values, though
12214 currently-defined values fit in one byte for each. */
12216 attr
->s
= (char *) bfd_alloc (abfd
, 3);
12217 attr
->s
[0] = Tag_CPU_arch
;
12222 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12226 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
12227 int newtag
, int secondary_compat
)
12229 #define T(X) TAG_CPU_ARCH_##X
12230 int tagl
, tagh
, result
;
12233 T(V6T2
), /* PRE_V4. */
12235 T(V6T2
), /* V4T. */
12236 T(V6T2
), /* V5T. */
12237 T(V6T2
), /* V5TE. */
12238 T(V6T2
), /* V5TEJ. */
12241 T(V6T2
) /* V6T2. */
12245 T(V6K
), /* PRE_V4. */
12249 T(V6K
), /* V5TE. */
12250 T(V6K
), /* V5TEJ. */
12252 T(V6KZ
), /* V6KZ. */
12258 T(V7
), /* PRE_V4. */
12263 T(V7
), /* V5TEJ. */
12276 T(V6K
), /* V5TE. */
12277 T(V6K
), /* V5TEJ. */
12279 T(V6KZ
), /* V6KZ. */
12283 T(V6_M
) /* V6_M. */
12285 const int v6s_m
[] =
12291 T(V6K
), /* V5TE. */
12292 T(V6K
), /* V5TEJ. */
12294 T(V6KZ
), /* V6KZ. */
12298 T(V6S_M
), /* V6_M. */
12299 T(V6S_M
) /* V6S_M. */
12301 const int v7e_m
[] =
12305 T(V7E_M
), /* V4T. */
12306 T(V7E_M
), /* V5T. */
12307 T(V7E_M
), /* V5TE. */
12308 T(V7E_M
), /* V5TEJ. */
12309 T(V7E_M
), /* V6. */
12310 T(V7E_M
), /* V6KZ. */
12311 T(V7E_M
), /* V6T2. */
12312 T(V7E_M
), /* V6K. */
12313 T(V7E_M
), /* V7. */
12314 T(V7E_M
), /* V6_M. */
12315 T(V7E_M
), /* V6S_M. */
12316 T(V7E_M
) /* V7E_M. */
12320 T(V8
), /* PRE_V4. */
12325 T(V8
), /* V5TEJ. */
12332 T(V8
), /* V6S_M. */
12333 T(V8
), /* V7E_M. */
12336 const int v8m_baseline
[] =
12349 T(V8M_BASE
), /* V6_M. */
12350 T(V8M_BASE
), /* V6S_M. */
12354 T(V8M_BASE
) /* V8-M BASELINE. */
12356 const int v8m_mainline
[] =
12368 T(V8M_MAIN
), /* V7. */
12369 T(V8M_MAIN
), /* V6_M. */
12370 T(V8M_MAIN
), /* V6S_M. */
12371 T(V8M_MAIN
), /* V7E_M. */
12374 T(V8M_MAIN
), /* V8-M BASELINE. */
12375 T(V8M_MAIN
) /* V8-M MAINLINE. */
12377 const int v4t_plus_v6_m
[] =
12383 T(V5TE
), /* V5TE. */
12384 T(V5TEJ
), /* V5TEJ. */
12386 T(V6KZ
), /* V6KZ. */
12387 T(V6T2
), /* V6T2. */
12390 T(V6_M
), /* V6_M. */
12391 T(V6S_M
), /* V6S_M. */
12392 T(V7E_M
), /* V7E_M. */
12395 T(V8M_BASE
), /* V8-M BASELINE. */
12396 T(V8M_MAIN
), /* V8-M MAINLINE. */
12397 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
12399 const int *comb
[] =
12411 /* Pseudo-architecture. */
12415 /* Check we've not got a higher architecture than we know about. */
12417 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
12419 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd
);
12423 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12425 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
12426 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
12427 oldtag
= T(V4T_PLUS_V6_M
);
12429 /* And override the new tag if we have a Tag_also_compatible_with on the
12432 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
12433 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
12434 newtag
= T(V4T_PLUS_V6_M
);
12436 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
12437 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
12439 /* Architectures before V6KZ add features monotonically. */
12440 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
12443 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
12445 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12446 as the canonical version. */
12447 if (result
== T(V4T_PLUS_V6_M
))
12450 *secondary_compat_out
= T(V6_M
);
12453 *secondary_compat_out
= -1;
12457 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12458 ibfd
, oldtag
, newtag
);
12466 /* Query attributes object to see if integer divide instructions may be
12467 present in an object. */
12469 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
12471 int arch
= attr
[Tag_CPU_arch
].i
;
12472 int profile
= attr
[Tag_CPU_arch_profile
].i
;
12474 switch (attr
[Tag_DIV_use
].i
)
12477 /* Integer divide allowed if instruction contained in archetecture. */
12478 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
12480 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
12486 /* Integer divide explicitly prohibited. */
12490 /* Unrecognised case - treat as allowing divide everywhere. */
12492 /* Integer divide allowed in ARM state. */
12497 /* Query attributes object to see if integer divide instructions are
12498 forbidden to be in the object. This is not the inverse of
12499 elf32_arm_attributes_accept_div. */
12501 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
12503 return attr
[Tag_DIV_use
].i
== 1;
12506 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12507 are conflicting attributes. */
12510 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, bfd
*obfd
)
12512 obj_attribute
*in_attr
;
12513 obj_attribute
*out_attr
;
12514 /* Some tags have 0 = don't care, 1 = strong requirement,
12515 2 = weak requirement. */
12516 static const int order_021
[3] = {0, 2, 1};
12518 bfd_boolean result
= TRUE
;
12519 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
12521 /* Skip the linker stubs file. This preserves previous behavior
12522 of accepting unknown attributes in the first input file - but
12524 if (ibfd
->flags
& BFD_LINKER_CREATED
)
12527 /* Skip any input that hasn't attribute section.
12528 This enables to link object files without attribute section with
12530 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
12533 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
12535 /* This is the first object. Copy the attributes. */
12536 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
12538 out_attr
= elf_known_obj_attributes_proc (obfd
);
12540 /* Use the Tag_null value to indicate the attributes have been
12544 /* We do not output objects with Tag_MPextension_use_legacy - we move
12545 the attribute's value to Tag_MPextension_use. */
12546 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
12548 if (out_attr
[Tag_MPextension_use
].i
!= 0
12549 && out_attr
[Tag_MPextension_use_legacy
].i
12550 != out_attr
[Tag_MPextension_use
].i
)
12553 (_("Error: %B has both the current and legacy "
12554 "Tag_MPextension_use attributes"), ibfd
);
12558 out_attr
[Tag_MPextension_use
] =
12559 out_attr
[Tag_MPextension_use_legacy
];
12560 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
12561 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
12567 in_attr
= elf_known_obj_attributes_proc (ibfd
);
12568 out_attr
= elf_known_obj_attributes_proc (obfd
);
12569 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12570 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
12572 /* Ignore mismatches if the object doesn't use floating point or is
12573 floating point ABI independent. */
12574 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
12575 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12576 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
12577 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
12578 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12579 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
12582 (_("error: %B uses VFP register arguments, %B does not"),
12583 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
12584 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
12589 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
12591 /* Merge this attribute with existing attributes. */
12594 case Tag_CPU_raw_name
:
12596 /* These are merged after Tag_CPU_arch. */
12599 case Tag_ABI_optimization_goals
:
12600 case Tag_ABI_FP_optimization_goals
:
12601 /* Use the first value seen. */
12606 int secondary_compat
= -1, secondary_compat_out
= -1;
12607 unsigned int saved_out_attr
= out_attr
[i
].i
;
12609 static const char *name_table
[] =
12611 /* These aren't real CPU names, but we can't guess
12612 that from the architecture version alone. */
12628 "ARM v8-M.baseline",
12629 "ARM v8-M.mainline",
12632 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12633 secondary_compat
= get_secondary_compatible_arch (ibfd
);
12634 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
12635 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
12636 &secondary_compat_out
,
12640 /* Return with error if failed to merge. */
12641 if (arch_attr
== -1)
12644 out_attr
[i
].i
= arch_attr
;
12646 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
12648 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12649 if (out_attr
[i
].i
== saved_out_attr
)
12650 ; /* Leave the names alone. */
12651 else if (out_attr
[i
].i
== in_attr
[i
].i
)
12653 /* The output architecture has been changed to match the
12654 input architecture. Use the input names. */
12655 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
12656 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
12658 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
12659 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
12664 out_attr
[Tag_CPU_name
].s
= NULL
;
12665 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
12668 /* If we still don't have a value for Tag_CPU_name,
12669 make one up now. Tag_CPU_raw_name remains blank. */
12670 if (out_attr
[Tag_CPU_name
].s
== NULL
12671 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
12672 out_attr
[Tag_CPU_name
].s
=
12673 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
12677 case Tag_ARM_ISA_use
:
12678 case Tag_THUMB_ISA_use
:
12679 case Tag_WMMX_arch
:
12680 case Tag_Advanced_SIMD_arch
:
12681 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12682 case Tag_ABI_FP_rounding
:
12683 case Tag_ABI_FP_exceptions
:
12684 case Tag_ABI_FP_user_exceptions
:
12685 case Tag_ABI_FP_number_model
:
12686 case Tag_FP_HP_extension
:
12687 case Tag_CPU_unaligned_access
:
12689 case Tag_MPextension_use
:
12690 /* Use the largest value specified. */
12691 if (in_attr
[i
].i
> out_attr
[i
].i
)
12692 out_attr
[i
].i
= in_attr
[i
].i
;
12695 case Tag_ABI_align_preserved
:
12696 case Tag_ABI_PCS_RO_data
:
12697 /* Use the smallest value specified. */
12698 if (in_attr
[i
].i
< out_attr
[i
].i
)
12699 out_attr
[i
].i
= in_attr
[i
].i
;
12702 case Tag_ABI_align_needed
:
12703 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
12704 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
12705 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
12707 /* This error message should be enabled once all non-conformant
12708 binaries in the toolchain have had the attributes set
12711 (_("error: %B: 8-byte data alignment conflicts with %B"),
12715 /* Fall through. */
12716 case Tag_ABI_FP_denormal
:
12717 case Tag_ABI_PCS_GOT_use
:
12718 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12719 value if greater than 2 (for future-proofing). */
12720 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
12721 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
12722 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
12723 out_attr
[i
].i
= in_attr
[i
].i
;
12726 case Tag_Virtualization_use
:
12727 /* The virtualization tag effectively stores two bits of
12728 information: the intended use of TrustZone (in bit 0), and the
12729 intended use of Virtualization (in bit 1). */
12730 if (out_attr
[i
].i
== 0)
12731 out_attr
[i
].i
= in_attr
[i
].i
;
12732 else if (in_attr
[i
].i
!= 0
12733 && in_attr
[i
].i
!= out_attr
[i
].i
)
12735 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
12740 (_("error: %B: unable to merge virtualization attributes "
12748 case Tag_CPU_arch_profile
:
12749 if (out_attr
[i
].i
!= in_attr
[i
].i
)
12751 /* 0 will merge with anything.
12752 'A' and 'S' merge to 'A'.
12753 'R' and 'S' merge to 'R'.
12754 'M' and 'A|R|S' is an error. */
12755 if (out_attr
[i
].i
== 0
12756 || (out_attr
[i
].i
== 'S'
12757 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
12758 out_attr
[i
].i
= in_attr
[i
].i
;
12759 else if (in_attr
[i
].i
== 0
12760 || (in_attr
[i
].i
== 'S'
12761 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
12762 ; /* Do nothing. */
12766 (_("error: %B: Conflicting architecture profiles %c/%c"),
12768 in_attr
[i
].i
? in_attr
[i
].i
: '0',
12769 out_attr
[i
].i
? out_attr
[i
].i
: '0');
12775 case Tag_DSP_extension
:
12776 /* No need to change output value if any of:
12777 - pre (<=) ARMv5T input architecture (do not have DSP)
12778 - M input profile not ARMv7E-M and do not have DSP. */
12779 if (in_attr
[Tag_CPU_arch
].i
<= 3
12780 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
12781 && in_attr
[Tag_CPU_arch
].i
!= 13
12782 && in_attr
[i
].i
== 0))
12783 ; /* Do nothing. */
12784 /* Output value should be 0 if DSP part of architecture, ie.
12785 - post (>=) ARMv5te architecture output
12786 - A, R or S profile output or ARMv7E-M output architecture. */
12787 else if (out_attr
[Tag_CPU_arch
].i
>= 4
12788 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
12789 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
12790 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
12791 || out_attr
[Tag_CPU_arch
].i
== 13))
12793 /* Otherwise, DSP instructions are added and not part of output
12801 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12802 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12803 when it's 0. It might mean absence of FP hardware if
12804 Tag_FP_arch is zero. */
12806 #define VFP_VERSION_COUNT 9
12807 static const struct
12811 } vfp_versions
[VFP_VERSION_COUNT
] =
12827 /* If the output has no requirement about FP hardware,
12828 follow the requirement of the input. */
12829 if (out_attr
[i
].i
== 0)
12831 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
12832 out_attr
[i
].i
= in_attr
[i
].i
;
12833 out_attr
[Tag_ABI_HardFP_use
].i
12834 = in_attr
[Tag_ABI_HardFP_use
].i
;
12837 /* If the input has no requirement about FP hardware, do
12839 else if (in_attr
[i
].i
== 0)
12841 BFD_ASSERT (in_attr
[Tag_ABI_HardFP_use
].i
== 0);
12845 /* Both the input and the output have nonzero Tag_FP_arch.
12846 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12848 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12850 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
12851 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
12853 /* If the input and the output have different Tag_ABI_HardFP_use,
12854 the combination of them is 0 (implied by Tag_FP_arch). */
12855 else if (in_attr
[Tag_ABI_HardFP_use
].i
12856 != out_attr
[Tag_ABI_HardFP_use
].i
)
12857 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
12859 /* Now we can handle Tag_FP_arch. */
12861 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12862 pick the biggest. */
12863 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
12864 && in_attr
[i
].i
> out_attr
[i
].i
)
12866 out_attr
[i
] = in_attr
[i
];
12869 /* The output uses the superset of input features
12870 (ISA version) and registers. */
12871 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
12872 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
12873 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
12874 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
12875 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
12876 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
12877 /* This assumes all possible supersets are also a valid
12879 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
12881 if (regs
== vfp_versions
[newval
].regs
12882 && ver
== vfp_versions
[newval
].ver
)
12885 out_attr
[i
].i
= newval
;
12888 case Tag_PCS_config
:
12889 if (out_attr
[i
].i
== 0)
12890 out_attr
[i
].i
= in_attr
[i
].i
;
12891 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
12893 /* It's sometimes ok to mix different configs, so this is only
12896 (_("Warning: %B: Conflicting platform configuration"), ibfd
);
12899 case Tag_ABI_PCS_R9_use
:
12900 if (in_attr
[i
].i
!= out_attr
[i
].i
12901 && out_attr
[i
].i
!= AEABI_R9_unused
12902 && in_attr
[i
].i
!= AEABI_R9_unused
)
12905 (_("error: %B: Conflicting use of R9"), ibfd
);
12908 if (out_attr
[i
].i
== AEABI_R9_unused
)
12909 out_attr
[i
].i
= in_attr
[i
].i
;
12911 case Tag_ABI_PCS_RW_data
:
12912 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
12913 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
12914 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
12917 (_("error: %B: SB relative addressing conflicts with use of R9"),
12921 /* Use the smallest value specified. */
12922 if (in_attr
[i
].i
< out_attr
[i
].i
)
12923 out_attr
[i
].i
= in_attr
[i
].i
;
12925 case Tag_ABI_PCS_wchar_t
:
12926 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
12927 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
12930 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12931 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
12933 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
12934 out_attr
[i
].i
= in_attr
[i
].i
;
12936 case Tag_ABI_enum_size
:
12937 if (in_attr
[i
].i
!= AEABI_enum_unused
)
12939 if (out_attr
[i
].i
== AEABI_enum_unused
12940 || out_attr
[i
].i
== AEABI_enum_forced_wide
)
12942 /* The existing object is compatible with anything.
12943 Use whatever requirements the new object has. */
12944 out_attr
[i
].i
= in_attr
[i
].i
;
12946 else if (in_attr
[i
].i
!= AEABI_enum_forced_wide
12947 && out_attr
[i
].i
!= in_attr
[i
].i
12948 && !elf_arm_tdata (obfd
)->no_enum_size_warning
)
12950 static const char *aeabi_enum_names
[] =
12951 { "", "variable-size", "32-bit", "" };
12952 const char *in_name
=
12953 in_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
12954 ? aeabi_enum_names
[in_attr
[i
].i
]
12956 const char *out_name
=
12957 out_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
12958 ? aeabi_enum_names
[out_attr
[i
].i
]
12961 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12962 ibfd
, in_name
, out_name
);
12966 case Tag_ABI_VFP_args
:
12969 case Tag_ABI_WMMX_args
:
12970 if (in_attr
[i
].i
!= out_attr
[i
].i
)
12973 (_("error: %B uses iWMMXt register arguments, %B does not"),
12978 case Tag_compatibility
:
12979 /* Merged in target-independent code. */
12981 case Tag_ABI_HardFP_use
:
12982 /* This is handled along with Tag_FP_arch. */
12984 case Tag_ABI_FP_16bit_format
:
12985 if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= 0)
12987 if (in_attr
[i
].i
!= out_attr
[i
].i
)
12990 (_("error: fp16 format mismatch between %B and %B"),
12995 if (in_attr
[i
].i
!= 0)
12996 out_attr
[i
].i
= in_attr
[i
].i
;
13000 /* A value of zero on input means that the divide instruction may
13001 be used if available in the base architecture as specified via
13002 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13003 the user did not want divide instructions. A value of 2
13004 explicitly means that divide instructions were allowed in ARM
13005 and Thumb state. */
13006 if (in_attr
[i
].i
== out_attr
[i
].i
)
13007 /* Do nothing. */ ;
13008 else if (elf32_arm_attributes_forbid_div (in_attr
)
13009 && !elf32_arm_attributes_accept_div (out_attr
))
13011 else if (elf32_arm_attributes_forbid_div (out_attr
)
13012 && elf32_arm_attributes_accept_div (in_attr
))
13013 out_attr
[i
].i
= in_attr
[i
].i
;
13014 else if (in_attr
[i
].i
== 2)
13015 out_attr
[i
].i
= in_attr
[i
].i
;
13018 case Tag_MPextension_use_legacy
:
13019 /* We don't output objects with Tag_MPextension_use_legacy - we
13020 move the value to Tag_MPextension_use. */
13021 if (in_attr
[i
].i
!= 0 && in_attr
[Tag_MPextension_use
].i
!= 0)
13023 if (in_attr
[Tag_MPextension_use
].i
!= in_attr
[i
].i
)
13026 (_("%B has has both the current and legacy "
13027 "Tag_MPextension_use attributes"),
13033 if (in_attr
[i
].i
> out_attr
[Tag_MPextension_use
].i
)
13034 out_attr
[Tag_MPextension_use
] = in_attr
[i
];
13038 case Tag_nodefaults
:
13039 /* This tag is set if it exists, but the value is unused (and is
13040 typically zero). We don't actually need to do anything here -
13041 the merge happens automatically when the type flags are merged
13044 case Tag_also_compatible_with
:
13045 /* Already done in Tag_CPU_arch. */
13047 case Tag_conformance
:
13048 /* Keep the attribute if it matches. Throw it away otherwise.
13049 No attribute means no claim to conform. */
13050 if (!in_attr
[i
].s
|| !out_attr
[i
].s
13051 || strcmp (in_attr
[i
].s
, out_attr
[i
].s
) != 0)
13052 out_attr
[i
].s
= NULL
;
13057 = result
&& _bfd_elf_merge_unknown_attribute_low (ibfd
, obfd
, i
);
13060 /* If out_attr was copied from in_attr then it won't have a type yet. */
13061 if (in_attr
[i
].type
&& !out_attr
[i
].type
)
13062 out_attr
[i
].type
= in_attr
[i
].type
;
13065 /* Merge Tag_compatibility attributes and any common GNU ones. */
13066 if (!_bfd_elf_merge_object_attributes (ibfd
, obfd
))
13069 /* Check for any attributes not known on ARM. */
13070 result
&= _bfd_elf_merge_unknown_attribute_list (ibfd
, obfd
);
13076 /* Return TRUE if the two EABI versions are incompatible. */
13079 elf32_arm_versions_compatible (unsigned iver
, unsigned over
)
13081 /* v4 and v5 are the same spec before and after it was released,
13082 so allow mixing them. */
13083 if ((iver
== EF_ARM_EABI_VER4
&& over
== EF_ARM_EABI_VER5
)
13084 || (iver
== EF_ARM_EABI_VER5
&& over
== EF_ARM_EABI_VER4
))
13087 return (iver
== over
);
13090 /* Merge backend specific data from an object file to the output
13091 object file when linking. */
13094 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
);
13096 /* Display the flags field. */
13099 elf32_arm_print_private_bfd_data (bfd
*abfd
, void * ptr
)
13101 FILE * file
= (FILE *) ptr
;
13102 unsigned long flags
;
13104 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
13106 /* Print normal ELF private data. */
13107 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
13109 flags
= elf_elfheader (abfd
)->e_flags
;
13110 /* Ignore init flag - it may not be set, despite the flags field
13111 containing valid data. */
13113 /* xgettext:c-format */
13114 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
13116 switch (EF_ARM_EABI_VERSION (flags
))
13118 case EF_ARM_EABI_UNKNOWN
:
13119 /* The following flag bits are GNU extensions and not part of the
13120 official ARM ELF extended ABI. Hence they are only decoded if
13121 the EABI version is not set. */
13122 if (flags
& EF_ARM_INTERWORK
)
13123 fprintf (file
, _(" [interworking enabled]"));
13125 if (flags
& EF_ARM_APCS_26
)
13126 fprintf (file
, " [APCS-26]");
13128 fprintf (file
, " [APCS-32]");
13130 if (flags
& EF_ARM_VFP_FLOAT
)
13131 fprintf (file
, _(" [VFP float format]"));
13132 else if (flags
& EF_ARM_MAVERICK_FLOAT
)
13133 fprintf (file
, _(" [Maverick float format]"));
13135 fprintf (file
, _(" [FPA float format]"));
13137 if (flags
& EF_ARM_APCS_FLOAT
)
13138 fprintf (file
, _(" [floats passed in float registers]"));
13140 if (flags
& EF_ARM_PIC
)
13141 fprintf (file
, _(" [position independent]"));
13143 if (flags
& EF_ARM_NEW_ABI
)
13144 fprintf (file
, _(" [new ABI]"));
13146 if (flags
& EF_ARM_OLD_ABI
)
13147 fprintf (file
, _(" [old ABI]"));
13149 if (flags
& EF_ARM_SOFT_FLOAT
)
13150 fprintf (file
, _(" [software FP]"));
13152 flags
&= ~(EF_ARM_INTERWORK
| EF_ARM_APCS_26
| EF_ARM_APCS_FLOAT
13153 | EF_ARM_PIC
| EF_ARM_NEW_ABI
| EF_ARM_OLD_ABI
13154 | EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
13155 | EF_ARM_MAVERICK_FLOAT
);
13158 case EF_ARM_EABI_VER1
:
13159 fprintf (file
, _(" [Version1 EABI]"));
13161 if (flags
& EF_ARM_SYMSARESORTED
)
13162 fprintf (file
, _(" [sorted symbol table]"));
13164 fprintf (file
, _(" [unsorted symbol table]"));
13166 flags
&= ~ EF_ARM_SYMSARESORTED
;
13169 case EF_ARM_EABI_VER2
:
13170 fprintf (file
, _(" [Version2 EABI]"));
13172 if (flags
& EF_ARM_SYMSARESORTED
)
13173 fprintf (file
, _(" [sorted symbol table]"));
13175 fprintf (file
, _(" [unsorted symbol table]"));
13177 if (flags
& EF_ARM_DYNSYMSUSESEGIDX
)
13178 fprintf (file
, _(" [dynamic symbols use segment index]"));
13180 if (flags
& EF_ARM_MAPSYMSFIRST
)
13181 fprintf (file
, _(" [mapping symbols precede others]"));
13183 flags
&= ~(EF_ARM_SYMSARESORTED
| EF_ARM_DYNSYMSUSESEGIDX
13184 | EF_ARM_MAPSYMSFIRST
);
13187 case EF_ARM_EABI_VER3
:
13188 fprintf (file
, _(" [Version3 EABI]"));
13191 case EF_ARM_EABI_VER4
:
13192 fprintf (file
, _(" [Version4 EABI]"));
13195 case EF_ARM_EABI_VER5
:
13196 fprintf (file
, _(" [Version5 EABI]"));
13198 if (flags
& EF_ARM_ABI_FLOAT_SOFT
)
13199 fprintf (file
, _(" [soft-float ABI]"));
13201 if (flags
& EF_ARM_ABI_FLOAT_HARD
)
13202 fprintf (file
, _(" [hard-float ABI]"));
13204 flags
&= ~(EF_ARM_ABI_FLOAT_SOFT
| EF_ARM_ABI_FLOAT_HARD
);
13207 if (flags
& EF_ARM_BE8
)
13208 fprintf (file
, _(" [BE8]"));
13210 if (flags
& EF_ARM_LE8
)
13211 fprintf (file
, _(" [LE8]"));
13213 flags
&= ~(EF_ARM_LE8
| EF_ARM_BE8
);
13217 fprintf (file
, _(" <EABI version unrecognised>"));
13221 flags
&= ~ EF_ARM_EABIMASK
;
13223 if (flags
& EF_ARM_RELEXEC
)
13224 fprintf (file
, _(" [relocatable executable]"));
13226 flags
&= ~EF_ARM_RELEXEC
;
13229 fprintf (file
, _("<Unrecognised flag bits set>"));
13231 fputc ('\n', file
);
13237 elf32_arm_get_symbol_type (Elf_Internal_Sym
* elf_sym
, int type
)
13239 switch (ELF_ST_TYPE (elf_sym
->st_info
))
13241 case STT_ARM_TFUNC
:
13242 return ELF_ST_TYPE (elf_sym
->st_info
);
13244 case STT_ARM_16BIT
:
13245 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13246 This allows us to distinguish between data used by Thumb instructions
13247 and non-data (which is probably code) inside Thumb regions of an
13249 if (type
!= STT_OBJECT
&& type
!= STT_TLS
)
13250 return ELF_ST_TYPE (elf_sym
->st_info
);
13261 elf32_arm_gc_mark_hook (asection
*sec
,
13262 struct bfd_link_info
*info
,
13263 Elf_Internal_Rela
*rel
,
13264 struct elf_link_hash_entry
*h
,
13265 Elf_Internal_Sym
*sym
)
13268 switch (ELF32_R_TYPE (rel
->r_info
))
13270 case R_ARM_GNU_VTINHERIT
:
13271 case R_ARM_GNU_VTENTRY
:
13275 return _bfd_elf_gc_mark_hook (sec
, info
, rel
, h
, sym
);
13278 /* Update the got entry reference counts for the section being removed. */
13281 elf32_arm_gc_sweep_hook (bfd
* abfd
,
13282 struct bfd_link_info
* info
,
13284 const Elf_Internal_Rela
* relocs
)
13286 Elf_Internal_Shdr
*symtab_hdr
;
13287 struct elf_link_hash_entry
**sym_hashes
;
13288 bfd_signed_vma
*local_got_refcounts
;
13289 const Elf_Internal_Rela
*rel
, *relend
;
13290 struct elf32_arm_link_hash_table
* globals
;
13292 if (bfd_link_relocatable (info
))
13295 globals
= elf32_arm_hash_table (info
);
13296 if (globals
== NULL
)
13299 elf_section_data (sec
)->local_dynrel
= NULL
;
13301 symtab_hdr
= & elf_symtab_hdr (abfd
);
13302 sym_hashes
= elf_sym_hashes (abfd
);
13303 local_got_refcounts
= elf_local_got_refcounts (abfd
);
13305 check_use_blx (globals
);
13307 relend
= relocs
+ sec
->reloc_count
;
13308 for (rel
= relocs
; rel
< relend
; rel
++)
13310 unsigned long r_symndx
;
13311 struct elf_link_hash_entry
*h
= NULL
;
13312 struct elf32_arm_link_hash_entry
*eh
;
13314 bfd_boolean call_reloc_p
;
13315 bfd_boolean may_become_dynamic_p
;
13316 bfd_boolean may_need_local_target_p
;
13317 union gotplt_union
*root_plt
;
13318 struct arm_plt_info
*arm_plt
;
13320 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13321 if (r_symndx
>= symtab_hdr
->sh_info
)
13323 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13324 while (h
->root
.type
== bfd_link_hash_indirect
13325 || h
->root
.type
== bfd_link_hash_warning
)
13326 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13328 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13330 call_reloc_p
= FALSE
;
13331 may_become_dynamic_p
= FALSE
;
13332 may_need_local_target_p
= FALSE
;
13334 r_type
= ELF32_R_TYPE (rel
->r_info
);
13335 r_type
= arm_real_reloc_type (globals
, r_type
);
13339 case R_ARM_GOT_PREL
:
13340 case R_ARM_TLS_GD32
:
13341 case R_ARM_TLS_IE32
:
13344 if (h
->got
.refcount
> 0)
13345 h
->got
.refcount
-= 1;
13347 else if (local_got_refcounts
!= NULL
)
13349 if (local_got_refcounts
[r_symndx
] > 0)
13350 local_got_refcounts
[r_symndx
] -= 1;
13354 case R_ARM_TLS_LDM32
:
13355 globals
->tls_ldm_got
.refcount
-= 1;
13363 case R_ARM_THM_CALL
:
13364 case R_ARM_THM_JUMP24
:
13365 case R_ARM_THM_JUMP19
:
13366 call_reloc_p
= TRUE
;
13367 may_need_local_target_p
= TRUE
;
13371 if (!globals
->vxworks_p
)
13373 may_need_local_target_p
= TRUE
;
13376 /* Fall through. */
13378 case R_ARM_ABS32_NOI
:
13380 case R_ARM_REL32_NOI
:
13381 case R_ARM_MOVW_ABS_NC
:
13382 case R_ARM_MOVT_ABS
:
13383 case R_ARM_MOVW_PREL_NC
:
13384 case R_ARM_MOVT_PREL
:
13385 case R_ARM_THM_MOVW_ABS_NC
:
13386 case R_ARM_THM_MOVT_ABS
:
13387 case R_ARM_THM_MOVW_PREL_NC
:
13388 case R_ARM_THM_MOVT_PREL
:
13389 /* Should the interworking branches be here also? */
13390 if ((bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
13391 && (sec
->flags
& SEC_ALLOC
) != 0)
13394 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13396 call_reloc_p
= TRUE
;
13397 may_need_local_target_p
= TRUE
;
13400 may_become_dynamic_p
= TRUE
;
13403 may_need_local_target_p
= TRUE
;
13410 if (may_need_local_target_p
13411 && elf32_arm_get_plt_info (abfd
, eh
, r_symndx
, &root_plt
, &arm_plt
))
13413 /* If PLT refcount book-keeping is wrong and too low, we'll
13414 see a zero value (going to -1) for the root PLT reference
13416 if (root_plt
->refcount
>= 0)
13418 BFD_ASSERT (root_plt
->refcount
!= 0);
13419 root_plt
->refcount
-= 1;
13422 /* A value of -1 means the symbol has become local, forced
13423 or seeing a hidden definition. Any other negative value
13425 BFD_ASSERT (root_plt
->refcount
== -1);
13428 arm_plt
->noncall_refcount
--;
13430 if (r_type
== R_ARM_THM_CALL
)
13431 arm_plt
->maybe_thumb_refcount
--;
13433 if (r_type
== R_ARM_THM_JUMP24
13434 || r_type
== R_ARM_THM_JUMP19
)
13435 arm_plt
->thumb_refcount
--;
13438 if (may_become_dynamic_p
)
13440 struct elf_dyn_relocs
**pp
;
13441 struct elf_dyn_relocs
*p
;
13444 pp
= &(eh
->dyn_relocs
);
13447 Elf_Internal_Sym
*isym
;
13449 isym
= bfd_sym_from_r_symndx (&globals
->sym_cache
,
13453 pp
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13457 for (; (p
= *pp
) != NULL
; pp
= &p
->next
)
13460 /* Everything must go for SEC. */
13470 /* Look through the relocs for a section during the first phase. */
13473 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
13474 asection
*sec
, const Elf_Internal_Rela
*relocs
)
13476 Elf_Internal_Shdr
*symtab_hdr
;
13477 struct elf_link_hash_entry
**sym_hashes
;
13478 const Elf_Internal_Rela
*rel
;
13479 const Elf_Internal_Rela
*rel_end
;
13482 struct elf32_arm_link_hash_table
*htab
;
13483 bfd_boolean call_reloc_p
;
13484 bfd_boolean may_become_dynamic_p
;
13485 bfd_boolean may_need_local_target_p
;
13486 unsigned long nsyms
;
13488 if (bfd_link_relocatable (info
))
13491 BFD_ASSERT (is_arm_elf (abfd
));
13493 htab
= elf32_arm_hash_table (info
);
13499 /* Create dynamic sections for relocatable executables so that we can
13500 copy relocations. */
13501 if (htab
->root
.is_relocatable_executable
13502 && ! htab
->root
.dynamic_sections_created
)
13504 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
13508 if (htab
->root
.dynobj
== NULL
)
13509 htab
->root
.dynobj
= abfd
;
13510 if (!create_ifunc_sections (info
))
13513 dynobj
= htab
->root
.dynobj
;
13515 symtab_hdr
= & elf_symtab_hdr (abfd
);
13516 sym_hashes
= elf_sym_hashes (abfd
);
13517 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
13519 rel_end
= relocs
+ sec
->reloc_count
;
13520 for (rel
= relocs
; rel
< rel_end
; rel
++)
13522 Elf_Internal_Sym
*isym
;
13523 struct elf_link_hash_entry
*h
;
13524 struct elf32_arm_link_hash_entry
*eh
;
13525 unsigned long r_symndx
;
13528 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13529 r_type
= ELF32_R_TYPE (rel
->r_info
);
13530 r_type
= arm_real_reloc_type (htab
, r_type
);
13532 if (r_symndx
>= nsyms
13533 /* PR 9934: It is possible to have relocations that do not
13534 refer to symbols, thus it is also possible to have an
13535 object file containing relocations but no symbol table. */
13536 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
13538 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
13547 if (r_symndx
< symtab_hdr
->sh_info
)
13549 /* A local symbol. */
13550 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
13557 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13558 while (h
->root
.type
== bfd_link_hash_indirect
13559 || h
->root
.type
== bfd_link_hash_warning
)
13560 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13562 /* PR15323, ref flags aren't set for references in the
13564 h
->root
.non_ir_ref
= 1;
13568 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13570 call_reloc_p
= FALSE
;
13571 may_become_dynamic_p
= FALSE
;
13572 may_need_local_target_p
= FALSE
;
13574 /* Could be done earlier, if h were already available. */
13575 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
13579 case R_ARM_GOT_PREL
:
13580 case R_ARM_TLS_GD32
:
13581 case R_ARM_TLS_IE32
:
13582 case R_ARM_TLS_GOTDESC
:
13583 case R_ARM_TLS_DESCSEQ
:
13584 case R_ARM_THM_TLS_DESCSEQ
:
13585 case R_ARM_TLS_CALL
:
13586 case R_ARM_THM_TLS_CALL
:
13587 /* This symbol requires a global offset table entry. */
13589 int tls_type
, old_tls_type
;
13593 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
13595 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
13597 case R_ARM_TLS_GOTDESC
:
13598 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
13599 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
13600 tls_type
= GOT_TLS_GDESC
; break;
13602 default: tls_type
= GOT_NORMAL
; break;
13605 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
13606 info
->flags
|= DF_STATIC_TLS
;
13611 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
13615 /* This is a global offset table entry for a local symbol. */
13616 if (!elf32_arm_allocate_local_sym_info (abfd
))
13618 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
13619 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
13622 /* If a variable is accessed with both tls methods, two
13623 slots may be created. */
13624 if (GOT_TLS_GD_ANY_P (old_tls_type
)
13625 && GOT_TLS_GD_ANY_P (tls_type
))
13626 tls_type
|= old_tls_type
;
13628 /* We will already have issued an error message if there
13629 is a TLS/non-TLS mismatch, based on the symbol
13630 type. So just combine any TLS types needed. */
13631 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
13632 && tls_type
!= GOT_NORMAL
)
13633 tls_type
|= old_tls_type
;
13635 /* If the symbol is accessed in both IE and GDESC
13636 method, we're able to relax. Turn off the GDESC flag,
13637 without messing up with any other kind of tls types
13638 that may be involved. */
13639 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
13640 tls_type
&= ~GOT_TLS_GDESC
;
13642 if (old_tls_type
!= tls_type
)
13645 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
13647 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
13650 /* Fall through. */
13652 case R_ARM_TLS_LDM32
:
13653 if (r_type
== R_ARM_TLS_LDM32
)
13654 htab
->tls_ldm_got
.refcount
++;
13655 /* Fall through. */
13657 case R_ARM_GOTOFF32
:
13659 if (htab
->root
.sgot
== NULL
13660 && !create_got_section (htab
->root
.dynobj
, info
))
13669 case R_ARM_THM_CALL
:
13670 case R_ARM_THM_JUMP24
:
13671 case R_ARM_THM_JUMP19
:
13672 call_reloc_p
= TRUE
;
13673 may_need_local_target_p
= TRUE
;
13677 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13678 ldr __GOTT_INDEX__ offsets. */
13679 if (!htab
->vxworks_p
)
13681 may_need_local_target_p
= TRUE
;
13684 else goto jump_over
;
13686 /* Fall through. */
13688 case R_ARM_MOVW_ABS_NC
:
13689 case R_ARM_MOVT_ABS
:
13690 case R_ARM_THM_MOVW_ABS_NC
:
13691 case R_ARM_THM_MOVT_ABS
:
13692 if (bfd_link_pic (info
))
13694 (*_bfd_error_handler
)
13695 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13696 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
13697 (h
) ? h
->root
.root
.string
: "a local symbol");
13698 bfd_set_error (bfd_error_bad_value
);
13702 /* Fall through. */
13704 case R_ARM_ABS32_NOI
:
13706 if (h
!= NULL
&& bfd_link_executable (info
))
13708 h
->pointer_equality_needed
= 1;
13710 /* Fall through. */
13712 case R_ARM_REL32_NOI
:
13713 case R_ARM_MOVW_PREL_NC
:
13714 case R_ARM_MOVT_PREL
:
13715 case R_ARM_THM_MOVW_PREL_NC
:
13716 case R_ARM_THM_MOVT_PREL
:
13718 /* Should the interworking branches be listed here? */
13719 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
13720 && (sec
->flags
& SEC_ALLOC
) != 0)
13723 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13725 /* In shared libraries and relocatable executables,
13726 we treat local relative references as calls;
13727 see the related SYMBOL_CALLS_LOCAL code in
13728 allocate_dynrelocs. */
13729 call_reloc_p
= TRUE
;
13730 may_need_local_target_p
= TRUE
;
13733 /* We are creating a shared library or relocatable
13734 executable, and this is a reloc against a global symbol,
13735 or a non-PC-relative reloc against a local symbol.
13736 We may need to copy the reloc into the output. */
13737 may_become_dynamic_p
= TRUE
;
13740 may_need_local_target_p
= TRUE
;
13743 /* This relocation describes the C++ object vtable hierarchy.
13744 Reconstruct it for later use during GC. */
13745 case R_ARM_GNU_VTINHERIT
:
13746 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
13750 /* This relocation describes which C++ vtable entries are actually
13751 used. Record for later use during GC. */
13752 case R_ARM_GNU_VTENTRY
:
13753 BFD_ASSERT (h
!= NULL
);
13755 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
13763 /* We may need a .plt entry if the function this reloc
13764 refers to is in a different object, regardless of the
13765 symbol's type. We can't tell for sure yet, because
13766 something later might force the symbol local. */
13768 else if (may_need_local_target_p
)
13769 /* If this reloc is in a read-only section, we might
13770 need a copy reloc. We can't check reliably at this
13771 stage whether the section is read-only, as input
13772 sections have not yet been mapped to output sections.
13773 Tentatively set the flag for now, and correct in
13774 adjust_dynamic_symbol. */
13775 h
->non_got_ref
= 1;
13778 if (may_need_local_target_p
13779 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
13781 union gotplt_union
*root_plt
;
13782 struct arm_plt_info
*arm_plt
;
13783 struct arm_local_iplt_info
*local_iplt
;
13787 root_plt
= &h
->plt
;
13788 arm_plt
= &eh
->plt
;
13792 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
13793 if (local_iplt
== NULL
)
13795 root_plt
= &local_iplt
->root
;
13796 arm_plt
= &local_iplt
->arm
;
13799 /* If the symbol is a function that doesn't bind locally,
13800 this relocation will need a PLT entry. */
13801 if (root_plt
->refcount
!= -1)
13802 root_plt
->refcount
+= 1;
13805 arm_plt
->noncall_refcount
++;
13807 /* It's too early to use htab->use_blx here, so we have to
13808 record possible blx references separately from
13809 relocs that definitely need a thumb stub. */
13811 if (r_type
== R_ARM_THM_CALL
)
13812 arm_plt
->maybe_thumb_refcount
+= 1;
13814 if (r_type
== R_ARM_THM_JUMP24
13815 || r_type
== R_ARM_THM_JUMP19
)
13816 arm_plt
->thumb_refcount
+= 1;
13819 if (may_become_dynamic_p
)
13821 struct elf_dyn_relocs
*p
, **head
;
13823 /* Create a reloc section in dynobj. */
13824 if (sreloc
== NULL
)
13826 sreloc
= _bfd_elf_make_dynamic_reloc_section
13827 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
13829 if (sreloc
== NULL
)
13832 /* BPABI objects never have dynamic relocations mapped. */
13833 if (htab
->symbian_p
)
13837 flags
= bfd_get_section_flags (dynobj
, sreloc
);
13838 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
13839 bfd_set_section_flags (dynobj
, sreloc
, flags
);
13843 /* If this is a global symbol, count the number of
13844 relocations we need for this symbol. */
13846 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
13849 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13855 if (p
== NULL
|| p
->sec
!= sec
)
13857 bfd_size_type amt
= sizeof *p
;
13859 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
13869 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
13878 /* Unwinding tables are not referenced directly. This pass marks them as
13879 required if the corresponding code section is marked. */
13882 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
13883 elf_gc_mark_hook_fn gc_mark_hook
)
13886 Elf_Internal_Shdr
**elf_shdrp
;
13889 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
13891 /* Marking EH data may cause additional code sections to be marked,
13892 requiring multiple passes. */
13897 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
13901 if (! is_arm_elf (sub
))
13904 elf_shdrp
= elf_elfsections (sub
);
13905 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
13907 Elf_Internal_Shdr
*hdr
;
13909 hdr
= &elf_section_data (o
)->this_hdr
;
13910 if (hdr
->sh_type
== SHT_ARM_EXIDX
13912 && hdr
->sh_link
< elf_numsections (sub
)
13914 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
13917 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
13927 /* Treat mapping symbols as special target symbols. */
13930 elf32_arm_is_target_special_symbol (bfd
* abfd ATTRIBUTE_UNUSED
, asymbol
* sym
)
13932 return bfd_is_arm_special_symbol_name (sym
->name
,
13933 BFD_ARM_SPECIAL_SYM_TYPE_ANY
);
13936 /* This is a copy of elf_find_function() from elf.c except that
13937 ARM mapping symbols are ignored when looking for function names
13938 and STT_ARM_TFUNC is considered to a function type. */
13941 arm_elf_find_function (bfd
* abfd ATTRIBUTE_UNUSED
,
13942 asymbol
** symbols
,
13943 asection
* section
,
13945 const char ** filename_ptr
,
13946 const char ** functionname_ptr
)
13948 const char * filename
= NULL
;
13949 asymbol
* func
= NULL
;
13950 bfd_vma low_func
= 0;
13953 for (p
= symbols
; *p
!= NULL
; p
++)
13955 elf_symbol_type
*q
;
13957 q
= (elf_symbol_type
*) *p
;
13959 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
13964 filename
= bfd_asymbol_name (&q
->symbol
);
13967 case STT_ARM_TFUNC
:
13969 /* Skip mapping symbols. */
13970 if ((q
->symbol
.flags
& BSF_LOCAL
)
13971 && bfd_is_arm_special_symbol_name (q
->symbol
.name
,
13972 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
13974 /* Fall through. */
13975 if (bfd_get_section (&q
->symbol
) == section
13976 && q
->symbol
.value
>= low_func
13977 && q
->symbol
.value
<= offset
)
13979 func
= (asymbol
*) q
;
13980 low_func
= q
->symbol
.value
;
13990 *filename_ptr
= filename
;
13991 if (functionname_ptr
)
13992 *functionname_ptr
= bfd_asymbol_name (func
);
13998 /* Find the nearest line to a particular section and offset, for error
13999 reporting. This code is a duplicate of the code in elf.c, except
14000 that it uses arm_elf_find_function. */
14003 elf32_arm_find_nearest_line (bfd
* abfd
,
14004 asymbol
** symbols
,
14005 asection
* section
,
14007 const char ** filename_ptr
,
14008 const char ** functionname_ptr
,
14009 unsigned int * line_ptr
,
14010 unsigned int * discriminator_ptr
)
14012 bfd_boolean found
= FALSE
;
14014 if (_bfd_dwarf2_find_nearest_line (abfd
, symbols
, NULL
, section
, offset
,
14015 filename_ptr
, functionname_ptr
,
14016 line_ptr
, discriminator_ptr
,
14017 dwarf_debug_sections
, 0,
14018 & elf_tdata (abfd
)->dwarf2_find_line_info
))
14020 if (!*functionname_ptr
)
14021 arm_elf_find_function (abfd
, symbols
, section
, offset
,
14022 *filename_ptr
? NULL
: filename_ptr
,
14028 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14031 if (! _bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
14032 & found
, filename_ptr
,
14033 functionname_ptr
, line_ptr
,
14034 & elf_tdata (abfd
)->line_info
))
14037 if (found
&& (*functionname_ptr
|| *line_ptr
))
14040 if (symbols
== NULL
)
14043 if (! arm_elf_find_function (abfd
, symbols
, section
, offset
,
14044 filename_ptr
, functionname_ptr
))
14052 elf32_arm_find_inliner_info (bfd
* abfd
,
14053 const char ** filename_ptr
,
14054 const char ** functionname_ptr
,
14055 unsigned int * line_ptr
)
14058 found
= _bfd_dwarf2_find_inliner_info (abfd
, filename_ptr
,
14059 functionname_ptr
, line_ptr
,
14060 & elf_tdata (abfd
)->dwarf2_find_line_info
);
14064 /* Adjust a symbol defined by a dynamic object and referenced by a
14065 regular object. The current definition is in some section of the
14066 dynamic object, but we're not including those sections. We have to
14067 change the definition to something the rest of the link can
14071 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
14072 struct elf_link_hash_entry
* h
)
14076 struct elf32_arm_link_hash_entry
* eh
;
14077 struct elf32_arm_link_hash_table
*globals
;
14079 globals
= elf32_arm_hash_table (info
);
14080 if (globals
== NULL
)
14083 dynobj
= elf_hash_table (info
)->dynobj
;
14085 /* Make sure we know what is going on here. */
14086 BFD_ASSERT (dynobj
!= NULL
14088 || h
->type
== STT_GNU_IFUNC
14089 || h
->u
.weakdef
!= NULL
14092 && !h
->def_regular
)));
14094 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14096 /* If this is a function, put it in the procedure linkage table. We
14097 will fill in the contents of the procedure linkage table later,
14098 when we know the address of the .got section. */
14099 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
14101 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14102 symbol binds locally. */
14103 if (h
->plt
.refcount
<= 0
14104 || (h
->type
!= STT_GNU_IFUNC
14105 && (SYMBOL_CALLS_LOCAL (info
, h
)
14106 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
14107 && h
->root
.type
== bfd_link_hash_undefweak
))))
14109 /* This case can occur if we saw a PLT32 reloc in an input
14110 file, but the symbol was never referred to by a dynamic
14111 object, or if all references were garbage collected. In
14112 such a case, we don't actually need to build a procedure
14113 linkage table, and we can just do a PC24 reloc instead. */
14114 h
->plt
.offset
= (bfd_vma
) -1;
14115 eh
->plt
.thumb_refcount
= 0;
14116 eh
->plt
.maybe_thumb_refcount
= 0;
14117 eh
->plt
.noncall_refcount
= 0;
14125 /* It's possible that we incorrectly decided a .plt reloc was
14126 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14127 in check_relocs. We can't decide accurately between function
14128 and non-function syms in check-relocs; Objects loaded later in
14129 the link may change h->type. So fix it now. */
14130 h
->plt
.offset
= (bfd_vma
) -1;
14131 eh
->plt
.thumb_refcount
= 0;
14132 eh
->plt
.maybe_thumb_refcount
= 0;
14133 eh
->plt
.noncall_refcount
= 0;
14136 /* If this is a weak symbol, and there is a real definition, the
14137 processor independent code will have arranged for us to see the
14138 real definition first, and we can just use the same value. */
14139 if (h
->u
.weakdef
!= NULL
)
14141 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
14142 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
14143 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
14144 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
14148 /* If there are no non-GOT references, we do not need a copy
14150 if (!h
->non_got_ref
)
14153 /* This is a reference to a symbol defined by a dynamic object which
14154 is not a function. */
14156 /* If we are creating a shared library, we must presume that the
14157 only references to the symbol are via the global offset table.
14158 For such cases we need not do anything here; the relocations will
14159 be handled correctly by relocate_section. Relocatable executables
14160 can reference data in shared objects directly, so we don't need to
14161 do anything here. */
14162 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
14165 /* We must allocate the symbol in our .dynbss section, which will
14166 become part of the .bss section of the executable. There will be
14167 an entry for this symbol in the .dynsym section. The dynamic
14168 object will contain position independent code, so all references
14169 from the dynamic object to this symbol will go through the global
14170 offset table. The dynamic linker will use the .dynsym entry to
14171 determine the address it must put in the global offset table, so
14172 both the dynamic object and the regular object will refer to the
14173 same memory location for the variable. */
14174 s
= bfd_get_linker_section (dynobj
, ".dynbss");
14175 BFD_ASSERT (s
!= NULL
);
14177 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14178 linker to copy the initial value out of the dynamic object and into
14179 the runtime process image. We need to remember the offset into the
14180 .rel(a).bss section we are going to use. */
14181 if (info
->nocopyreloc
== 0
14182 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
14187 srel
= bfd_get_linker_section (dynobj
, RELOC_SECTION (globals
, ".bss"));
14188 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14192 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
14195 /* Allocate space in .plt, .got and associated reloc sections for
14199 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
14201 struct bfd_link_info
*info
;
14202 struct elf32_arm_link_hash_table
*htab
;
14203 struct elf32_arm_link_hash_entry
*eh
;
14204 struct elf_dyn_relocs
*p
;
14206 if (h
->root
.type
== bfd_link_hash_indirect
)
14209 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14211 info
= (struct bfd_link_info
*) inf
;
14212 htab
= elf32_arm_hash_table (info
);
14216 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
14217 && h
->plt
.refcount
> 0)
14219 /* Make sure this symbol is output as a dynamic symbol.
14220 Undefined weak syms won't yet be marked as dynamic. */
14221 if (h
->dynindx
== -1
14222 && !h
->forced_local
)
14224 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14228 /* If the call in the PLT entry binds locally, the associated
14229 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14230 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14231 than the .plt section. */
14232 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
14235 if (eh
->plt
.noncall_refcount
== 0
14236 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14237 /* All non-call references can be resolved directly.
14238 This means that they can (and in some cases, must)
14239 resolve directly to the run-time target, rather than
14240 to the PLT. That in turns means that any .got entry
14241 would be equal to the .igot.plt entry, so there's
14242 no point having both. */
14243 h
->got
.refcount
= 0;
14246 if (bfd_link_pic (info
)
14248 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
14250 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
14252 /* If this symbol is not defined in a regular file, and we are
14253 not generating a shared library, then set the symbol to this
14254 location in the .plt. This is required to make function
14255 pointers compare as equal between the normal executable and
14256 the shared library. */
14257 if (! bfd_link_pic (info
)
14258 && !h
->def_regular
)
14260 h
->root
.u
.def
.section
= htab
->root
.splt
;
14261 h
->root
.u
.def
.value
= h
->plt
.offset
;
14263 /* Make sure the function is not marked as Thumb, in case
14264 it is the target of an ABS32 relocation, which will
14265 point to the PLT entry. */
14266 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14269 /* VxWorks executables have a second set of relocations for
14270 each PLT entry. They go in a separate relocation section,
14271 which is processed by the kernel loader. */
14272 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
14274 /* There is a relocation for the initial PLT entry:
14275 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14276 if (h
->plt
.offset
== htab
->plt_header_size
)
14277 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
14279 /* There are two extra relocations for each subsequent
14280 PLT entry: an R_ARM_32 relocation for the GOT entry,
14281 and an R_ARM_32 relocation for the PLT entry. */
14282 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
14287 h
->plt
.offset
= (bfd_vma
) -1;
14293 h
->plt
.offset
= (bfd_vma
) -1;
14297 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14298 eh
->tlsdesc_got
= (bfd_vma
) -1;
14300 if (h
->got
.refcount
> 0)
14304 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
14307 /* Make sure this symbol is output as a dynamic symbol.
14308 Undefined weak syms won't yet be marked as dynamic. */
14309 if (h
->dynindx
== -1
14310 && !h
->forced_local
)
14312 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14316 if (!htab
->symbian_p
)
14318 s
= htab
->root
.sgot
;
14319 h
->got
.offset
= s
->size
;
14321 if (tls_type
== GOT_UNKNOWN
)
14324 if (tls_type
== GOT_NORMAL
)
14325 /* Non-TLS symbols need one GOT slot. */
14329 if (tls_type
& GOT_TLS_GDESC
)
14331 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14333 = (htab
->root
.sgotplt
->size
14334 - elf32_arm_compute_jump_table_size (htab
));
14335 htab
->root
.sgotplt
->size
+= 8;
14336 h
->got
.offset
= (bfd_vma
) -2;
14337 /* plt.got_offset needs to know there's a TLS_DESC
14338 reloc in the middle of .got.plt. */
14339 htab
->num_tls_desc
++;
14342 if (tls_type
& GOT_TLS_GD
)
14344 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14345 the symbol is both GD and GDESC, got.offset may
14346 have been overwritten. */
14347 h
->got
.offset
= s
->size
;
14351 if (tls_type
& GOT_TLS_IE
)
14352 /* R_ARM_TLS_IE32 needs one GOT slot. */
14356 dyn
= htab
->root
.dynamic_sections_created
;
14359 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
14360 bfd_link_pic (info
),
14362 && (!bfd_link_pic (info
)
14363 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
14366 if (tls_type
!= GOT_NORMAL
14367 && (bfd_link_pic (info
) || indx
!= 0)
14368 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14369 || h
->root
.type
!= bfd_link_hash_undefweak
))
14371 if (tls_type
& GOT_TLS_IE
)
14372 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14374 if (tls_type
& GOT_TLS_GD
)
14375 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14377 if (tls_type
& GOT_TLS_GDESC
)
14379 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
14380 /* GDESC needs a trampoline to jump to. */
14381 htab
->tls_trampoline
= -1;
14384 /* Only GD needs it. GDESC just emits one relocation per
14386 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
14387 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14389 else if (indx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
14391 if (htab
->root
.dynamic_sections_created
)
14392 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14393 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14395 else if (h
->type
== STT_GNU_IFUNC
14396 && eh
->plt
.noncall_refcount
== 0)
14397 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14398 they all resolve dynamically instead. Reserve room for the
14399 GOT entry's R_ARM_IRELATIVE relocation. */
14400 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
14401 else if (bfd_link_pic (info
)
14402 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14403 || h
->root
.type
!= bfd_link_hash_undefweak
))
14404 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14405 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14409 h
->got
.offset
= (bfd_vma
) -1;
14411 /* Allocate stubs for exported Thumb functions on v4t. */
14412 if (!htab
->use_blx
&& h
->dynindx
!= -1
14414 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
14415 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
14417 struct elf_link_hash_entry
* th
;
14418 struct bfd_link_hash_entry
* bh
;
14419 struct elf_link_hash_entry
* myh
;
14423 /* Create a new symbol to regist the real location of the function. */
14424 s
= h
->root
.u
.def
.section
;
14425 sprintf (name
, "__real_%s", h
->root
.root
.string
);
14426 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
14427 name
, BSF_GLOBAL
, s
,
14428 h
->root
.u
.def
.value
,
14429 NULL
, TRUE
, FALSE
, &bh
);
14431 myh
= (struct elf_link_hash_entry
*) bh
;
14432 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
14433 myh
->forced_local
= 1;
14434 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
14435 eh
->export_glue
= myh
;
14436 th
= record_arm_to_thumb_glue (info
, h
);
14437 /* Point the symbol at the stub. */
14438 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
14439 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14440 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
14441 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
14444 if (eh
->dyn_relocs
== NULL
)
14447 /* In the shared -Bsymbolic case, discard space allocated for
14448 dynamic pc-relative relocs against symbols which turn out to be
14449 defined in regular objects. For the normal shared case, discard
14450 space for pc-relative relocs that have become local due to symbol
14451 visibility changes. */
14453 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
14455 /* Relocs that use pc_count are PC-relative forms, which will appear
14456 on something like ".long foo - ." or "movw REG, foo - .". We want
14457 calls to protected symbols to resolve directly to the function
14458 rather than going via the plt. If people want function pointer
14459 comparisons to work as expected then they should avoid writing
14460 assembly like ".long foo - .". */
14461 if (SYMBOL_CALLS_LOCAL (info
, h
))
14463 struct elf_dyn_relocs
**pp
;
14465 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14467 p
->count
-= p
->pc_count
;
14476 if (htab
->vxworks_p
)
14478 struct elf_dyn_relocs
**pp
;
14480 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14482 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
14489 /* Also discard relocs on undefined weak syms with non-default
14491 if (eh
->dyn_relocs
!= NULL
14492 && h
->root
.type
== bfd_link_hash_undefweak
)
14494 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
14495 eh
->dyn_relocs
= NULL
;
14497 /* Make sure undefined weak symbols are output as a dynamic
14499 else if (h
->dynindx
== -1
14500 && !h
->forced_local
)
14502 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14507 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
14508 && h
->root
.type
== bfd_link_hash_new
)
14510 /* Output absolute symbols so that we can create relocations
14511 against them. For normal symbols we output a relocation
14512 against the section that contains them. */
14513 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14520 /* For the non-shared case, discard space for relocs against
14521 symbols which turn out to need copy relocs or are not
14524 if (!h
->non_got_ref
14525 && ((h
->def_dynamic
14526 && !h
->def_regular
)
14527 || (htab
->root
.dynamic_sections_created
14528 && (h
->root
.type
== bfd_link_hash_undefweak
14529 || h
->root
.type
== bfd_link_hash_undefined
))))
14531 /* Make sure this symbol is output as a dynamic symbol.
14532 Undefined weak syms won't yet be marked as dynamic. */
14533 if (h
->dynindx
== -1
14534 && !h
->forced_local
)
14536 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14540 /* If that succeeded, we know we'll be keeping all the
14542 if (h
->dynindx
!= -1)
14546 eh
->dyn_relocs
= NULL
;
14551 /* Finally, allocate space. */
14552 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14554 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
14555 if (h
->type
== STT_GNU_IFUNC
14556 && eh
->plt
.noncall_refcount
== 0
14557 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14558 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
14560 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
14566 /* Find any dynamic relocs that apply to read-only sections. */
14569 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry
* h
, void * inf
)
14571 struct elf32_arm_link_hash_entry
* eh
;
14572 struct elf_dyn_relocs
* p
;
14574 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14575 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14577 asection
*s
= p
->sec
;
14579 if (s
!= NULL
&& (s
->flags
& SEC_READONLY
) != 0)
14581 struct bfd_link_info
*info
= (struct bfd_link_info
*) inf
;
14583 info
->flags
|= DF_TEXTREL
;
14585 /* Not an error, just cut short the traversal. */
14593 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info
*info
,
14596 struct elf32_arm_link_hash_table
*globals
;
14598 globals
= elf32_arm_hash_table (info
);
14599 if (globals
== NULL
)
14602 globals
->byteswap_code
= byteswap_code
;
14605 /* Set the sizes of the dynamic sections. */
14608 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
14609 struct bfd_link_info
* info
)
14614 bfd_boolean relocs
;
14616 struct elf32_arm_link_hash_table
*htab
;
14618 htab
= elf32_arm_hash_table (info
);
14622 dynobj
= elf_hash_table (info
)->dynobj
;
14623 BFD_ASSERT (dynobj
!= NULL
);
14624 check_use_blx (htab
);
14626 if (elf_hash_table (info
)->dynamic_sections_created
)
14628 /* Set the contents of the .interp section to the interpreter. */
14629 if (bfd_link_executable (info
) && !info
->nointerp
)
14631 s
= bfd_get_linker_section (dynobj
, ".interp");
14632 BFD_ASSERT (s
!= NULL
);
14633 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
14634 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
14638 /* Set up .got offsets for local syms, and space for local dynamic
14640 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14642 bfd_signed_vma
*local_got
;
14643 bfd_signed_vma
*end_local_got
;
14644 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
14645 char *local_tls_type
;
14646 bfd_vma
*local_tlsdesc_gotent
;
14647 bfd_size_type locsymcount
;
14648 Elf_Internal_Shdr
*symtab_hdr
;
14650 bfd_boolean is_vxworks
= htab
->vxworks_p
;
14651 unsigned int symndx
;
14653 if (! is_arm_elf (ibfd
))
14656 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
14658 struct elf_dyn_relocs
*p
;
14660 for (p
= (struct elf_dyn_relocs
*)
14661 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
14663 if (!bfd_is_abs_section (p
->sec
)
14664 && bfd_is_abs_section (p
->sec
->output_section
))
14666 /* Input section has been discarded, either because
14667 it is a copy of a linkonce section or due to
14668 linker script /DISCARD/, so we'll be discarding
14671 else if (is_vxworks
14672 && strcmp (p
->sec
->output_section
->name
,
14675 /* Relocations in vxworks .tls_vars sections are
14676 handled specially by the loader. */
14678 else if (p
->count
!= 0)
14680 srel
= elf_section_data (p
->sec
)->sreloc
;
14681 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
14682 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
14683 info
->flags
|= DF_TEXTREL
;
14688 local_got
= elf_local_got_refcounts (ibfd
);
14692 symtab_hdr
= & elf_symtab_hdr (ibfd
);
14693 locsymcount
= symtab_hdr
->sh_info
;
14694 end_local_got
= local_got
+ locsymcount
;
14695 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
14696 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
14697 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
14699 s
= htab
->root
.sgot
;
14700 srel
= htab
->root
.srelgot
;
14701 for (; local_got
< end_local_got
;
14702 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
14703 ++local_tlsdesc_gotent
, ++symndx
)
14705 *local_tlsdesc_gotent
= (bfd_vma
) -1;
14706 local_iplt
= *local_iplt_ptr
;
14707 if (local_iplt
!= NULL
)
14709 struct elf_dyn_relocs
*p
;
14711 if (local_iplt
->root
.refcount
> 0)
14713 elf32_arm_allocate_plt_entry (info
, TRUE
,
14716 if (local_iplt
->arm
.noncall_refcount
== 0)
14717 /* All references to the PLT are calls, so all
14718 non-call references can resolve directly to the
14719 run-time target. This means that the .got entry
14720 would be the same as the .igot.plt entry, so there's
14721 no point creating both. */
14726 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
14727 local_iplt
->root
.offset
= (bfd_vma
) -1;
14730 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14734 psrel
= elf_section_data (p
->sec
)->sreloc
;
14735 if (local_iplt
->arm
.noncall_refcount
== 0)
14736 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
14738 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
14741 if (*local_got
> 0)
14743 Elf_Internal_Sym
*isym
;
14745 *local_got
= s
->size
;
14746 if (*local_tls_type
& GOT_TLS_GD
)
14747 /* TLS_GD relocs need an 8-byte structure in the GOT. */
14749 if (*local_tls_type
& GOT_TLS_GDESC
)
14751 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
14752 - elf32_arm_compute_jump_table_size (htab
);
14753 htab
->root
.sgotplt
->size
+= 8;
14754 *local_got
= (bfd_vma
) -2;
14755 /* plt.got_offset needs to know there's a TLS_DESC
14756 reloc in the middle of .got.plt. */
14757 htab
->num_tls_desc
++;
14759 if (*local_tls_type
& GOT_TLS_IE
)
14762 if (*local_tls_type
& GOT_NORMAL
)
14764 /* If the symbol is both GD and GDESC, *local_got
14765 may have been overwritten. */
14766 *local_got
= s
->size
;
14770 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
14774 /* If all references to an STT_GNU_IFUNC PLT are calls,
14775 then all non-call references, including this GOT entry,
14776 resolve directly to the run-time target. */
14777 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
14778 && (local_iplt
== NULL
14779 || local_iplt
->arm
.noncall_refcount
== 0))
14780 elf32_arm_allocate_irelocs (info
, srel
, 1);
14781 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
)
14783 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
))
14784 || *local_tls_type
& GOT_TLS_GD
)
14785 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14787 if (bfd_link_pic (info
) && *local_tls_type
& GOT_TLS_GDESC
)
14789 elf32_arm_allocate_dynrelocs (info
,
14790 htab
->root
.srelplt
, 1);
14791 htab
->tls_trampoline
= -1;
14796 *local_got
= (bfd_vma
) -1;
14800 if (htab
->tls_ldm_got
.refcount
> 0)
14802 /* Allocate two GOT entries and one dynamic relocation (if necessary)
14803 for R_ARM_TLS_LDM32 relocations. */
14804 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
14805 htab
->root
.sgot
->size
+= 8;
14806 if (bfd_link_pic (info
))
14807 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14810 htab
->tls_ldm_got
.offset
= -1;
14812 /* Allocate global sym .plt and .got entries, and space for global
14813 sym dynamic relocs. */
14814 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
14816 /* Here we rummage through the found bfds to collect glue information. */
14817 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14819 if (! is_arm_elf (ibfd
))
14822 /* Initialise mapping tables for code/data. */
14823 bfd_elf32_arm_init_maps (ibfd
);
14825 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
14826 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
14827 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
14828 /* xgettext:c-format */
14829 _bfd_error_handler (_("Errors encountered processing file %s"),
14833 /* Allocate space for the glue sections now that we've sized them. */
14834 bfd_elf32_arm_allocate_interworking_sections (info
);
14836 /* For every jump slot reserved in the sgotplt, reloc_count is
14837 incremented. However, when we reserve space for TLS descriptors,
14838 it's not incremented, so in order to compute the space reserved
14839 for them, it suffices to multiply the reloc count by the jump
14841 if (htab
->root
.srelplt
)
14842 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
14844 if (htab
->tls_trampoline
)
14846 if (htab
->root
.splt
->size
== 0)
14847 htab
->root
.splt
->size
+= htab
->plt_header_size
;
14849 htab
->tls_trampoline
= htab
->root
.splt
->size
;
14850 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
14852 /* If we're not using lazy TLS relocations, don't generate the
14853 PLT and GOT entries they require. */
14854 if (!(info
->flags
& DF_BIND_NOW
))
14856 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
14857 htab
->root
.sgot
->size
+= 4;
14859 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
14860 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
14864 /* The check_relocs and adjust_dynamic_symbol entry points have
14865 determined the sizes of the various dynamic sections. Allocate
14866 memory for them. */
14869 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
14873 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
14876 /* It's OK to base decisions on the section name, because none
14877 of the dynobj section names depend upon the input files. */
14878 name
= bfd_get_section_name (dynobj
, s
);
14880 if (s
== htab
->root
.splt
)
14882 /* Remember whether there is a PLT. */
14883 plt
= s
->size
!= 0;
14885 else if (CONST_STRNEQ (name
, ".rel"))
14889 /* Remember whether there are any reloc sections other
14890 than .rel(a).plt and .rela.plt.unloaded. */
14891 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
14894 /* We use the reloc_count field as a counter if we need
14895 to copy relocs into the output file. */
14896 s
->reloc_count
= 0;
14899 else if (s
!= htab
->root
.sgot
14900 && s
!= htab
->root
.sgotplt
14901 && s
!= htab
->root
.iplt
14902 && s
!= htab
->root
.igotplt
14903 && s
!= htab
->sdynbss
)
14905 /* It's not one of our sections, so don't allocate space. */
14911 /* If we don't need this section, strip it from the
14912 output file. This is mostly to handle .rel(a).bss and
14913 .rel(a).plt. We must create both sections in
14914 create_dynamic_sections, because they must be created
14915 before the linker maps input sections to output
14916 sections. The linker does that before
14917 adjust_dynamic_symbol is called, and it is that
14918 function which decides whether anything needs to go
14919 into these sections. */
14920 s
->flags
|= SEC_EXCLUDE
;
14924 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
14927 /* Allocate memory for the section contents. */
14928 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
14929 if (s
->contents
== NULL
)
14933 if (elf_hash_table (info
)->dynamic_sections_created
)
14935 /* Add some entries to the .dynamic section. We fill in the
14936 values later, in elf32_arm_finish_dynamic_sections, but we
14937 must add the entries now so that we get the correct size for
14938 the .dynamic section. The DT_DEBUG entry is filled in by the
14939 dynamic linker and used by the debugger. */
14940 #define add_dynamic_entry(TAG, VAL) \
14941 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
14943 if (bfd_link_executable (info
))
14945 if (!add_dynamic_entry (DT_DEBUG
, 0))
14951 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
14952 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
14953 || !add_dynamic_entry (DT_PLTREL
,
14954 htab
->use_rel
? DT_REL
: DT_RELA
)
14955 || !add_dynamic_entry (DT_JMPREL
, 0))
14958 if (htab
->dt_tlsdesc_plt
&&
14959 (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
14960 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
14968 if (!add_dynamic_entry (DT_REL
, 0)
14969 || !add_dynamic_entry (DT_RELSZ
, 0)
14970 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
14975 if (!add_dynamic_entry (DT_RELA
, 0)
14976 || !add_dynamic_entry (DT_RELASZ
, 0)
14977 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
14982 /* If any dynamic relocs apply to a read-only section,
14983 then we need a DT_TEXTREL entry. */
14984 if ((info
->flags
& DF_TEXTREL
) == 0)
14985 elf_link_hash_traverse (& htab
->root
, elf32_arm_readonly_dynrelocs
,
14988 if ((info
->flags
& DF_TEXTREL
) != 0)
14990 if (!add_dynamic_entry (DT_TEXTREL
, 0))
14993 if (htab
->vxworks_p
14994 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
14997 #undef add_dynamic_entry
15002 /* Size sections even though they're not dynamic. We use it to setup
15003 _TLS_MODULE_BASE_, if needed. */
15006 elf32_arm_always_size_sections (bfd
*output_bfd
,
15007 struct bfd_link_info
*info
)
15011 if (bfd_link_relocatable (info
))
15014 tls_sec
= elf_hash_table (info
)->tls_sec
;
15018 struct elf_link_hash_entry
*tlsbase
;
15020 tlsbase
= elf_link_hash_lookup
15021 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
15025 struct bfd_link_hash_entry
*bh
= NULL
;
15026 const struct elf_backend_data
*bed
15027 = get_elf_backend_data (output_bfd
);
15029 if (!(_bfd_generic_link_add_one_symbol
15030 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
15031 tls_sec
, 0, NULL
, FALSE
,
15032 bed
->collect
, &bh
)))
15035 tlsbase
->type
= STT_TLS
;
15036 tlsbase
= (struct elf_link_hash_entry
*)bh
;
15037 tlsbase
->def_regular
= 1;
15038 tlsbase
->other
= STV_HIDDEN
;
15039 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
15045 /* Finish up dynamic symbol handling. We set the contents of various
15046 dynamic sections here. */
15049 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
15050 struct bfd_link_info
* info
,
15051 struct elf_link_hash_entry
* h
,
15052 Elf_Internal_Sym
* sym
)
15054 struct elf32_arm_link_hash_table
*htab
;
15055 struct elf32_arm_link_hash_entry
*eh
;
15057 htab
= elf32_arm_hash_table (info
);
15061 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15063 if (h
->plt
.offset
!= (bfd_vma
) -1)
15067 BFD_ASSERT (h
->dynindx
!= -1);
15068 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
15073 if (!h
->def_regular
)
15075 /* Mark the symbol as undefined, rather than as defined in
15076 the .plt section. */
15077 sym
->st_shndx
= SHN_UNDEF
;
15078 /* If the symbol is weak we need to clear the value.
15079 Otherwise, the PLT entry would provide a definition for
15080 the symbol even if the symbol wasn't defined anywhere,
15081 and so the symbol would never be NULL. Leave the value if
15082 there were any relocations where pointer equality matters
15083 (this is a clue for the dynamic linker, to make function
15084 pointer comparisons work between an application and shared
15086 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
15089 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
15091 /* At least one non-call relocation references this .iplt entry,
15092 so the .iplt entry is the function's canonical address. */
15093 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
15094 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
15095 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
15096 (output_bfd
, htab
->root
.iplt
->output_section
));
15097 sym
->st_value
= (h
->plt
.offset
15098 + htab
->root
.iplt
->output_section
->vma
15099 + htab
->root
.iplt
->output_offset
);
15106 Elf_Internal_Rela rel
;
15108 /* This symbol needs a copy reloc. Set it up. */
15109 BFD_ASSERT (h
->dynindx
!= -1
15110 && (h
->root
.type
== bfd_link_hash_defined
15111 || h
->root
.type
== bfd_link_hash_defweak
));
15114 BFD_ASSERT (s
!= NULL
);
15117 rel
.r_offset
= (h
->root
.u
.def
.value
15118 + h
->root
.u
.def
.section
->output_section
->vma
15119 + h
->root
.u
.def
.section
->output_offset
);
15120 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
15121 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
15124 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
15125 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15126 to the ".got" section. */
15127 if (h
== htab
->root
.hdynamic
15128 || (!htab
->vxworks_p
&& h
== htab
->root
.hgot
))
15129 sym
->st_shndx
= SHN_ABS
;
15135 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15137 const unsigned long *template, unsigned count
)
15141 for (ix
= 0; ix
!= count
; ix
++)
15143 unsigned long insn
= template[ix
];
15145 /* Emit mov pc,rx if bx is not permitted. */
15146 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
15147 insn
= (insn
& 0xf000000f) | 0x01a0f000;
15148 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
15152 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15153 other variants, NaCl needs this entry in a static executable's
15154 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15155 zero. For .iplt really only the last bundle is useful, and .iplt
15156 could have a shorter first entry, with each individual PLT entry's
15157 relative branch calculated differently so it targets the last
15158 bundle instead of the instruction before it (labelled .Lplt_tail
15159 above). But it's simpler to keep the size and layout of PLT0
15160 consistent with the dynamic case, at the cost of some dead code at
15161 the start of .iplt and the one dead store to the stack at the start
15164 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15165 asection
*plt
, bfd_vma got_displacement
)
15169 put_arm_insn (htab
, output_bfd
,
15170 elf32_arm_nacl_plt0_entry
[0]
15171 | arm_movw_immediate (got_displacement
),
15172 plt
->contents
+ 0);
15173 put_arm_insn (htab
, output_bfd
,
15174 elf32_arm_nacl_plt0_entry
[1]
15175 | arm_movt_immediate (got_displacement
),
15176 plt
->contents
+ 4);
15178 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
15179 put_arm_insn (htab
, output_bfd
,
15180 elf32_arm_nacl_plt0_entry
[i
],
15181 plt
->contents
+ (i
* 4));
15184 /* Finish up the dynamic sections. */
15187 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
15192 struct elf32_arm_link_hash_table
*htab
;
15194 htab
= elf32_arm_hash_table (info
);
15198 dynobj
= elf_hash_table (info
)->dynobj
;
15200 sgot
= htab
->root
.sgotplt
;
15201 /* A broken linker script might have discarded the dynamic sections.
15202 Catch this here so that we do not seg-fault later on. */
15203 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
15205 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
15207 if (elf_hash_table (info
)->dynamic_sections_created
)
15210 Elf32_External_Dyn
*dyncon
, *dynconend
;
15212 splt
= htab
->root
.splt
;
15213 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
15214 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
15216 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
15217 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
15219 for (; dyncon
< dynconend
; dyncon
++)
15221 Elf_Internal_Dyn dyn
;
15225 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
15232 if (htab
->vxworks_p
15233 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
15234 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15239 goto get_vma_if_bpabi
;
15242 goto get_vma_if_bpabi
;
15245 goto get_vma_if_bpabi
;
15247 name
= ".gnu.version";
15248 goto get_vma_if_bpabi
;
15250 name
= ".gnu.version_d";
15251 goto get_vma_if_bpabi
;
15253 name
= ".gnu.version_r";
15254 goto get_vma_if_bpabi
;
15260 name
= RELOC_SECTION (htab
, ".plt");
15262 s
= bfd_get_section_by_name (output_bfd
, name
);
15265 /* PR ld/14397: Issue an error message if a required section is missing. */
15266 (*_bfd_error_handler
)
15267 (_("error: required section '%s' not found in the linker script"), name
);
15268 bfd_set_error (bfd_error_invalid_operation
);
15271 if (!htab
->symbian_p
)
15272 dyn
.d_un
.d_ptr
= s
->vma
;
15274 /* In the BPABI, tags in the PT_DYNAMIC section point
15275 at the file offset, not the memory address, for the
15276 convenience of the post linker. */
15277 dyn
.d_un
.d_ptr
= s
->filepos
;
15278 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15282 if (htab
->symbian_p
)
15287 s
= htab
->root
.srelplt
;
15288 BFD_ASSERT (s
!= NULL
);
15289 dyn
.d_un
.d_val
= s
->size
;
15290 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15295 if (!htab
->symbian_p
)
15297 /* My reading of the SVR4 ABI indicates that the
15298 procedure linkage table relocs (DT_JMPREL) should be
15299 included in the overall relocs (DT_REL). This is
15300 what Solaris does. However, UnixWare can not handle
15301 that case. Therefore, we override the DT_RELSZ entry
15302 here to make it not include the JMPREL relocs. Since
15303 the linker script arranges for .rel(a).plt to follow all
15304 other relocation sections, we don't have to worry
15305 about changing the DT_REL entry. */
15306 s
= htab
->root
.srelplt
;
15308 dyn
.d_un
.d_val
-= s
->size
;
15309 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15312 /* Fall through. */
15316 /* In the BPABI, the DT_REL tag must point at the file
15317 offset, not the VMA, of the first relocation
15318 section. So, we use code similar to that in
15319 elflink.c, but do not check for SHF_ALLOC on the
15320 relcoation section, since relocations sections are
15321 never allocated under the BPABI. The comments above
15322 about Unixware notwithstanding, we include all of the
15323 relocations here. */
15324 if (htab
->symbian_p
)
15327 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
15328 ? SHT_REL
: SHT_RELA
);
15329 dyn
.d_un
.d_val
= 0;
15330 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
15332 Elf_Internal_Shdr
*hdr
15333 = elf_elfsections (output_bfd
)[i
];
15334 if (hdr
->sh_type
== type
)
15336 if (dyn
.d_tag
== DT_RELSZ
15337 || dyn
.d_tag
== DT_RELASZ
)
15338 dyn
.d_un
.d_val
+= hdr
->sh_size
;
15339 else if ((ufile_ptr
) hdr
->sh_offset
15340 <= dyn
.d_un
.d_val
- 1)
15341 dyn
.d_un
.d_val
= hdr
->sh_offset
;
15344 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15348 case DT_TLSDESC_PLT
:
15349 s
= htab
->root
.splt
;
15350 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15351 + htab
->dt_tlsdesc_plt
);
15352 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15355 case DT_TLSDESC_GOT
:
15356 s
= htab
->root
.sgot
;
15357 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15358 + htab
->dt_tlsdesc_got
);
15359 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15362 /* Set the bottom bit of DT_INIT/FINI if the
15363 corresponding function is Thumb. */
15365 name
= info
->init_function
;
15368 name
= info
->fini_function
;
15370 /* If it wasn't set by elf_bfd_final_link
15371 then there is nothing to adjust. */
15372 if (dyn
.d_un
.d_val
!= 0)
15374 struct elf_link_hash_entry
* eh
;
15376 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
15377 FALSE
, FALSE
, TRUE
);
15379 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
15380 == ST_BRANCH_TO_THUMB
)
15382 dyn
.d_un
.d_val
|= 1;
15383 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15390 /* Fill in the first entry in the procedure linkage table. */
15391 if (splt
->size
> 0 && htab
->plt_header_size
)
15393 const bfd_vma
*plt0_entry
;
15394 bfd_vma got_address
, plt_address
, got_displacement
;
15396 /* Calculate the addresses of the GOT and PLT. */
15397 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
15398 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
15400 if (htab
->vxworks_p
)
15402 /* The VxWorks GOT is relocated by the dynamic linker.
15403 Therefore, we must emit relocations rather than simply
15404 computing the values now. */
15405 Elf_Internal_Rela rel
;
15407 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
15408 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15409 splt
->contents
+ 0);
15410 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15411 splt
->contents
+ 4);
15412 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15413 splt
->contents
+ 8);
15414 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
15416 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15417 rel
.r_offset
= plt_address
+ 12;
15418 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15420 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
15421 htab
->srelplt2
->contents
);
15423 else if (htab
->nacl_p
)
15424 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
15425 got_address
+ 8 - (plt_address
+ 16));
15426 else if (using_thumb_only (htab
))
15428 got_displacement
= got_address
- (plt_address
+ 12);
15430 plt0_entry
= elf32_thumb2_plt0_entry
;
15431 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15432 splt
->contents
+ 0);
15433 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15434 splt
->contents
+ 4);
15435 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15436 splt
->contents
+ 8);
15438 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
15442 got_displacement
= got_address
- (plt_address
+ 16);
15444 plt0_entry
= elf32_arm_plt0_entry
;
15445 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15446 splt
->contents
+ 0);
15447 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15448 splt
->contents
+ 4);
15449 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15450 splt
->contents
+ 8);
15451 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
15452 splt
->contents
+ 12);
15454 #ifdef FOUR_WORD_PLT
15455 /* The displacement value goes in the otherwise-unused
15456 last word of the second entry. */
15457 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
15459 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
15464 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15465 really seem like the right value. */
15466 if (splt
->output_section
->owner
== output_bfd
)
15467 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
15469 if (htab
->dt_tlsdesc_plt
)
15471 bfd_vma got_address
15472 = sgot
->output_section
->vma
+ sgot
->output_offset
;
15473 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
15474 + htab
->root
.sgot
->output_offset
);
15475 bfd_vma plt_address
15476 = splt
->output_section
->vma
+ splt
->output_offset
;
15478 arm_put_trampoline (htab
, output_bfd
,
15479 splt
->contents
+ htab
->dt_tlsdesc_plt
,
15480 dl_tlsdesc_lazy_trampoline
, 6);
15482 bfd_put_32 (output_bfd
,
15483 gotplt_address
+ htab
->dt_tlsdesc_got
15484 - (plt_address
+ htab
->dt_tlsdesc_plt
)
15485 - dl_tlsdesc_lazy_trampoline
[6],
15486 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
15487 bfd_put_32 (output_bfd
,
15488 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
15489 - dl_tlsdesc_lazy_trampoline
[7],
15490 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
15493 if (htab
->tls_trampoline
)
15495 arm_put_trampoline (htab
, output_bfd
,
15496 splt
->contents
+ htab
->tls_trampoline
,
15497 tls_trampoline
, 3);
15498 #ifdef FOUR_WORD_PLT
15499 bfd_put_32 (output_bfd
, 0x00000000,
15500 splt
->contents
+ htab
->tls_trampoline
+ 12);
15504 if (htab
->vxworks_p
15505 && !bfd_link_pic (info
)
15506 && htab
->root
.splt
->size
> 0)
15508 /* Correct the .rel(a).plt.unloaded relocations. They will have
15509 incorrect symbol indexes. */
15513 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
15514 / htab
->plt_entry_size
);
15515 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
15517 for (; num_plts
; num_plts
--)
15519 Elf_Internal_Rela rel
;
15521 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15522 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15523 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15524 p
+= RELOC_SIZE (htab
);
15526 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15527 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
15528 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15529 p
+= RELOC_SIZE (htab
);
15534 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
15535 /* NaCl uses a special first entry in .iplt too. */
15536 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
15538 /* Fill in the first three entries in the global offset table. */
15541 if (sgot
->size
> 0)
15544 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
15546 bfd_put_32 (output_bfd
,
15547 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
15549 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
15550 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
15553 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
15560 elf32_arm_post_process_headers (bfd
* abfd
, struct bfd_link_info
* link_info ATTRIBUTE_UNUSED
)
15562 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
15563 struct elf32_arm_link_hash_table
*globals
;
15564 struct elf_segment_map
*m
;
15566 i_ehdrp
= elf_elfheader (abfd
);
15568 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
15569 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
15571 _bfd_elf_post_process_headers (abfd
, link_info
);
15572 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
15576 globals
= elf32_arm_hash_table (link_info
);
15577 if (globals
!= NULL
&& globals
->byteswap_code
)
15578 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
15581 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
15582 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
15584 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
15585 if (abi
== AEABI_VFP_args_vfp
)
15586 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
15588 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
15591 /* Scan segment to set p_flags attribute if it contains only sections with
15592 SHF_ARM_NOREAD flag. */
15593 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
15599 for (j
= 0; j
< m
->count
; j
++)
15601 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_NOREAD
))
15607 m
->p_flags_valid
= 1;
15612 static enum elf_reloc_type_class
15613 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
15614 const asection
*rel_sec ATTRIBUTE_UNUSED
,
15615 const Elf_Internal_Rela
*rela
)
15617 switch ((int) ELF32_R_TYPE (rela
->r_info
))
15619 case R_ARM_RELATIVE
:
15620 return reloc_class_relative
;
15621 case R_ARM_JUMP_SLOT
:
15622 return reloc_class_plt
;
15624 return reloc_class_copy
;
15625 case R_ARM_IRELATIVE
:
15626 return reloc_class_ifunc
;
15628 return reloc_class_normal
;
15633 elf32_arm_final_write_processing (bfd
*abfd
, bfd_boolean linker ATTRIBUTE_UNUSED
)
15635 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
15638 /* Return TRUE if this is an unwinding table entry. */
15641 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
15643 return (CONST_STRNEQ (name
, ELF_STRING_ARM_unwind
)
15644 || CONST_STRNEQ (name
, ELF_STRING_ARM_unwind_once
));
15648 /* Set the type and flags for an ARM section. We do this by
15649 the section name, which is a hack, but ought to work. */
15652 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
15656 name
= bfd_get_section_name (abfd
, sec
);
15658 if (is_arm_elf_unwind_section_name (abfd
, name
))
15660 hdr
->sh_type
= SHT_ARM_EXIDX
;
15661 hdr
->sh_flags
|= SHF_LINK_ORDER
;
15664 if (sec
->flags
& SEC_ELF_NOREAD
)
15665 hdr
->sh_flags
|= SHF_ARM_NOREAD
;
15670 /* Handle an ARM specific section when reading an object file. This is
15671 called when bfd_section_from_shdr finds a section with an unknown
15675 elf32_arm_section_from_shdr (bfd
*abfd
,
15676 Elf_Internal_Shdr
* hdr
,
15680 /* There ought to be a place to keep ELF backend specific flags, but
15681 at the moment there isn't one. We just keep track of the
15682 sections by their name, instead. Fortunately, the ABI gives
15683 names for all the ARM specific sections, so we will probably get
15685 switch (hdr
->sh_type
)
15687 case SHT_ARM_EXIDX
:
15688 case SHT_ARM_PREEMPTMAP
:
15689 case SHT_ARM_ATTRIBUTES
:
15696 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
15702 static _arm_elf_section_data
*
15703 get_arm_elf_section_data (asection
* sec
)
15705 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
15706 return elf32_arm_section_data (sec
);
15714 struct bfd_link_info
*info
;
15717 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
15718 asection
*, struct elf_link_hash_entry
*);
15719 } output_arch_syminfo
;
15721 enum map_symbol_type
15729 /* Output a single mapping symbol. */
15732 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
15733 enum map_symbol_type type
,
15736 static const char *names
[3] = {"$a", "$t", "$d"};
15737 Elf_Internal_Sym sym
;
15739 sym
.st_value
= osi
->sec
->output_section
->vma
15740 + osi
->sec
->output_offset
15744 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
15745 sym
.st_shndx
= osi
->sec_shndx
;
15746 sym
.st_target_internal
= 0;
15747 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
15748 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
15751 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
15752 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
15755 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
15756 bfd_boolean is_iplt_entry_p
,
15757 union gotplt_union
*root_plt
,
15758 struct arm_plt_info
*arm_plt
)
15760 struct elf32_arm_link_hash_table
*htab
;
15761 bfd_vma addr
, plt_header_size
;
15763 if (root_plt
->offset
== (bfd_vma
) -1)
15766 htab
= elf32_arm_hash_table (osi
->info
);
15770 if (is_iplt_entry_p
)
15772 osi
->sec
= htab
->root
.iplt
;
15773 plt_header_size
= 0;
15777 osi
->sec
= htab
->root
.splt
;
15778 plt_header_size
= htab
->plt_header_size
;
15780 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
15781 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
15783 addr
= root_plt
->offset
& -2;
15784 if (htab
->symbian_p
)
15786 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
15788 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 4))
15791 else if (htab
->vxworks_p
)
15793 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
15795 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
15797 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
15799 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
15802 else if (htab
->nacl_p
)
15804 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
15807 else if (using_thumb_only (htab
))
15809 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
15814 bfd_boolean thumb_stub_p
;
15816 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
15819 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
15822 #ifdef FOUR_WORD_PLT
15823 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
15825 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
15828 /* A three-word PLT with no Thumb thunk contains only Arm code,
15829 so only need to output a mapping symbol for the first PLT entry and
15830 entries with thumb thunks. */
15831 if (thumb_stub_p
|| addr
== plt_header_size
)
15833 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
15842 /* Output mapping symbols for PLT entries associated with H. */
15845 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
15847 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
15848 struct elf32_arm_link_hash_entry
*eh
;
15850 if (h
->root
.type
== bfd_link_hash_indirect
)
15853 if (h
->root
.type
== bfd_link_hash_warning
)
15854 /* When warning symbols are created, they **replace** the "real"
15855 entry in the hash table, thus we never get to see the real
15856 symbol in a hash traversal. So look at it now. */
15857 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
15859 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15860 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
15861 &h
->plt
, &eh
->plt
);
15864 /* Output a single local symbol for a generated stub. */
15867 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
15868 bfd_vma offset
, bfd_vma size
)
15870 Elf_Internal_Sym sym
;
15872 sym
.st_value
= osi
->sec
->output_section
->vma
15873 + osi
->sec
->output_offset
15875 sym
.st_size
= size
;
15877 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
15878 sym
.st_shndx
= osi
->sec_shndx
;
15879 sym
.st_target_internal
= 0;
15880 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
15884 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
15887 struct elf32_arm_stub_hash_entry
*stub_entry
;
15888 asection
*stub_sec
;
15891 output_arch_syminfo
*osi
;
15892 const insn_sequence
*template_sequence
;
15893 enum stub_insn_type prev_type
;
15896 enum map_symbol_type sym_type
;
15898 /* Massage our args to the form they really have. */
15899 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
15900 osi
= (output_arch_syminfo
*) in_arg
;
15902 stub_sec
= stub_entry
->stub_sec
;
15904 /* Ensure this stub is attached to the current section being
15906 if (stub_sec
!= osi
->sec
)
15909 addr
= (bfd_vma
) stub_entry
->stub_offset
;
15910 stub_name
= stub_entry
->output_name
;
15912 template_sequence
= stub_entry
->stub_template
;
15913 switch (template_sequence
[0].type
)
15916 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
, stub_entry
->stub_size
))
15921 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
15922 stub_entry
->stub_size
))
15930 prev_type
= DATA_TYPE
;
15932 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
15934 switch (template_sequence
[i
].type
)
15937 sym_type
= ARM_MAP_ARM
;
15942 sym_type
= ARM_MAP_THUMB
;
15946 sym_type
= ARM_MAP_DATA
;
15954 if (template_sequence
[i
].type
!= prev_type
)
15956 prev_type
= template_sequence
[i
].type
;
15957 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
15961 switch (template_sequence
[i
].type
)
15985 /* Output mapping symbols for linker generated sections,
15986 and for those data-only sections that do not have a
15990 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
15991 struct bfd_link_info
*info
,
15993 int (*func
) (void *, const char *,
15994 Elf_Internal_Sym
*,
15996 struct elf_link_hash_entry
*))
15998 output_arch_syminfo osi
;
15999 struct elf32_arm_link_hash_table
*htab
;
16001 bfd_size_type size
;
16004 htab
= elf32_arm_hash_table (info
);
16008 check_use_blx (htab
);
16010 osi
.flaginfo
= flaginfo
;
16014 /* Add a $d mapping symbol to data-only sections that
16015 don't have any mapping symbol. This may result in (harmless) redundant
16016 mapping symbols. */
16017 for (input_bfd
= info
->input_bfds
;
16019 input_bfd
= input_bfd
->link
.next
)
16021 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
16022 for (osi
.sec
= input_bfd
->sections
;
16024 osi
.sec
= osi
.sec
->next
)
16026 if (osi
.sec
->output_section
!= NULL
16027 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
16029 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
16030 == SEC_HAS_CONTENTS
16031 && get_arm_elf_section_data (osi
.sec
) != NULL
16032 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
16033 && osi
.sec
->size
> 0
16034 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
16036 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16037 (output_bfd
, osi
.sec
->output_section
);
16038 if (osi
.sec_shndx
!= (int)SHN_BAD
)
16039 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
16044 /* ARM->Thumb glue. */
16045 if (htab
->arm_glue_size
> 0)
16047 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16048 ARM2THUMB_GLUE_SECTION_NAME
);
16050 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16051 (output_bfd
, osi
.sec
->output_section
);
16052 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
16053 || htab
->pic_veneer
)
16054 size
= ARM2THUMB_PIC_GLUE_SIZE
;
16055 else if (htab
->use_blx
)
16056 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
16058 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
16060 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
16062 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
16063 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
16067 /* Thumb->ARM glue. */
16068 if (htab
->thumb_glue_size
> 0)
16070 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16071 THUMB2ARM_GLUE_SECTION_NAME
);
16073 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16074 (output_bfd
, osi
.sec
->output_section
);
16075 size
= THUMB2ARM_GLUE_SIZE
;
16077 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
16079 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
16080 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
16084 /* ARMv4 BX veneers. */
16085 if (htab
->bx_glue_size
> 0)
16087 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16088 ARM_BX_GLUE_SECTION_NAME
);
16090 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16091 (output_bfd
, osi
.sec
->output_section
);
16093 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
16096 /* Long calls stubs. */
16097 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
16099 asection
* stub_sec
;
16101 for (stub_sec
= htab
->stub_bfd
->sections
;
16103 stub_sec
= stub_sec
->next
)
16105 /* Ignore non-stub sections. */
16106 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
16109 osi
.sec
= stub_sec
;
16111 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16112 (output_bfd
, osi
.sec
->output_section
);
16114 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
16118 /* Finally, output mapping symbols for the PLT. */
16119 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
16121 osi
.sec
= htab
->root
.splt
;
16122 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
16123 (output_bfd
, osi
.sec
->output_section
));
16125 /* Output mapping symbols for the plt header. SymbianOS does not have a
16127 if (htab
->vxworks_p
)
16129 /* VxWorks shared libraries have no PLT header. */
16130 if (!bfd_link_pic (info
))
16132 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16134 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
16138 else if (htab
->nacl_p
)
16140 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16143 else if (using_thumb_only (htab
))
16145 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
16147 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
16149 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
16152 else if (!htab
->symbian_p
)
16154 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16156 #ifndef FOUR_WORD_PLT
16157 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
16162 if (htab
->nacl_p
&& htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0)
16164 /* NaCl uses a special first entry in .iplt too. */
16165 osi
.sec
= htab
->root
.iplt
;
16166 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
16167 (output_bfd
, osi
.sec
->output_section
));
16168 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16171 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
16172 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
16174 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
16175 for (input_bfd
= info
->input_bfds
;
16177 input_bfd
= input_bfd
->link
.next
)
16179 struct arm_local_iplt_info
**local_iplt
;
16180 unsigned int i
, num_syms
;
16182 local_iplt
= elf32_arm_local_iplt (input_bfd
);
16183 if (local_iplt
!= NULL
)
16185 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
16186 for (i
= 0; i
< num_syms
; i
++)
16187 if (local_iplt
[i
] != NULL
16188 && !elf32_arm_output_plt_map_1 (&osi
, TRUE
,
16189 &local_iplt
[i
]->root
,
16190 &local_iplt
[i
]->arm
))
16195 if (htab
->dt_tlsdesc_plt
!= 0)
16197 /* Mapping symbols for the lazy tls trampoline. */
16198 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->dt_tlsdesc_plt
))
16201 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
16202 htab
->dt_tlsdesc_plt
+ 24))
16205 if (htab
->tls_trampoline
!= 0)
16207 /* Mapping symbols for the tls trampoline. */
16208 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
16210 #ifdef FOUR_WORD_PLT
16211 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
16212 htab
->tls_trampoline
+ 12))
16220 /* Allocate target specific section data. */
16223 elf32_arm_new_section_hook (bfd
*abfd
, asection
*sec
)
16225 if (!sec
->used_by_bfd
)
16227 _arm_elf_section_data
*sdata
;
16228 bfd_size_type amt
= sizeof (*sdata
);
16230 sdata
= (_arm_elf_section_data
*) bfd_zalloc (abfd
, amt
);
16233 sec
->used_by_bfd
= sdata
;
16236 return _bfd_elf_new_section_hook (abfd
, sec
);
16240 /* Used to order a list of mapping symbols by address. */
16243 elf32_arm_compare_mapping (const void * a
, const void * b
)
16245 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
16246 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
16248 if (amap
->vma
> bmap
->vma
)
16250 else if (amap
->vma
< bmap
->vma
)
16252 else if (amap
->type
> bmap
->type
)
16253 /* Ensure results do not depend on the host qsort for objects with
16254 multiple mapping symbols at the same address by sorting on type
16257 else if (amap
->type
< bmap
->type
)
16263 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16265 static unsigned long
16266 offset_prel31 (unsigned long addr
, bfd_vma offset
)
16268 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
16271 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16275 copy_exidx_entry (bfd
*output_bfd
, bfd_byte
*to
, bfd_byte
*from
, bfd_vma offset
)
16277 unsigned long first_word
= bfd_get_32 (output_bfd
, from
);
16278 unsigned long second_word
= bfd_get_32 (output_bfd
, from
+ 4);
16280 /* High bit of first word is supposed to be zero. */
16281 if ((first_word
& 0x80000000ul
) == 0)
16282 first_word
= offset_prel31 (first_word
, offset
);
16284 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16285 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16286 if ((second_word
!= 0x1) && ((second_word
& 0x80000000ul
) == 0))
16287 second_word
= offset_prel31 (second_word
, offset
);
16289 bfd_put_32 (output_bfd
, first_word
, to
);
16290 bfd_put_32 (output_bfd
, second_word
, to
+ 4);
16293 /* Data for make_branch_to_a8_stub(). */
16295 struct a8_branch_to_stub_data
16297 asection
*writing_section
;
16298 bfd_byte
*contents
;
16302 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16303 places for a particular section. */
16306 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
16309 struct elf32_arm_stub_hash_entry
*stub_entry
;
16310 struct a8_branch_to_stub_data
*data
;
16311 bfd_byte
*contents
;
16312 unsigned long branch_insn
;
16313 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
16314 bfd_signed_vma branch_offset
;
16318 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
16319 data
= (struct a8_branch_to_stub_data
*) in_arg
;
16321 if (stub_entry
->target_section
!= data
->writing_section
16322 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
16325 contents
= data
->contents
;
16327 /* We use target_section as Cortex-A8 erratum workaround stubs are only
16328 generated when both source and target are in the same section. */
16329 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
16330 + stub_entry
->target_section
->output_offset
16331 + stub_entry
->source_value
;
16333 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
16334 + stub_entry
->stub_sec
->output_offset
16335 + stub_entry
->stub_offset
;
16337 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
16338 veneered_insn_loc
&= ~3u;
16340 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
16342 abfd
= stub_entry
->target_section
->owner
;
16343 loc
= stub_entry
->source_value
;
16345 /* We attempt to avoid this condition by setting stubs_always_after_branch
16346 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
16347 This check is just to be on the safe side... */
16348 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
16350 (*_bfd_error_handler
) (_("%B: error: Cortex-A8 erratum stub is "
16351 "allocated in unsafe location"), abfd
);
16355 switch (stub_entry
->stub_type
)
16357 case arm_stub_a8_veneer_b
:
16358 case arm_stub_a8_veneer_b_cond
:
16359 branch_insn
= 0xf0009000;
16362 case arm_stub_a8_veneer_blx
:
16363 branch_insn
= 0xf000e800;
16366 case arm_stub_a8_veneer_bl
:
16368 unsigned int i1
, j1
, i2
, j2
, s
;
16370 branch_insn
= 0xf000d000;
16373 if (branch_offset
< -16777216 || branch_offset
> 16777214)
16375 /* There's not much we can do apart from complain if this
16377 (*_bfd_error_handler
) (_("%B: error: Cortex-A8 erratum stub out "
16378 "of range (input file too large)"), abfd
);
16382 /* i1 = not(j1 eor s), so:
16384 j1 = (not i1) eor s. */
16386 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
16387 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
16388 i2
= (branch_offset
>> 22) & 1;
16389 i1
= (branch_offset
>> 23) & 1;
16390 s
= (branch_offset
>> 24) & 1;
16393 branch_insn
|= j2
<< 11;
16394 branch_insn
|= j1
<< 13;
16395 branch_insn
|= s
<< 26;
16404 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
16405 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
16410 /* Beginning of stm32l4xx work-around. */
16412 /* Functions encoding instructions necessary for the emission of the
16413 fix-stm32l4xx-629360.
16414 Encoding is extracted from the
16415 ARM (C) Architecture Reference Manual
16416 ARMv7-A and ARMv7-R edition
16417 ARM DDI 0406C.b (ID072512). */
16419 static inline bfd_vma
16420 create_instruction_branch_absolute (int branch_offset
)
16422 /* A8.8.18 B (A8-334)
16423 B target_address (Encoding T4). */
16424 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16425 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16426 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16428 int s
= ((branch_offset
& 0x1000000) >> 24);
16429 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
16430 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
16432 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
16433 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16435 bfd_vma patched_inst
= 0xf0009000
16437 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
16438 | j1
<< 13 /* J1. */
16439 | j2
<< 11 /* J2. */
16440 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
16442 return patched_inst
;
16445 static inline bfd_vma
16446 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
16448 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16449 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16450 bfd_vma patched_inst
= 0xe8900000
16451 | (/*W=*/wback
<< 21)
16453 | (reg_mask
& 0x0000ffff);
16455 return patched_inst
;
16458 static inline bfd_vma
16459 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
16461 /* A8.8.60 LDMDB/LDMEA (A8-402)
16462 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16463 bfd_vma patched_inst
= 0xe9100000
16464 | (/*W=*/wback
<< 21)
16466 | (reg_mask
& 0x0000ffff);
16468 return patched_inst
;
16471 static inline bfd_vma
16472 create_instruction_mov (int target_reg
, int source_reg
)
16474 /* A8.8.103 MOV (register) (A8-486)
16475 MOV Rd, Rm (Encoding T1). */
16476 bfd_vma patched_inst
= 0x4600
16477 | (target_reg
& 0x7)
16478 | ((target_reg
& 0x8) >> 3) << 7
16479 | (source_reg
<< 3);
16481 return patched_inst
;
16484 static inline bfd_vma
16485 create_instruction_sub (int target_reg
, int source_reg
, int value
)
16487 /* A8.8.221 SUB (immediate) (A8-708)
16488 SUB Rd, Rn, #value (Encoding T3). */
16489 bfd_vma patched_inst
= 0xf1a00000
16490 | (target_reg
<< 8)
16491 | (source_reg
<< 16)
16493 | ((value
& 0x800) >> 11) << 26
16494 | ((value
& 0x700) >> 8) << 12
16497 return patched_inst
;
16500 static inline bfd_vma
16501 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
16504 /* A8.8.332 VLDM (A8-922)
16505 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16506 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
16507 | (/*W=*/wback
<< 21)
16509 | (num_words
& 0x000000ff)
16510 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
16511 | (first_reg
& 0x00000001) << 22;
16513 return patched_inst
;
16516 static inline bfd_vma
16517 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
16520 /* A8.8.332 VLDM (A8-922)
16521 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16522 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
16524 | (num_words
& 0x000000ff)
16525 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
16526 | (first_reg
& 0x00000001) << 22;
16528 return patched_inst
;
16531 static inline bfd_vma
16532 create_instruction_udf_w (int value
)
16534 /* A8.8.247 UDF (A8-758)
16535 Undefined (Encoding T2). */
16536 bfd_vma patched_inst
= 0xf7f0a000
16537 | (value
& 0x00000fff)
16538 | (value
& 0x000f0000) << 16;
16540 return patched_inst
;
16543 static inline bfd_vma
16544 create_instruction_udf (int value
)
16546 /* A8.8.247 UDF (A8-758)
16547 Undefined (Encoding T1). */
16548 bfd_vma patched_inst
= 0xde00
16551 return patched_inst
;
16554 /* Functions writing an instruction in memory, returning the next
16555 memory position to write to. */
16557 static inline bfd_byte
*
16558 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
16559 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
16561 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
16565 static inline bfd_byte
*
16566 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
16567 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
16569 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
16573 /* Function filling up a region in memory with T1 and T2 UDFs taking
16574 care of alignment. */
16577 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
16579 const bfd_byte
* const base_stub_contents
,
16580 bfd_byte
* const from_stub_contents
,
16581 const bfd_byte
* const end_stub_contents
)
16583 bfd_byte
*current_stub_contents
= from_stub_contents
;
16585 /* Fill the remaining of the stub with deterministic contents : UDF
16587 Check if realignment is needed on modulo 4 frontier using T1, to
16589 if ((current_stub_contents
< end_stub_contents
)
16590 && !((current_stub_contents
- base_stub_contents
) % 2)
16591 && ((current_stub_contents
- base_stub_contents
) % 4))
16592 current_stub_contents
=
16593 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16594 create_instruction_udf (0));
16596 for (; current_stub_contents
< end_stub_contents
;)
16597 current_stub_contents
=
16598 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16599 create_instruction_udf_w (0));
16601 return current_stub_contents
;
16604 /* Functions writing the stream of instructions equivalent to the
16605 derived sequence for ldmia, ldmdb, vldm respectively. */
16608 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
16610 const insn32 initial_insn
,
16611 const bfd_byte
*const initial_insn_addr
,
16612 bfd_byte
*const base_stub_contents
)
16614 int wback
= (initial_insn
& 0x00200000) >> 21;
16615 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
16616 int insn_all_registers
= initial_insn
& 0x0000ffff;
16617 int insn_low_registers
, insn_high_registers
;
16618 int usable_register_mask
;
16619 int nb_registers
= popcount (insn_all_registers
);
16620 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
16621 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
16622 bfd_byte
*current_stub_contents
= base_stub_contents
;
16624 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
16626 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16627 smaller than 8 registers load sequences that do not cause the
16629 if (nb_registers
<= 8)
16631 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
16632 current_stub_contents
=
16633 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16636 /* B initial_insn_addr+4. */
16638 current_stub_contents
=
16639 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16640 create_instruction_branch_absolute
16641 (initial_insn_addr
- current_stub_contents
));
16644 /* Fill the remaining of the stub with deterministic contents. */
16645 current_stub_contents
=
16646 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
16647 base_stub_contents
, current_stub_contents
,
16648 base_stub_contents
+
16649 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
16654 /* - reg_list[13] == 0. */
16655 BFD_ASSERT ((insn_all_registers
& (1 << 13))==0);
16657 /* - reg_list[14] & reg_list[15] != 1. */
16658 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
16660 /* - if (wback==1) reg_list[rn] == 0. */
16661 BFD_ASSERT (!wback
|| !restore_rn
);
16663 /* - nb_registers > 8. */
16664 BFD_ASSERT (popcount (insn_all_registers
) > 8);
16666 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
16668 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
16669 - One with the 7 lowest registers (register mask 0x007F)
16670 This LDM will finally contain between 2 and 7 registers
16671 - One with the 7 highest registers (register mask 0xDF80)
16672 This ldm will finally contain between 2 and 7 registers. */
16673 insn_low_registers
= insn_all_registers
& 0x007F;
16674 insn_high_registers
= insn_all_registers
& 0xDF80;
16676 /* A spare register may be needed during this veneer to temporarily
16677 handle the base register. This register will be restored with the
16678 last LDM operation.
16679 The usable register may be any general purpose register (that
16680 excludes PC, SP, LR : register mask is 0x1FFF). */
16681 usable_register_mask
= 0x1FFF;
16683 /* Generate the stub function. */
16686 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
16687 current_stub_contents
=
16688 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16689 create_instruction_ldmia
16690 (rn
, /*wback=*/1, insn_low_registers
));
16692 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
16693 current_stub_contents
=
16694 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16695 create_instruction_ldmia
16696 (rn
, /*wback=*/1, insn_high_registers
));
16699 /* B initial_insn_addr+4. */
16700 current_stub_contents
=
16701 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16702 create_instruction_branch_absolute
16703 (initial_insn_addr
- current_stub_contents
));
16706 else /* if (!wback). */
16710 /* If Rn is not part of the high-register-list, move it there. */
16711 if (!(insn_high_registers
& (1 << rn
)))
16713 /* Choose a Ri in the high-register-list that will be restored. */
16714 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
16717 current_stub_contents
=
16718 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16719 create_instruction_mov (ri
, rn
));
16722 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
16723 current_stub_contents
=
16724 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16725 create_instruction_ldmia
16726 (ri
, /*wback=*/1, insn_low_registers
));
16728 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
16729 current_stub_contents
=
16730 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16731 create_instruction_ldmia
16732 (ri
, /*wback=*/0, insn_high_registers
));
16736 /* B initial_insn_addr+4. */
16737 current_stub_contents
=
16738 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16739 create_instruction_branch_absolute
16740 (initial_insn_addr
- current_stub_contents
));
16744 /* Fill the remaining of the stub with deterministic contents. */
16745 current_stub_contents
=
16746 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
16747 base_stub_contents
, current_stub_contents
,
16748 base_stub_contents
+
16749 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
16753 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table
* htab
,
16755 const insn32 initial_insn
,
16756 const bfd_byte
*const initial_insn_addr
,
16757 bfd_byte
*const base_stub_contents
)
16759 int wback
= (initial_insn
& 0x00200000) >> 21;
16760 int ri
, rn
= (initial_insn
& 0x000f0000) >> 16;
16761 int insn_all_registers
= initial_insn
& 0x0000ffff;
16762 int insn_low_registers
, insn_high_registers
;
16763 int usable_register_mask
;
16764 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
16765 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
16766 int nb_registers
= popcount (insn_all_registers
);
16767 bfd_byte
*current_stub_contents
= base_stub_contents
;
16769 BFD_ASSERT (is_thumb2_ldmdb (initial_insn
));
16771 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16772 smaller than 8 registers load sequences that do not cause the
16774 if (nb_registers
<= 8)
16776 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
16777 current_stub_contents
=
16778 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16781 /* B initial_insn_addr+4. */
16782 current_stub_contents
=
16783 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16784 create_instruction_branch_absolute
16785 (initial_insn_addr
- current_stub_contents
));
16787 /* Fill the remaining of the stub with deterministic contents. */
16788 current_stub_contents
=
16789 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
16790 base_stub_contents
, current_stub_contents
,
16791 base_stub_contents
+
16792 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
16797 /* - reg_list[13] == 0. */
16798 BFD_ASSERT ((insn_all_registers
& (1 << 13)) == 0);
16800 /* - reg_list[14] & reg_list[15] != 1. */
16801 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
16803 /* - if (wback==1) reg_list[rn] == 0. */
16804 BFD_ASSERT (!wback
|| !restore_rn
);
16806 /* - nb_registers > 8. */
16807 BFD_ASSERT (popcount (insn_all_registers
) > 8);
16809 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
16811 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
16812 - One with the 7 lowest registers (register mask 0x007F)
16813 This LDM will finally contain between 2 and 7 registers
16814 - One with the 7 highest registers (register mask 0xDF80)
16815 This ldm will finally contain between 2 and 7 registers. */
16816 insn_low_registers
= insn_all_registers
& 0x007F;
16817 insn_high_registers
= insn_all_registers
& 0xDF80;
16819 /* A spare register may be needed during this veneer to temporarily
16820 handle the base register. This register will be restored with
16821 the last LDM operation.
16822 The usable register may be any general purpose register (that excludes
16823 PC, SP, LR : register mask is 0x1FFF). */
16824 usable_register_mask
= 0x1FFF;
16826 /* Generate the stub function. */
16827 if (!wback
&& !restore_pc
&& !restore_rn
)
16829 /* Choose a Ri in the low-register-list that will be restored. */
16830 ri
= ctz (insn_low_registers
& usable_register_mask
& ~(1 << rn
));
16833 current_stub_contents
=
16834 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16835 create_instruction_mov (ri
, rn
));
16837 /* LDMDB Ri!, {R-high-register-list}. */
16838 current_stub_contents
=
16839 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16840 create_instruction_ldmdb
16841 (ri
, /*wback=*/1, insn_high_registers
));
16843 /* LDMDB Ri, {R-low-register-list}. */
16844 current_stub_contents
=
16845 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16846 create_instruction_ldmdb
16847 (ri
, /*wback=*/0, insn_low_registers
));
16849 /* B initial_insn_addr+4. */
16850 current_stub_contents
=
16851 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16852 create_instruction_branch_absolute
16853 (initial_insn_addr
- current_stub_contents
));
16855 else if (wback
&& !restore_pc
&& !restore_rn
)
16857 /* LDMDB Rn!, {R-high-register-list}. */
16858 current_stub_contents
=
16859 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16860 create_instruction_ldmdb
16861 (rn
, /*wback=*/1, insn_high_registers
));
16863 /* LDMDB Rn!, {R-low-register-list}. */
16864 current_stub_contents
=
16865 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16866 create_instruction_ldmdb
16867 (rn
, /*wback=*/1, insn_low_registers
));
16869 /* B initial_insn_addr+4. */
16870 current_stub_contents
=
16871 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16872 create_instruction_branch_absolute
16873 (initial_insn_addr
- current_stub_contents
));
16875 else if (!wback
&& restore_pc
&& !restore_rn
)
16877 /* Choose a Ri in the high-register-list that will be restored. */
16878 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
16880 /* SUB Ri, Rn, #(4*nb_registers). */
16881 current_stub_contents
=
16882 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16883 create_instruction_sub (ri
, rn
, (4 * nb_registers
)));
16885 /* LDMIA Ri!, {R-low-register-list}. */
16886 current_stub_contents
=
16887 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16888 create_instruction_ldmia
16889 (ri
, /*wback=*/1, insn_low_registers
));
16891 /* LDMIA Ri, {R-high-register-list}. */
16892 current_stub_contents
=
16893 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16894 create_instruction_ldmia
16895 (ri
, /*wback=*/0, insn_high_registers
));
16897 else if (wback
&& restore_pc
&& !restore_rn
)
16899 /* Choose a Ri in the high-register-list that will be restored. */
16900 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
16902 /* SUB Rn, Rn, #(4*nb_registers) */
16903 current_stub_contents
=
16904 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16905 create_instruction_sub (rn
, rn
, (4 * nb_registers
)));
16908 current_stub_contents
=
16909 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16910 create_instruction_mov (ri
, rn
));
16912 /* LDMIA Ri!, {R-low-register-list}. */
16913 current_stub_contents
=
16914 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16915 create_instruction_ldmia
16916 (ri
, /*wback=*/1, insn_low_registers
));
16918 /* LDMIA Ri, {R-high-register-list}. */
16919 current_stub_contents
=
16920 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16921 create_instruction_ldmia
16922 (ri
, /*wback=*/0, insn_high_registers
));
16924 else if (!wback
&& !restore_pc
&& restore_rn
)
16927 if (!(insn_low_registers
& (1 << rn
)))
16929 /* Choose a Ri in the low-register-list that will be restored. */
16930 ri
= ctz (insn_low_registers
& usable_register_mask
& ~(1 << rn
));
16933 current_stub_contents
=
16934 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16935 create_instruction_mov (ri
, rn
));
16938 /* LDMDB Ri!, {R-high-register-list}. */
16939 current_stub_contents
=
16940 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16941 create_instruction_ldmdb
16942 (ri
, /*wback=*/1, insn_high_registers
));
16944 /* LDMDB Ri, {R-low-register-list}. */
16945 current_stub_contents
=
16946 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16947 create_instruction_ldmdb
16948 (ri
, /*wback=*/0, insn_low_registers
));
16950 /* B initial_insn_addr+4. */
16951 current_stub_contents
=
16952 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16953 create_instruction_branch_absolute
16954 (initial_insn_addr
- current_stub_contents
));
16956 else if (!wback
&& restore_pc
&& restore_rn
)
16959 if (!(insn_high_registers
& (1 << rn
)))
16961 /* Choose a Ri in the high-register-list that will be restored. */
16962 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
16965 /* SUB Ri, Rn, #(4*nb_registers). */
16966 current_stub_contents
=
16967 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16968 create_instruction_sub (ri
, rn
, (4 * nb_registers
)));
16970 /* LDMIA Ri!, {R-low-register-list}. */
16971 current_stub_contents
=
16972 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16973 create_instruction_ldmia
16974 (ri
, /*wback=*/1, insn_low_registers
));
16976 /* LDMIA Ri, {R-high-register-list}. */
16977 current_stub_contents
=
16978 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16979 create_instruction_ldmia
16980 (ri
, /*wback=*/0, insn_high_registers
));
16982 else if (wback
&& restore_rn
)
16984 /* The assembler should not have accepted to encode this. */
16985 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
16986 "undefined behavior.\n");
16989 /* Fill the remaining of the stub with deterministic contents. */
16990 current_stub_contents
=
16991 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
16992 base_stub_contents
, current_stub_contents
,
16993 base_stub_contents
+
16994 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
16999 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table
* htab
,
17001 const insn32 initial_insn
,
17002 const bfd_byte
*const initial_insn_addr
,
17003 bfd_byte
*const base_stub_contents
)
17005 int num_words
= ((unsigned int) initial_insn
<< 24) >> 24;
17006 bfd_byte
*current_stub_contents
= base_stub_contents
;
17008 BFD_ASSERT (is_thumb2_vldm (initial_insn
));
17010 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17011 smaller than 8 words load sequences that do not cause the
17013 if (num_words
<= 8)
17015 /* Untouched instruction. */
17016 current_stub_contents
=
17017 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
17020 /* B initial_insn_addr+4. */
17021 current_stub_contents
=
17022 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
17023 create_instruction_branch_absolute
17024 (initial_insn_addr
- current_stub_contents
));
17028 bfd_boolean is_dp
= /* DP encoding. */
17029 (initial_insn
& 0xfe100f00) == 0xec100b00;
17030 bfd_boolean is_ia_nobang
= /* (IA without !). */
17031 (((initial_insn
<< 7) >> 28) & 0xd) == 0x4;
17032 bfd_boolean is_ia_bang
= /* (IA with !) - includes VPOP. */
17033 (((initial_insn
<< 7) >> 28) & 0xd) == 0x5;
17034 bfd_boolean is_db_bang
= /* (DB with !). */
17035 (((initial_insn
<< 7) >> 28) & 0xd) == 0x9;
17036 int base_reg
= ((unsigned int) initial_insn
<< 12) >> 28;
17037 /* d = UInt (Vd:D);. */
17038 int first_reg
= ((((unsigned int) initial_insn
<< 16) >> 28) << 1)
17039 | (((unsigned int)initial_insn
<< 9) >> 31);
17041 /* Compute the number of 8-words chunks needed to split. */
17042 int chunks
= (num_words
% 8) ? (num_words
/ 8 + 1) : (num_words
/ 8);
17045 /* The test coverage has been done assuming the following
17046 hypothesis that exactly one of the previous is_ predicates is
17048 BFD_ASSERT ( (is_ia_nobang
^ is_ia_bang
^ is_db_bang
)
17049 && !(is_ia_nobang
& is_ia_bang
& is_db_bang
));
17051 /* We treat the cutting of the words in one pass for all
17052 cases, then we emit the adjustments:
17055 -> vldm rx!, {8_words_or_less} for each needed 8_word
17056 -> sub rx, rx, #size (list)
17059 -> vldm rx!, {8_words_or_less} for each needed 8_word
17060 This also handles vpop instruction (when rx is sp)
17063 -> vldmb rx!, {8_words_or_less} for each needed 8_word. */
17064 for (chunk
= 0; chunk
< chunks
; ++chunk
)
17066 bfd_vma new_insn
= 0;
17068 if (is_ia_nobang
|| is_ia_bang
)
17070 new_insn
= create_instruction_vldmia
17074 chunks
- (chunk
+ 1) ?
17075 8 : num_words
- chunk
* 8,
17076 first_reg
+ chunk
* 8);
17078 else if (is_db_bang
)
17080 new_insn
= create_instruction_vldmdb
17083 chunks
- (chunk
+ 1) ?
17084 8 : num_words
- chunk
* 8,
17085 first_reg
+ chunk
* 8);
17089 current_stub_contents
=
17090 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
17094 /* Only this case requires the base register compensation
17098 current_stub_contents
=
17099 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
17100 create_instruction_sub
17101 (base_reg
, base_reg
, 4*num_words
));
17104 /* B initial_insn_addr+4. */
17105 current_stub_contents
=
17106 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
17107 create_instruction_branch_absolute
17108 (initial_insn_addr
- current_stub_contents
));
17111 /* Fill the remaining of the stub with deterministic contents. */
17112 current_stub_contents
=
17113 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
17114 base_stub_contents
, current_stub_contents
,
17115 base_stub_contents
+
17116 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
17120 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table
* htab
,
17122 const insn32 wrong_insn
,
17123 const bfd_byte
*const wrong_insn_addr
,
17124 bfd_byte
*const stub_contents
)
17126 if (is_thumb2_ldmia (wrong_insn
))
17127 stm32l4xx_create_replacing_stub_ldmia (htab
, output_bfd
,
17128 wrong_insn
, wrong_insn_addr
,
17130 else if (is_thumb2_ldmdb (wrong_insn
))
17131 stm32l4xx_create_replacing_stub_ldmdb (htab
, output_bfd
,
17132 wrong_insn
, wrong_insn_addr
,
17134 else if (is_thumb2_vldm (wrong_insn
))
17135 stm32l4xx_create_replacing_stub_vldm (htab
, output_bfd
,
17136 wrong_insn
, wrong_insn_addr
,
17140 /* End of stm32l4xx work-around. */
17144 elf32_arm_add_relocation (bfd
*output_bfd
, struct bfd_link_info
*info
,
17145 asection
*output_sec
, Elf_Internal_Rela
*rel
)
17147 BFD_ASSERT (output_sec
&& rel
);
17148 struct bfd_elf_section_reloc_data
*output_reldata
;
17149 struct elf32_arm_link_hash_table
*htab
;
17150 struct bfd_elf_section_data
*oesd
= elf_section_data (output_sec
);
17151 Elf_Internal_Shdr
*rel_hdr
;
17156 rel_hdr
= oesd
->rel
.hdr
;
17157 output_reldata
= &(oesd
->rel
);
17159 else if (oesd
->rela
.hdr
)
17161 rel_hdr
= oesd
->rela
.hdr
;
17162 output_reldata
= &(oesd
->rela
);
17169 bfd_byte
*erel
= rel_hdr
->contents
;
17170 erel
+= output_reldata
->count
* rel_hdr
->sh_entsize
;
17171 htab
= elf32_arm_hash_table (info
);
17172 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, erel
);
17173 output_reldata
->count
++;
17176 /* Do code byteswapping. Return FALSE afterwards so that the section is
17177 written out as normal. */
17180 elf32_arm_write_section (bfd
*output_bfd
,
17181 struct bfd_link_info
*link_info
,
17183 bfd_byte
*contents
)
17185 unsigned int mapcount
, errcount
;
17186 _arm_elf_section_data
*arm_data
;
17187 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
17188 elf32_arm_section_map
*map
;
17189 elf32_vfp11_erratum_list
*errnode
;
17190 elf32_stm32l4xx_erratum_list
*stm32l4xx_errnode
;
17193 bfd_vma offset
= sec
->output_section
->vma
+ sec
->output_offset
;
17197 if (globals
== NULL
)
17200 /* If this section has not been allocated an _arm_elf_section_data
17201 structure then we cannot record anything. */
17202 arm_data
= get_arm_elf_section_data (sec
);
17203 if (arm_data
== NULL
)
17206 mapcount
= arm_data
->mapcount
;
17207 map
= arm_data
->map
;
17208 errcount
= arm_data
->erratumcount
;
17212 unsigned int endianflip
= bfd_big_endian (output_bfd
) ? 3 : 0;
17214 for (errnode
= arm_data
->erratumlist
; errnode
!= 0;
17215 errnode
= errnode
->next
)
17217 bfd_vma target
= errnode
->vma
- offset
;
17219 switch (errnode
->type
)
17221 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
17223 bfd_vma branch_to_veneer
;
17224 /* Original condition code of instruction, plus bit mask for
17225 ARM B instruction. */
17226 unsigned int insn
= (errnode
->u
.b
.vfp_insn
& 0xf0000000)
17229 /* The instruction is before the label. */
17232 /* Above offset included in -4 below. */
17233 branch_to_veneer
= errnode
->u
.b
.veneer
->vma
17234 - errnode
->vma
- 4;
17236 if ((signed) branch_to_veneer
< -(1 << 25)
17237 || (signed) branch_to_veneer
>= (1 << 25))
17238 (*_bfd_error_handler
) (_("%B: error: VFP11 veneer out of "
17239 "range"), output_bfd
);
17241 insn
|= (branch_to_veneer
>> 2) & 0xffffff;
17242 contents
[endianflip
^ target
] = insn
& 0xff;
17243 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
17244 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
17245 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
17249 case VFP11_ERRATUM_ARM_VENEER
:
17251 bfd_vma branch_from_veneer
;
17254 /* Take size of veneer into account. */
17255 branch_from_veneer
= errnode
->u
.v
.branch
->vma
17256 - errnode
->vma
- 12;
17258 if ((signed) branch_from_veneer
< -(1 << 25)
17259 || (signed) branch_from_veneer
>= (1 << 25))
17260 (*_bfd_error_handler
) (_("%B: error: VFP11 veneer out of "
17261 "range"), output_bfd
);
17263 /* Original instruction. */
17264 insn
= errnode
->u
.v
.branch
->u
.b
.vfp_insn
;
17265 contents
[endianflip
^ target
] = insn
& 0xff;
17266 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
17267 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
17268 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
17270 /* Branch back to insn after original insn. */
17271 insn
= 0xea000000 | ((branch_from_veneer
>> 2) & 0xffffff);
17272 contents
[endianflip
^ (target
+ 4)] = insn
& 0xff;
17273 contents
[endianflip
^ (target
+ 5)] = (insn
>> 8) & 0xff;
17274 contents
[endianflip
^ (target
+ 6)] = (insn
>> 16) & 0xff;
17275 contents
[endianflip
^ (target
+ 7)] = (insn
>> 24) & 0xff;
17285 if (arm_data
->stm32l4xx_erratumcount
!= 0)
17287 for (stm32l4xx_errnode
= arm_data
->stm32l4xx_erratumlist
;
17288 stm32l4xx_errnode
!= 0;
17289 stm32l4xx_errnode
= stm32l4xx_errnode
->next
)
17291 bfd_vma target
= stm32l4xx_errnode
->vma
- offset
;
17293 switch (stm32l4xx_errnode
->type
)
17295 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
17298 bfd_vma branch_to_veneer
=
17299 stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
;
17301 if ((signed) branch_to_veneer
< -(1 << 24)
17302 || (signed) branch_to_veneer
>= (1 << 24))
17304 bfd_vma out_of_range
=
17305 ((signed) branch_to_veneer
< -(1 << 24)) ?
17306 - branch_to_veneer
- (1 << 24) :
17307 ((signed) branch_to_veneer
>= (1 << 24)) ?
17308 branch_to_veneer
- (1 << 24) : 0;
17310 (*_bfd_error_handler
)
17311 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17312 "Jump out of range by %ld bytes. "
17313 "Cannot encode branch instruction. "),
17315 (long) (stm32l4xx_errnode
->vma
- 4),
17320 insn
= create_instruction_branch_absolute
17321 (stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
);
17323 /* The instruction is before the label. */
17326 put_thumb2_insn (globals
, output_bfd
,
17327 (bfd_vma
) insn
, contents
+ target
);
17331 case STM32L4XX_ERRATUM_VENEER
:
17334 bfd_byte
* veneer_r
;
17337 veneer
= contents
+ target
;
17339 + stm32l4xx_errnode
->u
.b
.veneer
->vma
17340 - stm32l4xx_errnode
->vma
- 4;
17342 if ((signed) (veneer_r
- veneer
-
17343 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
>
17344 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
?
17345 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
:
17346 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
) < -(1 << 24)
17347 || (signed) (veneer_r
- veneer
) >= (1 << 24))
17349 (*_bfd_error_handler
) (_("%B: error: Cannot create STM32L4XX "
17350 "veneer."), output_bfd
);
17354 /* Original instruction. */
17355 insn
= stm32l4xx_errnode
->u
.v
.branch
->u
.b
.insn
;
17357 stm32l4xx_create_replacing_stub
17358 (globals
, output_bfd
, insn
, (void*)veneer_r
, (void*)veneer
);
17368 if (arm_data
->elf
.this_hdr
.sh_type
== SHT_ARM_EXIDX
)
17370 arm_unwind_table_edit
*edit_node
17371 = arm_data
->u
.exidx
.unwind_edit_list
;
17372 /* Now, sec->size is the size of the section we will write. The original
17373 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17374 markers) was sec->rawsize. (This isn't the case if we perform no
17375 edits, then rawsize will be zero and we should use size). */
17376 bfd_byte
*edited_contents
= (bfd_byte
*) bfd_malloc (sec
->size
);
17377 unsigned int input_size
= sec
->rawsize
? sec
->rawsize
: sec
->size
;
17378 unsigned int in_index
, out_index
;
17379 bfd_vma add_to_offsets
= 0;
17381 for (in_index
= 0, out_index
= 0; in_index
* 8 < input_size
|| edit_node
;)
17385 unsigned int edit_index
= edit_node
->index
;
17387 if (in_index
< edit_index
&& in_index
* 8 < input_size
)
17389 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
17390 contents
+ in_index
* 8, add_to_offsets
);
17394 else if (in_index
== edit_index
17395 || (in_index
* 8 >= input_size
17396 && edit_index
== UINT_MAX
))
17398 switch (edit_node
->type
)
17400 case DELETE_EXIDX_ENTRY
:
17402 add_to_offsets
+= 8;
17405 case INSERT_EXIDX_CANTUNWIND_AT_END
:
17407 asection
*text_sec
= edit_node
->linked_section
;
17408 bfd_vma text_offset
= text_sec
->output_section
->vma
17409 + text_sec
->output_offset
17411 bfd_vma exidx_offset
= offset
+ out_index
* 8;
17412 unsigned long prel31_offset
;
17414 /* Note: this is meant to be equivalent to an
17415 R_ARM_PREL31 relocation. These synthetic
17416 EXIDX_CANTUNWIND markers are not relocated by the
17417 usual BFD method. */
17418 prel31_offset
= (text_offset
- exidx_offset
)
17420 if (bfd_link_relocatable (link_info
))
17422 /* Here relocation for new EXIDX_CANTUNWIND is
17423 created, so there is no need to
17424 adjust offset by hand. */
17425 prel31_offset
= text_sec
->output_offset
17428 /* New relocation entity. */
17429 asection
*text_out
= text_sec
->output_section
;
17430 Elf_Internal_Rela rel
;
17432 rel
.r_offset
= exidx_offset
;
17433 rel
.r_info
= ELF32_R_INFO (text_out
->target_index
,
17436 elf32_arm_add_relocation (output_bfd
, link_info
,
17437 sec
->output_section
,
17441 /* First address we can't unwind. */
17442 bfd_put_32 (output_bfd
, prel31_offset
,
17443 &edited_contents
[out_index
* 8]);
17445 /* Code for EXIDX_CANTUNWIND. */
17446 bfd_put_32 (output_bfd
, 0x1,
17447 &edited_contents
[out_index
* 8 + 4]);
17450 add_to_offsets
-= 8;
17455 edit_node
= edit_node
->next
;
17460 /* No more edits, copy remaining entries verbatim. */
17461 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
17462 contents
+ in_index
* 8, add_to_offsets
);
17468 if (!(sec
->flags
& SEC_EXCLUDE
) && !(sec
->flags
& SEC_NEVER_LOAD
))
17469 bfd_set_section_contents (output_bfd
, sec
->output_section
,
17471 (file_ptr
) sec
->output_offset
, sec
->size
);
17476 /* Fix code to point to Cortex-A8 erratum stubs. */
17477 if (globals
->fix_cortex_a8
)
17479 struct a8_branch_to_stub_data data
;
17481 data
.writing_section
= sec
;
17482 data
.contents
= contents
;
17484 bfd_hash_traverse (& globals
->stub_hash_table
, make_branch_to_a8_stub
,
17491 if (globals
->byteswap_code
)
17493 qsort (map
, mapcount
, sizeof (* map
), elf32_arm_compare_mapping
);
17496 for (i
= 0; i
< mapcount
; i
++)
17498 if (i
== mapcount
- 1)
17501 end
= map
[i
+ 1].vma
;
17503 switch (map
[i
].type
)
17506 /* Byte swap code words. */
17507 while (ptr
+ 3 < end
)
17509 tmp
= contents
[ptr
];
17510 contents
[ptr
] = contents
[ptr
+ 3];
17511 contents
[ptr
+ 3] = tmp
;
17512 tmp
= contents
[ptr
+ 1];
17513 contents
[ptr
+ 1] = contents
[ptr
+ 2];
17514 contents
[ptr
+ 2] = tmp
;
17520 /* Byte swap code halfwords. */
17521 while (ptr
+ 1 < end
)
17523 tmp
= contents
[ptr
];
17524 contents
[ptr
] = contents
[ptr
+ 1];
17525 contents
[ptr
+ 1] = tmp
;
17531 /* Leave data alone. */
17539 arm_data
->mapcount
= -1;
17540 arm_data
->mapsize
= 0;
17541 arm_data
->map
= NULL
;
17546 /* Mangle thumb function symbols as we read them in. */
17549 elf32_arm_swap_symbol_in (bfd
* abfd
,
17552 Elf_Internal_Sym
*dst
)
17554 if (!bfd_elf32_swap_symbol_in (abfd
, psrc
, pshn
, dst
))
17556 dst
->st_target_internal
= 0;
17558 /* New EABI objects mark thumb function symbols by setting the low bit of
17560 if (ELF_ST_TYPE (dst
->st_info
) == STT_FUNC
17561 || ELF_ST_TYPE (dst
->st_info
) == STT_GNU_IFUNC
)
17563 if (dst
->st_value
& 1)
17565 dst
->st_value
&= ~(bfd_vma
) 1;
17566 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
,
17567 ST_BRANCH_TO_THUMB
);
17570 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_ARM
);
17572 else if (ELF_ST_TYPE (dst
->st_info
) == STT_ARM_TFUNC
)
17574 dst
->st_info
= ELF_ST_INFO (ELF_ST_BIND (dst
->st_info
), STT_FUNC
);
17575 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_THUMB
);
17577 else if (ELF_ST_TYPE (dst
->st_info
) == STT_SECTION
)
17578 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_LONG
);
17580 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_UNKNOWN
);
17586 /* Mangle thumb function symbols as we write them out. */
17589 elf32_arm_swap_symbol_out (bfd
*abfd
,
17590 const Elf_Internal_Sym
*src
,
17594 Elf_Internal_Sym newsym
;
17596 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17597 of the address set, as per the new EABI. We do this unconditionally
17598 because objcopy does not set the elf header flags until after
17599 it writes out the symbol table. */
17600 if (ARM_GET_SYM_BRANCH_TYPE (src
->st_target_internal
) == ST_BRANCH_TO_THUMB
)
17603 if (ELF_ST_TYPE (src
->st_info
) != STT_GNU_IFUNC
)
17604 newsym
.st_info
= ELF_ST_INFO (ELF_ST_BIND (src
->st_info
), STT_FUNC
);
17605 if (newsym
.st_shndx
!= SHN_UNDEF
)
17607 /* Do this only for defined symbols. At link type, the static
17608 linker will simulate the work of dynamic linker of resolving
17609 symbols and will carry over the thumbness of found symbols to
17610 the output symbol table. It's not clear how it happens, but
17611 the thumbness of undefined symbols can well be different at
17612 runtime, and writing '1' for them will be confusing for users
17613 and possibly for dynamic linker itself.
17615 newsym
.st_value
|= 1;
17620 bfd_elf32_swap_symbol_out (abfd
, src
, cdst
, shndx
);
17623 /* Add the PT_ARM_EXIDX program header. */
17626 elf32_arm_modify_segment_map (bfd
*abfd
,
17627 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
17629 struct elf_segment_map
*m
;
17632 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
17633 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
17635 /* If there is already a PT_ARM_EXIDX header, then we do not
17636 want to add another one. This situation arises when running
17637 "strip"; the input binary already has the header. */
17638 m
= elf_seg_map (abfd
);
17639 while (m
&& m
->p_type
!= PT_ARM_EXIDX
)
17643 m
= (struct elf_segment_map
*)
17644 bfd_zalloc (abfd
, sizeof (struct elf_segment_map
));
17647 m
->p_type
= PT_ARM_EXIDX
;
17649 m
->sections
[0] = sec
;
17651 m
->next
= elf_seg_map (abfd
);
17652 elf_seg_map (abfd
) = m
;
17659 /* We may add a PT_ARM_EXIDX program header. */
17662 elf32_arm_additional_program_headers (bfd
*abfd
,
17663 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
17667 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
17668 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
17674 /* Hook called by the linker routine which adds symbols from an object
17678 elf32_arm_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
17679 Elf_Internal_Sym
*sym
, const char **namep
,
17680 flagword
*flagsp
, asection
**secp
, bfd_vma
*valp
)
17682 if ((ELF_ST_TYPE (sym
->st_info
) == STT_GNU_IFUNC
17683 || ELF_ST_BIND (sym
->st_info
) == STB_GNU_UNIQUE
)
17684 && (abfd
->flags
& DYNAMIC
) == 0
17685 && bfd_get_flavour (info
->output_bfd
) == bfd_target_elf_flavour
)
17686 elf_tdata (info
->output_bfd
)->has_gnu_symbols
= elf_gnu_symbol_any
;
17688 if (elf32_arm_hash_table (info
) == NULL
)
17691 if (elf32_arm_hash_table (info
)->vxworks_p
17692 && !elf_vxworks_add_symbol_hook (abfd
, info
, sym
, namep
,
17693 flagsp
, secp
, valp
))
17699 /* We use this to override swap_symbol_in and swap_symbol_out. */
17700 const struct elf_size_info elf32_arm_size_info
=
17702 sizeof (Elf32_External_Ehdr
),
17703 sizeof (Elf32_External_Phdr
),
17704 sizeof (Elf32_External_Shdr
),
17705 sizeof (Elf32_External_Rel
),
17706 sizeof (Elf32_External_Rela
),
17707 sizeof (Elf32_External_Sym
),
17708 sizeof (Elf32_External_Dyn
),
17709 sizeof (Elf_External_Note
),
17713 ELFCLASS32
, EV_CURRENT
,
17714 bfd_elf32_write_out_phdrs
,
17715 bfd_elf32_write_shdrs_and_ehdr
,
17716 bfd_elf32_checksum_contents
,
17717 bfd_elf32_write_relocs
,
17718 elf32_arm_swap_symbol_in
,
17719 elf32_arm_swap_symbol_out
,
17720 bfd_elf32_slurp_reloc_table
,
17721 bfd_elf32_slurp_symbol_table
,
17722 bfd_elf32_swap_dyn_in
,
17723 bfd_elf32_swap_dyn_out
,
17724 bfd_elf32_swap_reloc_in
,
17725 bfd_elf32_swap_reloc_out
,
17726 bfd_elf32_swap_reloca_in
,
17727 bfd_elf32_swap_reloca_out
17731 read_code32 (const bfd
*abfd
, const bfd_byte
*addr
)
17733 /* V7 BE8 code is always little endian. */
17734 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
17735 return bfd_getl32 (addr
);
17737 return bfd_get_32 (abfd
, addr
);
17741 read_code16 (const bfd
*abfd
, const bfd_byte
*addr
)
17743 /* V7 BE8 code is always little endian. */
17744 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
17745 return bfd_getl16 (addr
);
17747 return bfd_get_16 (abfd
, addr
);
17750 /* Return size of plt0 entry starting at ADDR
17751 or (bfd_vma) -1 if size can not be determined. */
17754 elf32_arm_plt0_size (const bfd
*abfd
, const bfd_byte
*addr
)
17756 bfd_vma first_word
;
17759 first_word
= read_code32 (abfd
, addr
);
17761 if (first_word
== elf32_arm_plt0_entry
[0])
17762 plt0_size
= 4 * ARRAY_SIZE (elf32_arm_plt0_entry
);
17763 else if (first_word
== elf32_thumb2_plt0_entry
[0])
17764 plt0_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
17766 /* We don't yet handle this PLT format. */
17767 return (bfd_vma
) -1;
17772 /* Return size of plt entry starting at offset OFFSET
17773 of plt section located at address START
17774 or (bfd_vma) -1 if size can not be determined. */
17777 elf32_arm_plt_size (const bfd
*abfd
, const bfd_byte
*start
, bfd_vma offset
)
17779 bfd_vma first_insn
;
17780 bfd_vma plt_size
= 0;
17781 const bfd_byte
*addr
= start
+ offset
;
17783 /* PLT entry size if fixed on Thumb-only platforms. */
17784 if (read_code32 (abfd
, start
) == elf32_thumb2_plt0_entry
[0])
17785 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
17787 /* Respect Thumb stub if necessary. */
17788 if (read_code16 (abfd
, addr
) == elf32_arm_plt_thumb_stub
[0])
17790 plt_size
+= 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub
);
17793 /* Strip immediate from first add. */
17794 first_insn
= read_code32 (abfd
, addr
+ plt_size
) & 0xffffff00;
17796 #ifdef FOUR_WORD_PLT
17797 if (first_insn
== elf32_arm_plt_entry
[0])
17798 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry
);
17800 if (first_insn
== elf32_arm_plt_entry_long
[0])
17801 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_long
);
17802 else if (first_insn
== elf32_arm_plt_entry_short
[0])
17803 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_short
);
17806 /* We don't yet handle this PLT format. */
17807 return (bfd_vma
) -1;
17812 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
17815 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
17816 long symcount ATTRIBUTE_UNUSED
,
17817 asymbol
**syms ATTRIBUTE_UNUSED
,
17827 Elf_Internal_Shdr
*hdr
;
17835 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
17838 if (dynsymcount
<= 0)
17841 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
17842 if (relplt
== NULL
)
17845 hdr
= &elf_section_data (relplt
)->this_hdr
;
17846 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
17847 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
17850 plt
= bfd_get_section_by_name (abfd
, ".plt");
17854 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
17857 data
= plt
->contents
;
17860 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
17862 bfd_cache_section_contents((asection
*) plt
, data
);
17865 count
= relplt
->size
/ hdr
->sh_entsize
;
17866 size
= count
* sizeof (asymbol
);
17867 p
= relplt
->relocation
;
17868 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
17870 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
17871 if (p
->addend
!= 0)
17872 size
+= sizeof ("+0x") - 1 + 8;
17875 s
= *ret
= (asymbol
*) bfd_malloc (size
);
17879 offset
= elf32_arm_plt0_size (abfd
, data
);
17880 if (offset
== (bfd_vma
) -1)
17883 names
= (char *) (s
+ count
);
17884 p
= relplt
->relocation
;
17886 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
17890 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
17891 if (plt_size
== (bfd_vma
) -1)
17894 *s
= **p
->sym_ptr_ptr
;
17895 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
17896 we are defining a symbol, ensure one of them is set. */
17897 if ((s
->flags
& BSF_LOCAL
) == 0)
17898 s
->flags
|= BSF_GLOBAL
;
17899 s
->flags
|= BSF_SYNTHETIC
;
17904 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
17905 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
17907 if (p
->addend
!= 0)
17911 memcpy (names
, "+0x", sizeof ("+0x") - 1);
17912 names
+= sizeof ("+0x") - 1;
17913 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
17914 for (a
= buf
; *a
== '0'; ++a
)
17917 memcpy (names
, a
, len
);
17920 memcpy (names
, "@plt", sizeof ("@plt"));
17921 names
+= sizeof ("@plt");
17923 offset
+= plt_size
;
17930 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
17932 if (hdr
->sh_flags
& SHF_ARM_NOREAD
)
17933 *flags
|= SEC_ELF_NOREAD
;
17938 elf32_arm_lookup_section_flags (char *flag_name
)
17940 if (!strcmp (flag_name
, "SHF_ARM_NOREAD"))
17941 return SHF_ARM_NOREAD
;
17943 return SEC_NO_FLAGS
;
17946 static unsigned int
17947 elf32_arm_count_additional_relocs (asection
*sec
)
17949 struct _arm_elf_section_data
*arm_data
;
17950 arm_data
= get_arm_elf_section_data (sec
);
17951 return arm_data
->additional_reloc_count
;
17954 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
17955 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
17956 FALSE otherwise. ISECTION is the best guess matching section from the
17957 input bfd IBFD, but it might be NULL. */
17960 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
17961 bfd
*obfd ATTRIBUTE_UNUSED
,
17962 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
17963 Elf_Internal_Shdr
*osection
)
17965 switch (osection
->sh_type
)
17967 case SHT_ARM_EXIDX
:
17969 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
17970 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
17973 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
17974 osection
->sh_info
= 0;
17976 /* The sh_link field must be set to the text section associated with
17977 this index section. Unfortunately the ARM EHABI does not specify
17978 exactly how to determine this association. Our caller does try
17979 to match up OSECTION with its corresponding input section however
17980 so that is a good first guess. */
17981 if (isection
!= NULL
17982 && osection
->bfd_section
!= NULL
17983 && isection
->bfd_section
!= NULL
17984 && isection
->bfd_section
->output_section
!= NULL
17985 && isection
->bfd_section
->output_section
== osection
->bfd_section
17986 && iheaders
!= NULL
17987 && isection
->sh_link
> 0
17988 && isection
->sh_link
< elf_numsections (ibfd
)
17989 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
17990 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
17993 for (i
= elf_numsections (obfd
); i
-- > 0;)
17994 if (oheaders
[i
]->bfd_section
17995 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
18001 /* Failing that we have to find a matching section ourselves. If
18002 we had the output section name available we could compare that
18003 with input section names. Unfortunately we don't. So instead
18004 we use a simple heuristic and look for the nearest executable
18005 section before this one. */
18006 for (i
= elf_numsections (obfd
); i
-- > 0;)
18007 if (oheaders
[i
] == osection
)
18013 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
18014 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
18015 == (SHF_ALLOC
| SHF_EXECINSTR
))
18021 osection
->sh_link
= i
;
18022 /* If the text section was part of a group
18023 then the index section should be too. */
18024 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
18025 osection
->sh_flags
|= SHF_GROUP
;
18031 case SHT_ARM_PREEMPTMAP
:
18032 osection
->sh_flags
= SHF_ALLOC
;
18035 case SHT_ARM_ATTRIBUTES
:
18036 case SHT_ARM_DEBUGOVERLAY
:
18037 case SHT_ARM_OVERLAYSECTION
:
18045 #undef elf_backend_copy_special_section_fields
18046 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18048 #define ELF_ARCH bfd_arch_arm
18049 #define ELF_TARGET_ID ARM_ELF_DATA
18050 #define ELF_MACHINE_CODE EM_ARM
18051 #ifdef __QNXTARGET__
18052 #define ELF_MAXPAGESIZE 0x1000
18054 #define ELF_MAXPAGESIZE 0x10000
18056 #define ELF_MINPAGESIZE 0x1000
18057 #define ELF_COMMONPAGESIZE 0x1000
18059 #define bfd_elf32_mkobject elf32_arm_mkobject
18061 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18062 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18063 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18064 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18065 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18066 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18067 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18068 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18069 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18070 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18071 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18072 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18073 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18075 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18076 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18077 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18078 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18079 #define elf_backend_check_relocs elf32_arm_check_relocs
18080 #define elf_backend_relocate_section elf32_arm_relocate_section
18081 #define elf_backend_write_section elf32_arm_write_section
18082 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18083 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18084 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18085 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18086 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18087 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18088 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18089 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18090 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18091 #define elf_backend_object_p elf32_arm_object_p
18092 #define elf_backend_fake_sections elf32_arm_fake_sections
18093 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18094 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18095 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18096 #define elf_backend_size_info elf32_arm_size_info
18097 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18098 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18099 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18100 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18101 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18102 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18104 #define elf_backend_can_refcount 1
18105 #define elf_backend_can_gc_sections 1
18106 #define elf_backend_plt_readonly 1
18107 #define elf_backend_want_got_plt 1
18108 #define elf_backend_want_plt_sym 0
18109 #define elf_backend_may_use_rel_p 1
18110 #define elf_backend_may_use_rela_p 0
18111 #define elf_backend_default_use_rela_p 0
18113 #define elf_backend_got_header_size 12
18114 #define elf_backend_extern_protected_data 1
18116 #undef elf_backend_obj_attrs_vendor
18117 #define elf_backend_obj_attrs_vendor "aeabi"
18118 #undef elf_backend_obj_attrs_section
18119 #define elf_backend_obj_attrs_section ".ARM.attributes"
18120 #undef elf_backend_obj_attrs_arg_type
18121 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18122 #undef elf_backend_obj_attrs_section_type
18123 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18124 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18125 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18127 #undef elf_backend_section_flags
18128 #define elf_backend_section_flags elf32_arm_section_flags
18129 #undef elf_backend_lookup_section_flags_hook
18130 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18132 #include "elf32-target.h"
/* Native Client targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18145 /* Like elf32_arm_link_hash_table_create -- but overrides
18146 appropriately for NaCl. */
18148 static struct bfd_link_hash_table
*
18149 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
18151 struct bfd_link_hash_table
*ret
;
18153 ret
= elf32_arm_link_hash_table_create (abfd
);
18156 struct elf32_arm_link_hash_table
*htab
18157 = (struct elf32_arm_link_hash_table
*) ret
;
18161 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
18162 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
18167 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18168 really need to use elf32_arm_modify_segment_map. But we do it
18169 anyway just to reduce gratuitous differences with the stock ARM backend. */
18172 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
18174 return (elf32_arm_modify_segment_map (abfd
, info
)
18175 && nacl_modify_segment_map (abfd
, info
));
18179 elf32_arm_nacl_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18181 elf32_arm_final_write_processing (abfd
, linker
);
18182 nacl_final_write_processing (abfd
, linker
);
18186 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
18187 const arelent
*rel ATTRIBUTE_UNUSED
)
18190 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
18191 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
18195 #define elf32_bed elf32_arm_nacl_bed
18196 #undef bfd_elf32_bfd_link_hash_table_create
18197 #define bfd_elf32_bfd_link_hash_table_create \
18198 elf32_arm_nacl_link_hash_table_create
18199 #undef elf_backend_plt_alignment
18200 #define elf_backend_plt_alignment 4
18201 #undef elf_backend_modify_segment_map
18202 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18203 #undef elf_backend_modify_program_headers
18204 #define elf_backend_modify_program_headers nacl_modify_program_headers
18205 #undef elf_backend_final_write_processing
18206 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18207 #undef bfd_elf32_get_synthetic_symtab
18208 #undef elf_backend_plt_sym_val
18209 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18210 #undef elf_backend_copy_special_section_fields
18212 #undef ELF_MINPAGESIZE
18213 #undef ELF_COMMONPAGESIZE
18216 #include "elf32-target.h"
18218 /* Reset to defaults. */
18219 #undef elf_backend_plt_alignment
18220 #undef elf_backend_modify_segment_map
18221 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18222 #undef elf_backend_modify_program_headers
18223 #undef elf_backend_final_write_processing
18224 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18225 #undef ELF_MINPAGESIZE
18226 #define ELF_MINPAGESIZE 0x1000
18227 #undef ELF_COMMONPAGESIZE
18228 #define ELF_COMMONPAGESIZE 0x1000
/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
18242 /* Like elf32_arm_link_hash_table_create -- but overrides
18243 appropriately for VxWorks. */
18245 static struct bfd_link_hash_table
*
18246 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
18248 struct bfd_link_hash_table
*ret
;
18250 ret
= elf32_arm_link_hash_table_create (abfd
);
18253 struct elf32_arm_link_hash_table
*htab
18254 = (struct elf32_arm_link_hash_table
*) ret
;
18256 htab
->vxworks_p
= 1;
18262 elf32_arm_vxworks_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18264 elf32_arm_final_write_processing (abfd
, linker
);
18265 elf_vxworks_final_write_processing (abfd
, linker
);
18269 #define elf32_bed elf32_arm_vxworks_bed
18271 #undef bfd_elf32_bfd_link_hash_table_create
18272 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
18273 #undef elf_backend_final_write_processing
18274 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
18275 #undef elf_backend_emit_relocs
18276 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
18278 #undef elf_backend_may_use_rel_p
18279 #define elf_backend_may_use_rel_p 0
18280 #undef elf_backend_may_use_rela_p
18281 #define elf_backend_may_use_rela_p 1
18282 #undef elf_backend_default_use_rela_p
18283 #define elf_backend_default_use_rela_p 1
18284 #undef elf_backend_want_plt_sym
18285 #define elf_backend_want_plt_sym 1
18286 #undef ELF_MAXPAGESIZE
18287 #define ELF_MAXPAGESIZE 0x1000
18289 #include "elf32-target.h"
18292 /* Merge backend specific data from an object file to the output
18293 object file when linking. */
18296 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
)
18298 flagword out_flags
;
18300 bfd_boolean flags_compatible
= TRUE
;
18303 /* Check if we have the same endianness. */
18304 if (! _bfd_generic_verify_endian_match (ibfd
, obfd
))
18307 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
18310 if (!elf32_arm_merge_eabi_attributes (ibfd
, obfd
))
18313 /* The input BFD must have had its flags initialised. */
18314 /* The following seems bogus to me -- The flags are initialized in
18315 the assembler but I don't think an elf_flags_init field is
18316 written into the object. */
18317 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18319 in_flags
= elf_elfheader (ibfd
)->e_flags
;
18320 out_flags
= elf_elfheader (obfd
)->e_flags
;
18322 /* In theory there is no reason why we couldn't handle this. However
18323 in practice it isn't even close to working and there is no real
18324 reason to want it. */
18325 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
18326 && !(ibfd
->flags
& DYNAMIC
)
18327 && (in_flags
& EF_ARM_BE8
))
18329 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18334 if (!elf_flags_init (obfd
))
18336 /* If the input is the default architecture and had the default
18337 flags then do not bother setting the flags for the output
18338 architecture, instead allow future merges to do this. If no
18339 future merges ever set these flags then they will retain their
18340 uninitialised values, which surprise surprise, correspond
18341 to the default values. */
18342 if (bfd_get_arch_info (ibfd
)->the_default
18343 && elf_elfheader (ibfd
)->e_flags
== 0)
18346 elf_flags_init (obfd
) = TRUE
;
18347 elf_elfheader (obfd
)->e_flags
= in_flags
;
18349 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
18350 && bfd_get_arch_info (obfd
)->the_default
)
18351 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
18356 /* Determine what should happen if the input ARM architecture
18357 does not match the output ARM architecture. */
18358 if (! bfd_arm_merge_machines (ibfd
, obfd
))
18361 /* Identical flags must be compatible. */
18362 if (in_flags
== out_flags
)
18365 /* Check to see if the input BFD actually contains any sections. If
18366 not, its flags may not have been initialised either, but it
18367 cannot actually cause any incompatiblity. Do not short-circuit
18368 dynamic objects; their section list may be emptied by
18369 elf_link_add_object_symbols.
18371 Also check to see if there are no code sections in the input.
18372 In this case there is no need to check for code specific flags.
18373 XXX - do we need to worry about floating-point format compatability
18374 in data sections ? */
18375 if (!(ibfd
->flags
& DYNAMIC
))
18377 bfd_boolean null_input_bfd
= TRUE
;
18378 bfd_boolean only_data_sections
= TRUE
;
18380 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
18382 /* Ignore synthetic glue sections. */
18383 if (strcmp (sec
->name
, ".glue_7")
18384 && strcmp (sec
->name
, ".glue_7t"))
18386 if ((bfd_get_section_flags (ibfd
, sec
)
18387 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18388 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18389 only_data_sections
= FALSE
;
18391 null_input_bfd
= FALSE
;
18396 if (null_input_bfd
|| only_data_sections
)
18400 /* Complain about various flag mismatches. */
18401 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
18402 EF_ARM_EABI_VERSION (out_flags
)))
18405 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18407 (in_flags
& EF_ARM_EABIMASK
) >> 24,
18408 (out_flags
& EF_ARM_EABIMASK
) >> 24);
18412 /* Not sure what needs to be checked for EABI versions >= 1. */
18413 /* VxWorks libraries do not use these flags. */
18414 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
18415 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
18416 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
18418 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
18421 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18423 in_flags
& EF_ARM_APCS_26
? 26 : 32,
18424 out_flags
& EF_ARM_APCS_26
? 26 : 32);
18425 flags_compatible
= FALSE
;
18428 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
18430 if (in_flags
& EF_ARM_APCS_FLOAT
)
18432 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18436 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18439 flags_compatible
= FALSE
;
18442 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
18444 if (in_flags
& EF_ARM_VFP_FLOAT
)
18446 (_("error: %B uses VFP instructions, whereas %B does not"),
18450 (_("error: %B uses FPA instructions, whereas %B does not"),
18453 flags_compatible
= FALSE
;
18456 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
18458 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
18460 (_("error: %B uses Maverick instructions, whereas %B does not"),
18464 (_("error: %B does not use Maverick instructions, whereas %B does"),
18467 flags_compatible
= FALSE
;
18470 #ifdef EF_ARM_SOFT_FLOAT
18471 if ((in_flags
& EF_ARM_SOFT_FLOAT
) != (out_flags
& EF_ARM_SOFT_FLOAT
))
18473 /* We can allow interworking between code that is VFP format
18474 layout, and uses either soft float or integer regs for
18475 passing floating point arguments and results. We already
18476 know that the APCS_FLOAT flags match; similarly for VFP
18478 if ((in_flags
& EF_ARM_APCS_FLOAT
) != 0
18479 || (in_flags
& EF_ARM_VFP_FLOAT
) == 0)
18481 if (in_flags
& EF_ARM_SOFT_FLOAT
)
18483 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18487 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18490 flags_compatible
= FALSE
;
18495 /* Interworking mismatch is only a warning. */
18496 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
18498 if (in_flags
& EF_ARM_INTERWORK
)
18501 (_("Warning: %B supports interworking, whereas %B does not"),
18507 (_("Warning: %B does not support interworking, whereas %B does"),
18513 return flags_compatible
;
/* Symbian OS Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
18528 /* Like elf32_arm_link_hash_table_create -- but overrides
18529 appropriately for Symbian OS. */
18531 static struct bfd_link_hash_table
*
18532 elf32_arm_symbian_link_hash_table_create (bfd
*abfd
)
18534 struct bfd_link_hash_table
*ret
;
18536 ret
= elf32_arm_link_hash_table_create (abfd
);
18539 struct elf32_arm_link_hash_table
*htab
18540 = (struct elf32_arm_link_hash_table
*)ret
;
18541 /* There is no PLT header for Symbian OS. */
18542 htab
->plt_header_size
= 0;
18543 /* The PLT entries are each one instruction and one word. */
18544 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
);
18545 htab
->symbian_p
= 1;
18546 /* Symbian uses armv5t or above, so use_blx is always true. */
18548 htab
->root
.is_relocatable_executable
= 1;
18553 static const struct bfd_elf_special_section
18554 elf32_arm_symbian_special_sections
[] =
18556 /* In a BPABI executable, the dynamic linking sections do not go in
18557 the loadable read-only segment. The post-linker may wish to
18558 refer to these sections, but they are not part of the final
18560 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC
, 0 },
18561 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB
, 0 },
18562 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM
, 0 },
18563 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS
, 0 },
18564 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH
, 0 },
18565 /* These sections do not need to be writable as the SymbianOS
18566 postlinker will arrange things so that no dynamic relocation is
18568 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY
, SHF_ALLOC
},
18569 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY
, SHF_ALLOC
},
18570 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY
, SHF_ALLOC
},
18571 { NULL
, 0, 0, 0, 0 }
18575 elf32_arm_symbian_begin_write_processing (bfd
*abfd
,
18576 struct bfd_link_info
*link_info
)
18578 /* BPABI objects are never loaded directly by an OS kernel; they are
18579 processed by a postlinker first, into an OS-specific format. If
18580 the D_PAGED bit is set on the file, BFD will align segments on
18581 page boundaries, so that an OS can directly map the file. With
18582 BPABI objects, that just results in wasted space. In addition,
18583 because we clear the D_PAGED bit, map_sections_to_segments will
18584 recognize that the program headers should not be mapped into any
18585 loadable segment. */
18586 abfd
->flags
&= ~D_PAGED
;
18587 elf32_arm_begin_write_processing (abfd
, link_info
);
18591 elf32_arm_symbian_modify_segment_map (bfd
*abfd
,
18592 struct bfd_link_info
*info
)
18594 struct elf_segment_map
*m
;
18597 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18598 segment. However, because the .dynamic section is not marked
18599 with SEC_LOAD, the generic ELF code will not create such a
18601 dynsec
= bfd_get_section_by_name (abfd
, ".dynamic");
18604 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
18605 if (m
->p_type
== PT_DYNAMIC
)
18610 m
= _bfd_elf_make_dynamic_segment (abfd
, dynsec
);
18611 m
->next
= elf_seg_map (abfd
);
18612 elf_seg_map (abfd
) = m
;
18616 /* Also call the generic arm routine. */
18617 return elf32_arm_modify_segment_map (abfd
, info
);
18620 /* Return address for Ith PLT stub in section PLT, for relocation REL
18621 or (bfd_vma) -1 if it should not be included. */
18624 elf32_arm_symbian_plt_sym_val (bfd_vma i
, const asection
*plt
,
18625 const arelent
*rel ATTRIBUTE_UNUSED
)
18627 return plt
->vma
+ 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
) * i
;
18631 #define elf32_bed elf32_arm_symbian_bed
18633 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18634 will process them and then discard them. */
18635 #undef ELF_DYNAMIC_SEC_FLAGS
18636 #define ELF_DYNAMIC_SEC_FLAGS \
18637 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18639 #undef elf_backend_emit_relocs
18641 #undef bfd_elf32_bfd_link_hash_table_create
18642 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18643 #undef elf_backend_special_sections
18644 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18645 #undef elf_backend_begin_write_processing
18646 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18647 #undef elf_backend_final_write_processing
18648 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18650 #undef elf_backend_modify_segment_map
18651 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18653 /* There is no .got section for BPABI objects, and hence no header. */
18654 #undef elf_backend_got_header_size
18655 #define elf_backend_got_header_size 0
18657 /* Similarly, there is no .got.plt section. */
18658 #undef elf_backend_want_got_plt
18659 #define elf_backend_want_got_plt 0
18661 #undef elf_backend_plt_sym_val
18662 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
18664 #undef elf_backend_may_use_rel_p
18665 #define elf_backend_may_use_rel_p 1
18666 #undef elf_backend_may_use_rela_p
18667 #define elf_backend_may_use_rela_p 0
18668 #undef elf_backend_default_use_rela_p
18669 #define elf_backend_default_use_rela_p 0
18670 #undef elf_backend_want_plt_sym
18671 #define elf_backend_want_plt_sym 0
18672 #undef ELF_MAXPAGESIZE
18673 #define ELF_MAXPAGESIZE 0x8000
18675 #include "elf32-target.h"