[ARM] Update bfd's Tag_CPU_arch knowledge
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the relocation section associated with NAME.  HTAB is the
34 bfd's elf32_arm_link_hash_table (its use_rel flag selects REL vs RELA).  */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return size of a relocation entry.  HTAB is the bfd's
39 elf32_arm_link_hash_table.  */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return function to swap relocations in.  HTAB is the bfd's
46 elf32_arm_link_hash_table.  */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return function to swap relocations out.  HTAB is the bfd's
53 elf32_arm_link_hash_table.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
58
/* Reloc decoding hooks: REL-style relocs are decoded by
   elf32_arm_info_to_howto; the generic (RELA) hook is left NULL here.  */
59 #define elf_info_to_howto NULL
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
/* ABI version number and the OS/ABI byte placed in the ELF header
   (ELFOSABI_ARM).  */
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: the place with its low two
   bits masked off, i.e. rounded down to a 4-byte boundary.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67
/* Forward declaration: hook invoked when SEC's CONTENTS are written to
   OUTPUT_BFD; the definition appears later in this file.  */
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
1748 /* 160 onwards: */
1749 static reloc_howto_type elf32_arm_howto_table_2[8] =
1750 {
1751 HOWTO (R_ARM_IRELATIVE, /* type */
1752 0, /* rightshift */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1754 32, /* bitsize */
1755 FALSE, /* pc_relative */
1756 0, /* bitpos */
1757 complain_overflow_bitfield,/* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE), /* pcrel_offset */
1764 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1765 0, /* rightshift */
1766 2, /* size (0 = byte, 1 = short, 2 = long) */
1767 32, /* bitsize */
1768 FALSE, /* pc_relative */
1769 0, /* bitpos */
1770 complain_overflow_bitfield,/* complain_on_overflow */
1771 bfd_elf_generic_reloc, /* special_function */
1772 "R_ARM_GOTFUNCDESC", /* name */
1773 FALSE, /* partial_inplace */
1774 0, /* src_mask */
1775 0xffffffff, /* dst_mask */
1776 FALSE), /* pcrel_offset */
1777 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1778 0, /* rightshift */
1779 2, /* size (0 = byte, 1 = short, 2 = long) */
1780 32, /* bitsize */
1781 FALSE, /* pc_relative */
1782 0, /* bitpos */
1783 complain_overflow_bitfield,/* complain_on_overflow */
1784 bfd_elf_generic_reloc, /* special_function */
1785 "R_ARM_GOTOFFFUNCDESC",/* name */
1786 FALSE, /* partial_inplace */
1787 0, /* src_mask */
1788 0xffffffff, /* dst_mask */
1789 FALSE), /* pcrel_offset */
1790 HOWTO (R_ARM_FUNCDESC, /* type */
1791 0, /* rightshift */
1792 2, /* size (0 = byte, 1 = short, 2 = long) */
1793 32, /* bitsize */
1794 FALSE, /* pc_relative */
1795 0, /* bitpos */
1796 complain_overflow_bitfield,/* complain_on_overflow */
1797 bfd_elf_generic_reloc, /* special_function */
1798 "R_ARM_FUNCDESC", /* name */
1799 FALSE, /* partial_inplace */
1800 0, /* src_mask */
1801 0xffffffff, /* dst_mask */
1802 FALSE), /* pcrel_offset */
1803 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1804 0, /* rightshift */
1805 2, /* size (0 = byte, 1 = short, 2 = long) */
1806 64, /* bitsize */
1807 FALSE, /* pc_relative */
1808 0, /* bitpos */
1809 complain_overflow_bitfield,/* complain_on_overflow */
1810 bfd_elf_generic_reloc, /* special_function */
1811 "R_ARM_FUNCDESC_VALUE",/* name */
1812 FALSE, /* partial_inplace */
1813 0, /* src_mask */
1814 0xffffffff, /* dst_mask */
1815 FALSE), /* pcrel_offset */
1816 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1817 0, /* rightshift */
1818 2, /* size (0 = byte, 1 = short, 2 = long) */
1819 32, /* bitsize */
1820 FALSE, /* pc_relative */
1821 0, /* bitpos */
1822 complain_overflow_bitfield,/* complain_on_overflow */
1823 bfd_elf_generic_reloc, /* special_function */
1824 "R_ARM_TLS_GD32_FDPIC",/* name */
1825 FALSE, /* partial_inplace */
1826 0, /* src_mask */
1827 0xffffffff, /* dst_mask */
1828 FALSE), /* pcrel_offset */
1829 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1830 0, /* rightshift */
1831 2, /* size (0 = byte, 1 = short, 2 = long) */
1832 32, /* bitsize */
1833 FALSE, /* pc_relative */
1834 0, /* bitpos */
1835 complain_overflow_bitfield,/* complain_on_overflow */
1836 bfd_elf_generic_reloc, /* special_function */
1837 "R_ARM_TLS_LDM32_FDPIC",/* name */
1838 FALSE, /* partial_inplace */
1839 0, /* src_mask */
1840 0xffffffff, /* dst_mask */
1841 FALSE), /* pcrel_offset */
1842 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1843 0, /* rightshift */
1844 2, /* size (0 = byte, 1 = short, 2 = long) */
1845 32, /* bitsize */
1846 FALSE, /* pc_relative */
1847 0, /* bitpos */
1848 complain_overflow_bitfield,/* complain_on_overflow */
1849 bfd_elf_generic_reloc, /* special_function */
1850 "R_ARM_TLS_IE32_FDPIC",/* name */
1851 FALSE, /* partial_inplace */
1852 0, /* src_mask */
1853 0xffffffff, /* dst_mask */
1854 FALSE), /* pcrel_offset */
1855 };
1856
1857 /* 249-255 extended, currently unused, relocations: */
1858 static reloc_howto_type elf32_arm_howto_table_3[4] =
1859 {
1860 HOWTO (R_ARM_RREL32, /* type */
1861 0, /* rightshift */
1862 0, /* size (0 = byte, 1 = short, 2 = long) */
1863 0, /* bitsize */
1864 FALSE, /* pc_relative */
1865 0, /* bitpos */
1866 complain_overflow_dont,/* complain_on_overflow */
1867 bfd_elf_generic_reloc, /* special_function */
1868 "R_ARM_RREL32", /* name */
1869 FALSE, /* partial_inplace */
1870 0, /* src_mask */
1871 0, /* dst_mask */
1872 FALSE), /* pcrel_offset */
1873
1874 HOWTO (R_ARM_RABS32, /* type */
1875 0, /* rightshift */
1876 0, /* size (0 = byte, 1 = short, 2 = long) */
1877 0, /* bitsize */
1878 FALSE, /* pc_relative */
1879 0, /* bitpos */
1880 complain_overflow_dont,/* complain_on_overflow */
1881 bfd_elf_generic_reloc, /* special_function */
1882 "R_ARM_RABS32", /* name */
1883 FALSE, /* partial_inplace */
1884 0, /* src_mask */
1885 0, /* dst_mask */
1886 FALSE), /* pcrel_offset */
1887
1888 HOWTO (R_ARM_RPC24, /* type */
1889 0, /* rightshift */
1890 0, /* size (0 = byte, 1 = short, 2 = long) */
1891 0, /* bitsize */
1892 FALSE, /* pc_relative */
1893 0, /* bitpos */
1894 complain_overflow_dont,/* complain_on_overflow */
1895 bfd_elf_generic_reloc, /* special_function */
1896 "R_ARM_RPC24", /* name */
1897 FALSE, /* partial_inplace */
1898 0, /* src_mask */
1899 0, /* dst_mask */
1900 FALSE), /* pcrel_offset */
1901
1902 HOWTO (R_ARM_RBASE, /* type */
1903 0, /* rightshift */
1904 0, /* size (0 = byte, 1 = short, 2 = long) */
1905 0, /* bitsize */
1906 FALSE, /* pc_relative */
1907 0, /* bitpos */
1908 complain_overflow_dont,/* complain_on_overflow */
1909 bfd_elf_generic_reloc, /* special_function */
1910 "R_ARM_RBASE", /* name */
1911 FALSE, /* partial_inplace */
1912 0, /* src_mask */
1913 0, /* dst_mask */
1914 FALSE) /* pcrel_offset */
1915 };
1916
1917 static reloc_howto_type *
1918 elf32_arm_howto_from_type (unsigned int r_type)
1919 {
1920 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1921 return &elf32_arm_howto_table_1[r_type];
1922
1923 if (r_type >= R_ARM_IRELATIVE
1924 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1925 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1926
1927 if (r_type >= R_ARM_RREL32
1928 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1929 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1930
1931 return NULL;
1932 }
1933
1934 static bfd_boolean
1935 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1936 Elf_Internal_Rela * elf_reloc)
1937 {
1938 unsigned int r_type;
1939
1940 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1941 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1942 {
1943 /* xgettext:c-format */
1944 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1945 abfd, r_type);
1946 bfd_set_error (bfd_error_bad_value);
1947 return FALSE;
1948 }
1949 return TRUE;
1950 }
1951
/* One entry mapping a generic BFD_RELOC_* code to the corresponding
   ELF R_ARM_* relocation number.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;	/* BFD relocation code.  */
    unsigned char	      elf_reloc_val;	/* R_ARM_* number (fits in a byte).  */
  };
1957
1958 /* All entries in this list must also be present in elf32_arm_howto_table. */
1959 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1960 {
1961 {BFD_RELOC_NONE, R_ARM_NONE},
1962 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1963 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1964 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1965 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1966 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1967 {BFD_RELOC_32, R_ARM_ABS32},
1968 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1969 {BFD_RELOC_8, R_ARM_ABS8},
1970 {BFD_RELOC_16, R_ARM_ABS16},
1971 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1972 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1973 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1974 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1975 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1976 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1977 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1978 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1979 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1980 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1981 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1982 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1983 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1984 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1985 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1986 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1987 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1988 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1989 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1990 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1991 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1992 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1993 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1994 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1995 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1996 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1997 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1998 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1999 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2000 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2001 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2002 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2003 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2004 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2005 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2006 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2007 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2008 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2009 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2010 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2011 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2012 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2013 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2014 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2015 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2016 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2017 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2018 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2019 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2020 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2021 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2022 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2023 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2024 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2025 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2026 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2027 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2028 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2029 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2030 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2031 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2032 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2033 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2034 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2035 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2036 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2037 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2038 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2039 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2040 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2041 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2042 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2043 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2044 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2045 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2046 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2047 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2048 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2049 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2050 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2051 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2052 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2053 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2054 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2055 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2056 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2057 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
2058 };
2059
2060 static reloc_howto_type *
2061 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2062 bfd_reloc_code_real_type code)
2063 {
2064 unsigned int i;
2065
2066 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2067 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2068 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2069
2070 return NULL;
2071 }
2072
2073 static reloc_howto_type *
2074 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2075 const char *r_name)
2076 {
2077 unsigned int i;
2078
2079 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2080 if (elf32_arm_howto_table_1[i].name != NULL
2081 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2082 return &elf32_arm_howto_table_1[i];
2083
2084 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2085 if (elf32_arm_howto_table_2[i].name != NULL
2086 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2087 return &elf32_arm_howto_table_2[i];
2088
2089 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2090 if (elf32_arm_howto_table_3[i].name != NULL
2091 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2092 return &elf32_arm_howto_table_3[i];
2093
2094 return NULL;
2095 }
2096
2097 /* Support for core dump NOTE sections. */
2098
2099 static bfd_boolean
2100 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2101 {
2102 int offset;
2103 size_t size;
2104
2105 switch (note->descsz)
2106 {
2107 default:
2108 return FALSE;
2109
2110 case 148: /* Linux/ARM 32-bit. */
2111 /* pr_cursig */
2112 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2113
2114 /* pr_pid */
2115 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2116
2117 /* pr_reg */
2118 offset = 72;
2119 size = 72;
2120
2121 break;
2122 }
2123
2124 /* Make a ".reg/999" section. */
2125 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2126 size, note->descpos + offset);
2127 }
2128
2129 static bfd_boolean
2130 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2131 {
2132 switch (note->descsz)
2133 {
2134 default:
2135 return FALSE;
2136
2137 case 124: /* Linux/ARM elf_prpsinfo. */
2138 elf_tdata (abfd)->core->pid
2139 = bfd_get_32 (abfd, note->descdata + 12);
2140 elf_tdata (abfd)->core->program
2141 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2142 elf_tdata (abfd)->core->command
2143 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2144 }
2145
2146 /* Note that for some reason, a spurious space is tacked
2147 onto the end of the args in some (at least one anyway)
2148 implementations, so strip it off if it exists. */
2149 {
2150 char *command = elf_tdata (abfd)->core->command;
2151 int n = strlen (command);
2152
2153 if (0 < n && command[n - 1] == ' ')
2154 command[n - 1] = '\0';
2155 }
2156
2157 return TRUE;
2158 }
2159
/* Write a Linux/ARM core note of NOTE_TYPE into BUF/BUFSIZ.  Variadic
   arguments depend on the note type:
     NT_PRPSINFO: (const char *program, const char *command)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   The fixed offsets below mirror those read back by the grok_prstatus
   and grok_psinfo routines above.  Returns NULL for unknown types.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	/* 124-byte Linux/ARM elf_prpsinfo image; NONSTRING because the
	   embedded fields need not be NUL-terminated.  */
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname at offset 28 (16 bytes).  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	DIAGNOSTIC_PUSH;
	/* GCC 8.1 warns about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
#if GCC_VERSION == 8001
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	/* pr_psargs at offset 44 (80 bytes).  */
	strncpy (data + 44, va_arg (ap, const char *), 80);
	DIAGNOSTIC_POP;
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	/* 148-byte Linux/ARM elf_prstatus image.  */
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_pid at offset 24.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	/* pr_cursig at offset 12.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	/* pr_reg: 72 bytes of registers at offset 72.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2216
/* BFD target vectors and names for little- and big-endian ARM ELF.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Core-file note hooks implemented above.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

/* Name template for long-branch veneer symbols.  */
#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to secure-entry function symbols (CMSE).  */
#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
2265
/* TLS call trampoline: adjusts r0 by the return address and jumps to
   the function address held 4 bytes into the descriptor.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy TLS-descriptor resolution trampoline; the two trailing words
   are GOT-relative offsets patched at link time.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr	r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr	r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1:   ldr	r2, [pc, r2] */
  0xe081100f, /* 2:   add	r1, pc */
  0xe12fff12, /* bx	r2 */
  0x00000014, /* 3:   .word	_GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4:   .word	_GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L1.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
/* NOTE(review): words are stored halfword-swapped for Thumb-2
   encoding; low halfword of each 32-bit insn comes first.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
  {
    0xc00cf8df,    /* ldr.w   r12, .L1 */
    0x0c09eb0c,    /* add.w   r12, r12, r9 */
    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
    0xf000f8dc,    /* ldr.w   pc, [r12] */
    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
    0xc008f85f,    /* ldr.w   r12, .L2 */
    0xcd04f84d,    /* push    {r12} */
    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
    0xf000f8d9,    /* ldr.w   pc, [r9] */
  };
2317
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in with the GOT offset.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str	 lr, [sp, #-4]! */
  0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
  0xe08fe00e,		/* add	 lr, pc, lr	*/
  0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
  0x00000000,		/* &GOT[0] - .		*/
};

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

/* Whether the long PLT entry format is selected (set from a linker
   command-line option).  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2379
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
  0xbf00f000		/* ldr.w   pc, [ip]	  */
			/* nop			  */
};

/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.
   The second half is the lazy-resolution fragment.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf000,		/* ldr	  pc,[ip]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xea000000,		/* b	  _PLT				*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.
   GOT accesses are indirected through r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe599f008,		/* ldr	  pc,[r9,#8]			*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,	      /* ldr   pc, [pc, #-4] */
  0x00000000,	      /* dcd   R_ARM_GLOB_DAT(X) */
};
2454
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.
   NaCl requires 16-byte instruction bundles and address-masking
   (bic ...#0xc000000f) before every indirect branch.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
/* Byte offset of the shared .Lplt_tail label above (12th word).  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
  {
    0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xea000000,		/* b	.Lplt_tail			*/
  };

/* Maximum reach of ARM, Thumb-1 and Thumb-2 (un)conditional branches,
   including the pipeline PC offset (+8 for ARM, +4 for Thumb).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET   ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2502
/* Kind of each element in a stub template: 16- or 32-bit Thumb
   instruction, 32-bit ARM instruction, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for insn_sequence elements; X is the encoding, Y a
   relocation applied to the element, Z its addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template.  */
typedef struct
{
  bfd_vma	      data;		/* Instruction encoding or literal.  */
  enum stub_insn_type type;		/* How to emit DATA.  */
  unsigned int	      r_type;		/* Relocation applied to this word.  */
  int		      reloc_addend;	/* Addend for that relocation.  */
}  insn_sequence;
2530
/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  PureCode forbids literal loads, so the
   target address is materialized with movw/movt.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),	     /* bx   ip */
};

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};
2605
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> Thumb long branch stub, PIC.  The final bx performs the
   mode switch.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop  */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
};

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};
2670
/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* NaCl ARM -> ARM long branch stub.  The bic masks the target for
   NaCl's sandbox before the indirect branch.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
{
  THUMB32_INSN (0xe97fe97f),		/* sg.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
};
2723

/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),	      /* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),    /* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)     /* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
};
2757
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in elf32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2784
/* One entry per long/short branch stub defined above.  Expanding
   DEF_STUBS with different DEF_STUB definitions generates both the
   elf32_arm_stub_type enum and the stub_definitions table, keeping
   them in sync by construction.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* Enumerate the stub types; arm_stub_none (0) means "no stub".  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template: the instruction sequence and its element count.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Indexed by enum elf32_arm_stub_type; slot 0 is the arm_stub_none
   placeholder.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2835
/* An entry in the stub hash table: one linker-generated stub (long
   branch, interworking veneer or erratum workaround).  Records where
   the stub lives, where it branches to, and the instruction template
   used to emit it.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2887
2888 /* Used to build a map of a section. This is required for mixed-endian
2889 code/data. */
2890
2891 typedef struct elf32_elf_section_map
2892 {
2893 bfd_vma vma;
2894 char type;
2895 }
2896 elf32_arm_section_map;
2897
2898 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2899
2900 typedef enum
2901 {
2902 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2903 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2904 VFP11_ERRATUM_ARM_VENEER,
2905 VFP11_ERRATUM_THUMB_VENEER
2906 }
2907 elf32_vfp11_erratum_type;
2908
2909 typedef struct elf32_vfp11_erratum_list
2910 {
2911 struct elf32_vfp11_erratum_list *next;
2912 bfd_vma vma;
2913 union
2914 {
2915 struct
2916 {
2917 struct elf32_vfp11_erratum_list *veneer;
2918 unsigned int vfp_insn;
2919 } b;
2920 struct
2921 {
2922 struct elf32_vfp11_erratum_list *branch;
2923 unsigned int id;
2924 } v;
2925 } u;
2926 elf32_vfp11_erratum_type type;
2927 }
2928 elf32_vfp11_erratum_list;
2929
2930 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2931 veneer. */
2932 typedef enum
2933 {
2934 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2935 STM32L4XX_ERRATUM_VENEER
2936 }
2937 elf32_stm32l4xx_erratum_type;
2938
2939 typedef struct elf32_stm32l4xx_erratum_list
2940 {
2941 struct elf32_stm32l4xx_erratum_list *next;
2942 bfd_vma vma;
2943 union
2944 {
2945 struct
2946 {
2947 struct elf32_stm32l4xx_erratum_list *veneer;
2948 unsigned int insn;
2949 } b;
2950 struct
2951 {
2952 struct elf32_stm32l4xx_erratum_list *branch;
2953 unsigned int id;
2954 } v;
2955 } u;
2956 elf32_stm32l4xx_erratum_type type;
2957 }
2958 elf32_stm32l4xx_erratum_list;
2959
2960 typedef enum
2961 {
2962 DELETE_EXIDX_ENTRY,
2963 INSERT_EXIDX_CANTUNWIND_AT_END
2964 }
2965 arm_unwind_edit_type;
2966
2967 /* A (sorted) list of edits to apply to an unwind table. */
2968 typedef struct arm_unwind_table_edit
2969 {
2970 arm_unwind_edit_type type;
2971 /* Note: we sometimes want to insert an unwind entry corresponding to a
2972 section different from the one we're currently writing out, so record the
2973 (text) section this edit relates to here. */
2974 asection *linked_section;
2975 unsigned int index;
2976 struct arm_unwind_table_edit *next;
2977 }
2978 arm_unwind_table_edit;
2979
2980 typedef struct _arm_elf_section_data
2981 {
2982 /* Information about mapping symbols. */
2983 struct bfd_elf_section_data elf;
2984 unsigned int mapcount;
2985 unsigned int mapsize;
2986 elf32_arm_section_map *map;
2987 /* Information about CPU errata. */
2988 unsigned int erratumcount;
2989 elf32_vfp11_erratum_list *erratumlist;
2990 unsigned int stm32l4xx_erratumcount;
2991 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2992 unsigned int additional_reloc_count;
2993 /* Information about unwind tables. */
2994 union
2995 {
2996 /* Unwind info attached to a text section. */
2997 struct
2998 {
2999 asection *arm_exidx_sec;
3000 } text;
3001
3002 /* Unwind info attached to an .ARM.exidx section. */
3003 struct
3004 {
3005 arm_unwind_table_edit *unwind_edit_list;
3006 arm_unwind_table_edit *unwind_edit_tail;
3007 } exidx;
3008 } u;
3009 }
3010 _arm_elf_section_data;
3011
3012 #define elf32_arm_section_data(sec) \
3013 ((_arm_elf_section_data *) elf_section_data (sec))
3014
3015 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3016 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3017 so may be created multiple times: we use an array of these entries whilst
3018 relaxing which we can refresh easily, then create stubs for each potentially
3019 erratum-triggering instruction once we've settled on a solution. */
3020
3021 struct a8_erratum_fix
3022 {
3023 bfd *input_bfd;
3024 asection *section;
3025 bfd_vma offset;
3026 bfd_vma target_offset;
3027 unsigned long orig_insn;
3028 char *stub_name;
3029 enum elf32_arm_stub_type stub_type;
3030 enum arm_st_branch_type branch_type;
3031 };
3032
3033 /* A table of relocs applied to branches which might trigger Cortex-A8
3034 erratum. */
3035
3036 struct a8_erratum_reloc
3037 {
3038 bfd_vma from;
3039 bfd_vma destination;
3040 struct elf32_arm_link_hash_entry *hash;
3041 const char *sym_name;
3042 unsigned int r_type;
3043 enum arm_st_branch_type branch_type;
3044 bfd_boolean non_a8_stub;
3045 };
3046
3047 /* The size of the thread control block. */
3048 #define TCB_SIZE 8
3049
3050 /* ARM-specific information about a PLT entry, over and above the usual
3051 gotplt_union. */
3052 struct arm_plt_info
3053 {
3054 /* We reference count Thumb references to a PLT entry separately,
3055 so that we can emit the Thumb trampoline only if needed. */
3056 bfd_signed_vma thumb_refcount;
3057
3058 /* Some references from Thumb code may be eliminated by BL->BLX
3059 conversion, so record them separately. */
3060 bfd_signed_vma maybe_thumb_refcount;
3061
3062 /* How many of the recorded PLT accesses were from non-call relocations.
3063 This information is useful when deciding whether anything takes the
3064 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3065 non-call references to the function should resolve directly to the
3066 real runtime target. */
3067 unsigned int noncall_refcount;
3068
3069 /* Since PLT entries have variable size if the Thumb prologue is
3070 used, we need to record the index into .got.plt instead of
3071 recomputing it from the PLT offset. */
3072 bfd_signed_vma got_offset;
3073 };
3074
3075 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3076 struct arm_local_iplt_info
3077 {
3078 /* The information that is usually found in the generic ELF part of
3079 the hash table entry. */
3080 union gotplt_union root;
3081
3082 /* The information that is usually found in the ARM-specific part of
3083 the hash table entry. */
3084 struct arm_plt_info arm;
3085
3086 /* A list of all potential dynamic relocations against this symbol. */
3087 struct elf_dyn_relocs *dyn_relocs;
3088 };
3089
/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local {
  /* Reference counters for FDPIC relocations against the local symbol.
     NOTE(review): the names mirror R_ARM_FUNCDESC and
     R_ARM_GOTOFFFUNCDESC — confirm maintenance site in check_relocs.  */
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the symbol's function descriptor in the GOT; the low bit
     is set once the descriptor has been written out (see
     arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
3096
/* ARM-specific per-object (per input BFD) data, extending the generic
   ELF tdata.  The per-local-symbol arrays below are allocated lazily
   as one slab by elf32_arm_allocate_local_sym_info and indexed by
   local symbol number.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};
3119
3120 #define elf_arm_tdata(bfd) \
3121 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3122
3123 #define elf32_arm_local_got_tls_type(bfd) \
3124 (elf_arm_tdata (bfd)->local_got_tls_type)
3125
3126 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3127 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3128
3129 #define elf32_arm_local_iplt(bfd) \
3130 (elf_arm_tdata (bfd)->local_iplt)
3131
3132 #define elf32_arm_local_fdpic_cnts(bfd) \
3133 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3134
3135 #define is_arm_elf(bfd) \
3136 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3137 && elf_tdata (bfd) != NULL \
3138 && elf_object_id (bfd) == ARM_ELF_DATA)
3139
3140 static bfd_boolean
3141 elf32_arm_mkobject (bfd *abfd)
3142 {
3143 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3144 ARM_ELF_DATA);
3145 }
3146
3147 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3148
/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  /* Reference counters for the FDPIC relocation kinds seen against the
     symbol.  NOTE(review): the names mirror R_ARM_GOTOFFFUNCDESC,
     R_ARM_GOTFUNCDESC and R_ARM_FUNCDESC — confirm maintenance site in
     check_relocs.  */
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  /* Offset of the symbol's function descriptor; initialized to -1
     (unassigned) in elf32_arm_link_hash_newfunc, and the low bit is
     set once the descriptor has been written out (see
     arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
  /* Offset of the GOT entry holding the descriptor address;
     initialized to -1 (unassigned).  */
  int gotfuncdesc_offset;
};
3157
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Flag bits recording the kinds of GOT entry the symbol needs;
     tls_type below holds a mask of these.  GOT_TLS_GD and
     GOT_TLS_GDESC can be combined, hence the helper macro.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3197
3198 /* Traverse an arm ELF linker hash table. */
3199 #define elf32_arm_link_hash_traverse(table, func, info) \
3200 (elf_link_hash_traverse \
3201 (&(table)->root, \
3202 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3203 (info)))
3204
3205 /* Get the ARM elf linker hash table from a link_info structure. */
3206 #define elf32_arm_hash_table(info) \
3207 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3208 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3209
3210 #define arm_stub_hash_lookup(table, string, create, copy) \
3211 ((struct elf32_arm_stub_hash_entry *) \
3212 bfd_hash_lookup ((table), (string), (create), (copy)))
3213
3214 /* Array to keep track of which stub sections have been created, and
3215 information on stub grouping. */
3216 struct map_stub
3217 {
3218 /* This is the section to which stubs in the group will be
3219 attached. */
3220 asection *link_sec;
3221 /* The stub section. */
3222 asection *stub_sec;
3223 };
3224
3225 #define elf32_arm_compute_jump_table_size(htab) \
3226 ((htab)->next_tls_desc_index * 4)
3227
3228 /* ARM ELF linker hash table. */
3229 struct elf32_arm_link_hash_table
3230 {
3231 /* The main hash table. */
3232 struct elf_link_hash_table root;
3233
3234 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3235 bfd_size_type thumb_glue_size;
3236
3237 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3238 bfd_size_type arm_glue_size;
3239
3240 /* The size in bytes of section containing the ARMv4 BX veneers. */
3241 bfd_size_type bx_glue_size;
3242
3243 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3244 veneer has been populated. */
3245 bfd_vma bx_glue_offset[15];
3246
3247 /* The size in bytes of the section containing glue for VFP11 erratum
3248 veneers. */
3249 bfd_size_type vfp11_erratum_glue_size;
3250
3251 /* The size in bytes of the section containing glue for STM32L4XX erratum
3252 veneers. */
3253 bfd_size_type stm32l4xx_erratum_glue_size;
3254
3255 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3256 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3257 elf32_arm_write_section(). */
3258 struct a8_erratum_fix *a8_erratum_fixes;
3259 unsigned int num_a8_erratum_fixes;
3260
3261 /* An arbitrary input BFD chosen to hold the glue sections. */
3262 bfd * bfd_of_glue_owner;
3263
3264 /* Nonzero to output a BE8 image. */
3265 int byteswap_code;
3266
3267 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3268 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3269 int target1_is_rel;
3270
3271 /* The relocation to use for R_ARM_TARGET2 relocations. */
3272 int target2_reloc;
3273
3274 /* 0 = Ignore R_ARM_V4BX.
3275 1 = Convert BX to MOV PC.
3276 2 = Generate v4 interworing stubs. */
3277 int fix_v4bx;
3278
3279 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3280 int fix_cortex_a8;
3281
3282 /* Whether we should fix the ARM1176 BLX immediate issue. */
3283 int fix_arm1176;
3284
3285 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3286 int use_blx;
3287
3288 /* What sort of code sequences we should look for which may trigger the
3289 VFP11 denorm erratum. */
3290 bfd_arm_vfp11_fix vfp11_fix;
3291
3292 /* Global counter for the number of fixes we have emitted. */
3293 int num_vfp11_fixes;
3294
3295 /* What sort of code sequences we should look for which may trigger the
3296 STM32L4XX erratum. */
3297 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3298
3299 /* Global counter for the number of fixes we have emitted. */
3300 int num_stm32l4xx_fixes;
3301
3302 /* Nonzero to force PIC branch veneers. */
3303 int pic_veneer;
3304
3305 /* The number of bytes in the initial entry in the PLT. */
3306 bfd_size_type plt_header_size;
3307
3308 /* The number of bytes in the subsequent PLT etries. */
3309 bfd_size_type plt_entry_size;
3310
3311 /* True if the target system is VxWorks. */
3312 int vxworks_p;
3313
3314 /* True if the target system is Symbian OS. */
3315 int symbian_p;
3316
3317 /* True if the target system is Native Client. */
3318 int nacl_p;
3319
3320 /* True if the target uses REL relocations. */
3321 bfd_boolean use_rel;
3322
3323 /* Nonzero if import library must be a secure gateway import library
3324 as per ARMv8-M Security Extensions. */
3325 int cmse_implib;
3326
3327 /* The import library whose symbols' address must remain stable in
3328 the import library generated. */
3329 bfd *in_implib_bfd;
3330
3331 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3332 bfd_vma next_tls_desc_index;
3333
3334 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3335 bfd_vma num_tls_desc;
3336
3337 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3338 asection *srelplt2;
3339
3340 /* The offset into splt of the PLT entry for the TLS descriptor
3341 resolver. Special values are 0, if not necessary (or not found
3342 to be necessary yet), and -1 if needed but not determined
3343 yet. */
3344 bfd_vma dt_tlsdesc_plt;
3345
3346 /* The offset into sgot of the GOT entry used by the PLT entry
3347 above. */
3348 bfd_vma dt_tlsdesc_got;
3349
3350 /* Offset in .plt section of tls_arm_trampoline. */
3351 bfd_vma tls_trampoline;
3352
3353 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3354 union
3355 {
3356 bfd_signed_vma refcount;
3357 bfd_vma offset;
3358 } tls_ldm_got;
3359
3360 /* Small local sym cache. */
3361 struct sym_cache sym_cache;
3362
3363 /* For convenience in allocate_dynrelocs. */
3364 bfd * obfd;
3365
3366 /* The amount of space used by the reserved portion of the sgotplt
3367 section, plus whatever space is used by the jump slots. */
3368 bfd_vma sgotplt_jump_table_size;
3369
3370 /* The stub hash table. */
3371 struct bfd_hash_table stub_hash_table;
3372
3373 /* Linker stub bfd. */
3374 bfd *stub_bfd;
3375
3376 /* Linker call-backs. */
3377 asection * (*add_stub_section) (const char *, asection *, asection *,
3378 unsigned int);
3379 void (*layout_sections_again) (void);
3380
3381 /* Array to keep track of which stub sections have been created, and
3382 information on stub grouping. */
3383 struct map_stub *stub_group;
3384
3385 /* Input stub section holding secure gateway veneers. */
3386 asection *cmse_stub_sec;
3387
3388 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3389 start to be allocated. */
3390 bfd_vma new_cmse_stub_offset;
3391
3392 /* Number of elements in stub_group. */
3393 unsigned int top_id;
3394
3395 /* Assorted information used by elf32_arm_size_stubs. */
3396 unsigned int bfd_count;
3397 unsigned int top_index;
3398 asection **input_list;
3399
3400 /* True if the target system uses FDPIC. */
3401 int fdpic_p;
3402
3403 /* Fixup section. Used for FDPIC. */
3404 asection *srofixup;
3405 };
3406
/* Add an FDPIC read-only fixup: record OFFSET (the run-time address of
   a word the FDPIC loader must relocate) in the next free slot of
   SROFIXUP.  The section's reloc_count field is reused as the running
   count of fixups emitted so far.  */
static void
arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
{
  bfd_vma fixup_offset;

  /* Each fixup occupies one 4-byte word; reloc_count doubles as the
     fixup index.  */
  fixup_offset = srofixup->reloc_count++ * 4;
  /* The section must have been sized to hold all fixups beforehand.  */
  BFD_ASSERT (fixup_offset < srofixup->size);
  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
}
3417
/* Count trailing zeros: return the bit index of the least significant
   set bit of MASK.  Like __builtin_ctz, the result for MASK == 0 is
   not meaningful (the portable fallback yields the bit width).  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit = 0;

  while (bit < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      bit++;
    }
  return bit;
#endif
}
3435
/* Return the number of set bits in MASK.  */

static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  /* Kernighan's method: each step clears the lowest set bit, so the
     loop runs once per set bit.  */
  while (mask != 0)
    {
      mask &= mask - 1;
      count++;
    }
  return count;
#endif
}
3454
3455 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3456 asection *sreloc, Elf_Internal_Rela *rel);
3457
/* Write the FDPIC function descriptor for a symbol into the GOT at
   byte offset OFFSET, unless it has already been written.  The low bit
   of *FUNCDESC_OFFSET serves as the "already filled in" flag and is
   set on exit.  A descriptor is two consecutive words.

   For PIC links, emit an R_ARM_FUNCDESC_VALUE dynamic relocation
   against DYNINDX and pre-store ADDR and SEG in the descriptor slots
   for the loader to finish.  Otherwise store the resolved
   DYNRELOC_VALUE and the GOT base value directly, and record a
   read-only fixup for each of the two words.  */
static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  /* Pre-store the address and segment words; the dynamic
	     relocation above covers both.  */
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  /* Static FDPIC link: resolve the descriptor now.  The second
	     word is the link-time GOT base (_GLOBAL_OFFSET_TABLE_).  */
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  /* Both descriptor words need load-time relocation, so emit a
	     rofixup for each.  */
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as written.  */
      *funcdesc_offset |= 1;
    }
}
3505
/* Create an entry in an ARM ELF linker hash table.  Follows the
   standard BFD hash-table protocol: allocate the (derived-size) entry
   if the caller has not, let the superclass initialize its part, then
   initialize the ARM-specific fields.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialize the ARM-specific fields: counters start at zero,
	 offsets use -1 for "not yet assigned".  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;

      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
3551
3552 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3553 symbols. */
3554
3555 static bfd_boolean
3556 elf32_arm_allocate_local_sym_info (bfd *abfd)
3557 {
3558 if (elf_local_got_refcounts (abfd) == NULL)
3559 {
3560 bfd_size_type num_syms;
3561 bfd_size_type size;
3562 char *data;
3563
3564 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3565 size = num_syms * (sizeof (bfd_signed_vma)
3566 + sizeof (struct arm_local_iplt_info *)
3567 + sizeof (bfd_vma)
3568 + sizeof (char)
3569 + sizeof (struct fdpic_local));
3570 data = bfd_zalloc (abfd, size);
3571 if (data == NULL)
3572 return FALSE;
3573
3574 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3575 data += num_syms * sizeof (struct fdpic_local);
3576
3577 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3578 data += num_syms * sizeof (bfd_signed_vma);
3579
3580 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3581 data += num_syms * sizeof (struct arm_local_iplt_info *);
3582
3583 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3584 data += num_syms * sizeof (bfd_vma);
3585
3586 elf32_arm_local_got_tls_type (abfd) = data;
3587 }
3588 return TRUE;
3589 }
3590
3591 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3592 to input bfd ABFD. Create the information if it doesn't already exist.
3593 Return null if an allocation fails. */
3594
3595 static struct arm_local_iplt_info *
3596 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3597 {
3598 struct arm_local_iplt_info **ptr;
3599
3600 if (!elf32_arm_allocate_local_sym_info (abfd))
3601 return NULL;
3602
3603 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3604 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3605 if (*ptr == NULL)
3606 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3607 return *ptr;
3608 }
3609
3610 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3611 in ABFD's symbol table. If the symbol is global, H points to its
3612 hash table entry, otherwise H is null.
3613
3614 Return true if the symbol does have PLT information. When returning
3615 true, point *ROOT_PLT at the target-independent reference count/offset
3616 union and *ARM_PLT at the ARM-specific information. */
3617
3618 static bfd_boolean
3619 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3620 struct elf32_arm_link_hash_entry *h,
3621 unsigned long r_symndx, union gotplt_union **root_plt,
3622 struct arm_plt_info **arm_plt)
3623 {
3624 struct arm_local_iplt_info *local_iplt;
3625
3626 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3627 return FALSE;
3628
3629 if (h != NULL)
3630 {
3631 *root_plt = &h->root.plt;
3632 *arm_plt = &h->plt;
3633 return TRUE;
3634 }
3635
3636 if (elf32_arm_local_iplt (abfd) == NULL)
3637 return FALSE;
3638
3639 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3640 if (local_iplt == NULL)
3641 return FALSE;
3642
3643 *root_plt = &local_iplt->root;
3644 *arm_plt = &local_iplt->arm;
3645 return TRUE;
3646 }
3647
3648 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3649
3650 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3651 before it. */
3652
3653 static bfd_boolean
3654 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3655 struct arm_plt_info *arm_plt)
3656 {
3657 struct elf32_arm_link_hash_table *htab;
3658
3659 htab = elf32_arm_hash_table (info);
3660
3661 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3662 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3663 }
3664
3665 /* Return a pointer to the head of the dynamic reloc list that should
3666 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3667 ABFD's symbol table. Return null if an error occurs. */
3668
3669 static struct elf_dyn_relocs **
3670 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3671 Elf_Internal_Sym *isym)
3672 {
3673 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3674 {
3675 struct arm_local_iplt_info *local_iplt;
3676
3677 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3678 if (local_iplt == NULL)
3679 return NULL;
3680 return &local_iplt->dyn_relocs;
3681 }
3682 else
3683 {
3684 /* Track dynamic relocs needed for local syms too.
3685 We really need local syms available to do this
3686 easily. Oh well. */
3687 asection *s;
3688 void *vpp;
3689
3690 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3691 if (s == NULL)
3692 abort ();
3693
3694 vpp = &elf_section_data (s)->local_dynrel;
3695 return (struct elf_dyn_relocs **) vpp;
3696 }
3697 }
3698
/* Initialize an entry in the stub hash table.  Follows the standard
   BFD hash-table protocol: allocate the derived-size entry if needed,
   let the base class initialize its part, then set the stub-specific
   fields to their "unset" values.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      /* (bfd_vma) -1 marks an offset that has not been assigned yet.  */
      eh->stub_offset = (bfd_vma) -1;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      /* -1 marks "no template selected yet".  */
      eh->stub_template_size = -1;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3741
3742 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3743 shortcuts to them in our hash table. */
3744
3745 static bfd_boolean
3746 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3747 {
3748 struct elf32_arm_link_hash_table *htab;
3749
3750 htab = elf32_arm_hash_table (info);
3751 if (htab == NULL)
3752 return FALSE;
3753
3754 /* BPABI objects never have a GOT, or associated sections. */
3755 if (htab->symbian_p)
3756 return TRUE;
3757
3758 if (! _bfd_elf_create_got_section (dynobj, info))
3759 return FALSE;
3760
3761 /* Also create .rofixup. */
3762 if (htab->fdpic_p)
3763 {
3764 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3765 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3766 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3767 if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
3768 return FALSE;
3769 }
3770
3771 return TRUE;
3772 }
3773
/* Create the .iplt, .rel(a).iplt and .igot.plt sections, used for
   STT_GNU_IFUNC symbols, and cache them in the hash table.  Each is
   created only if not already present.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  /* All three sections share the backend's dynamic section flags.  */
  flags = bed->dynamic_sec_flags;

  /* .iplt holds PLT entries, i.e. read-only code.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt holds the IRELATIVE-style relocations for .iplt.  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* .igot.plt holds the GOT entries referenced by .iplt.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3821
3822 /* Determine if we're dealing with a Thumb only architecture. */
3823
3824 static bfd_boolean
3825 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3826 {
3827 int arch;
3828 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3829 Tag_CPU_arch_profile);
3830
3831 if (profile)
3832 return profile == 'M';
3833
3834 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3835
3836 /* Force return logic to be reviewed for each new architecture. */
3837 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3838
3839 if (arch == TAG_CPU_ARCH_V6_M
3840 || arch == TAG_CPU_ARCH_V6S_M
3841 || arch == TAG_CPU_ARCH_V7E_M
3842 || arch == TAG_CPU_ARCH_V8M_BASE
3843 || arch == TAG_CPU_ARCH_V8M_MAIN)
3844 return TRUE;
3845
3846 return FALSE;
3847 }
3848
3849 /* Determine if we're dealing with a Thumb-2 object. */
3850
3851 static bfd_boolean
3852 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3853 {
3854 int arch;
3855 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3856 Tag_THUMB_ISA_use);
3857
3858 if (thumb_isa)
3859 return thumb_isa == 2;
3860
3861 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3862
3863 /* Force return logic to be reviewed for each new architecture. */
3864 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3865
3866 return (arch == TAG_CPU_ARCH_V6T2
3867 || arch == TAG_CPU_ARCH_V7
3868 || arch == TAG_CPU_ARCH_V7E_M
3869 || arch == TAG_CPU_ARCH_V8
3870 || arch == TAG_CPU_ARCH_V8R
3871 || arch == TAG_CPU_ARCH_V8M_MAIN);
3872 }
3873
3874 /* Determine whether Thumb-2 BL instruction is available. */
3875
3876 static bfd_boolean
3877 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3878 {
3879 int arch =
3880 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3881
3882 /* Force return logic to be reviewed for each new architecture. */
3883 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3884
3885 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3886 return (arch == TAG_CPU_ARCH_V6T2
3887 || arch >= TAG_CPU_ARCH_V7);
3888 }
3889
3890 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3891 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3892 hash table. */
3893
3894 static bfd_boolean
3895 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3896 {
3897 struct elf32_arm_link_hash_table *htab;
3898
3899 htab = elf32_arm_hash_table (info);
3900 if (htab == NULL)
3901 return FALSE;
3902
3903 if (!htab->root.sgot && !create_got_section (dynobj, info))
3904 return FALSE;
3905
3906 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3907 return FALSE;
3908
3909 if (htab->vxworks_p)
3910 {
3911 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3912 return FALSE;
3913
3914 if (bfd_link_pic (info))
3915 {
3916 htab->plt_header_size = 0;
3917 htab->plt_entry_size
3918 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3919 }
3920 else
3921 {
3922 htab->plt_header_size
3923 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3924 htab->plt_entry_size
3925 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3926 }
3927
3928 if (elf_elfheader (dynobj))
3929 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3930 }
3931 else
3932 {
3933 /* PR ld/16017
3934 Test for thumb only architectures. Note - we cannot just call
3935 using_thumb_only() as the attributes in the output bfd have not been
3936 initialised at this point, so instead we use the input bfd. */
3937 bfd * saved_obfd = htab->obfd;
3938
3939 htab->obfd = dynobj;
3940 if (using_thumb_only (htab))
3941 {
3942 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3943 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3944 }
3945 htab->obfd = saved_obfd;
3946 }
3947
3948 if (htab->fdpic_p) {
3949 htab->plt_header_size = 0;
3950 if (info->flags & DF_BIND_NOW)
3951 htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
3952 else
3953 htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
3954 }
3955
3956 if (!htab->root.splt
3957 || !htab->root.srelplt
3958 || !htab->root.sdynbss
3959 || (!bfd_link_pic (info) && !htab->root.srelbss))
3960 abort ();
3961
3962 return TRUE;
3963 }
3964
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   IND is made an indirect or weak alias of DIR: merge IND's reloc,
   PLT and FDPIC bookkeeping into DIR, then delegate the generic fields
   to _bfd_elf_link_hash_copy_indirect.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  Entries
	     merged into an existing edir node are unlinked from eind's
	     list; the remainder are spliced in front of edir's list
	     below.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append edir's old list after the surviving eind nodes.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take over the TLS type if DIR has no GOT references of
	 its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4036
4037 /* Destroy an ARM elf linker hash table. */
4038
4039 static void
4040 elf32_arm_link_hash_table_free (bfd *obfd)
4041 {
4042 struct elf32_arm_link_hash_table *ret
4043 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4044
4045 bfd_hash_table_free (&ret->stub_hash_table);
4046 _bfd_elf_link_hash_table_free (obfd);
4047 }
4048
/* Create an ARM elf linker hash table.  Returns the generic hash table
   pointer embedded in the new elf32_arm_link_hash_table, or NULL on
   allocation/initialisation failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zeroes the structure, so fields not set below start
     out as 0/NULL/FALSE.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  /* ARM defaults to REL relocations; targets needing RELA override
     this later.  */
  ret->use_rel = TRUE;
  ret->obfd = abfd;
  ret->fdpic_p = 0;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  /* Install our destructor so the stub hash table is freed too.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
4093
4094 /* Determine what kind of NOPs are available. */
4095
4096 static bfd_boolean
4097 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4098 {
4099 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4100 Tag_CPU_arch);
4101
4102 /* Force return logic to be reviewed for each new architecture. */
4103 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
4104
4105 return (arch == TAG_CPU_ARCH_V6T2
4106 || arch == TAG_CPU_ARCH_V6K
4107 || arch == TAG_CPU_ARCH_V7
4108 || arch == TAG_CPU_ARCH_V8
4109 || arch == TAG_CPU_ARCH_V8R);
4110 }
4111
4112 static bfd_boolean
4113 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4114 {
4115 switch (stub_type)
4116 {
4117 case arm_stub_long_branch_thumb_only:
4118 case arm_stub_long_branch_thumb2_only:
4119 case arm_stub_long_branch_thumb2_only_pure:
4120 case arm_stub_long_branch_v4t_thumb_arm:
4121 case arm_stub_short_branch_v4t_thumb_arm:
4122 case arm_stub_long_branch_v4t_thumb_arm_pic:
4123 case arm_stub_long_branch_v4t_thumb_tls_pic:
4124 case arm_stub_long_branch_thumb_only_pic:
4125 case arm_stub_cmse_branch_thumb_only:
4126 return TRUE;
4127 case arm_stub_none:
4128 BFD_FAIL ();
4129 return FALSE;
4130 break;
4131 default:
4132 return FALSE;
4133 }
4134 }
4135
/* Determine the type of stub needed, if any, for a call.

   INFO is the overall link.  INPUT_SEC and REL locate the branch being
   resolved in INPUT_BFD.  ST_TYPE and *ACTUAL_BRANCH_TYPE describe the
   target symbol (HASH if global; NAME is used for diagnostics), whose
   resolved address is DESTINATION in SYM_SEC.  Returns arm_stub_none
   when the branch needs no veneer; otherwise the veneer kind, and
   *ACTUAL_BRANCH_TYPE is updated with the destination instruction-set
   state actually branched to.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4480
4481 /* Build a name for an entry in the stub hash table. */
4482
4483 static char *
4484 elf32_arm_stub_name (const asection *input_section,
4485 const asection *sym_sec,
4486 const struct elf32_arm_link_hash_entry *hash,
4487 const Elf_Internal_Rela *rel,
4488 enum elf32_arm_stub_type stub_type)
4489 {
4490 char *stub_name;
4491 bfd_size_type len;
4492
4493 if (hash)
4494 {
4495 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4496 stub_name = (char *) bfd_malloc (len);
4497 if (stub_name != NULL)
4498 sprintf (stub_name, "%08x_%s+%x_%d",
4499 input_section->id & 0xffffffff,
4500 hash->root.root.root.string,
4501 (int) rel->r_addend & 0xffffffff,
4502 (int) stub_type);
4503 }
4504 else
4505 {
4506 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4507 stub_name = (char *) bfd_malloc (len);
4508 if (stub_name != NULL)
4509 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4510 input_section->id & 0xffffffff,
4511 sym_sec->id & 0xffffffff,
4512 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4513 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4514 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4515 (int) rel->r_addend & 0xffffffff,
4516 (int) stub_type);
4517 }
4518
4519 return stub_name;
4520 }
4521
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL for
   non-code sections or when no matching stub entry exists (lookup is
   non-creating).  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain branches needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the per-symbol single-entry cache when it matches
     both the group section and the stub type.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
4573
4574 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4575 section. */
4576
4577 static bfd_boolean
4578 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4579 {
4580 if (stub_type >= max_stub_type)
4581 abort (); /* Should be unreachable. */
4582
4583 switch (stub_type)
4584 {
4585 case arm_stub_cmse_branch_thumb_only:
4586 return TRUE;
4587
4588 default:
4589 return FALSE;
4590 }
4591
4592 abort (); /* Should be unreachable. */
4593 }
4594
4595 /* Required alignment (as a power of 2) for the dedicated section holding
4596 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4597 with input sections. */
4598
4599 static int
4600 arm_dedicated_stub_output_section_required_alignment
4601 (enum elf32_arm_stub_type stub_type)
4602 {
4603 if (stub_type >= max_stub_type)
4604 abort (); /* Should be unreachable. */
4605
4606 switch (stub_type)
4607 {
4608 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4609 boundary. */
4610 case arm_stub_cmse_branch_thumb_only:
4611 return 5;
4612
4613 default:
4614 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4615 return 0;
4616 }
4617
4618 abort (); /* Should be unreachable. */
4619 }
4620
4621 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4622 NULL if veneers of this type are interspersed with input sections. */
4623
4624 static const char *
4625 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4626 {
4627 if (stub_type >= max_stub_type)
4628 abort (); /* Should be unreachable. */
4629
4630 switch (stub_type)
4631 {
4632 case arm_stub_cmse_branch_thumb_only:
4633 return ".gnu.sgstubs";
4634
4635 default:
4636 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4637 return NULL;
4638 }
4639
4640 abort (); /* Should be unreachable. */
4641 }
4642
4643 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4644 returns the address of the hash table field in HTAB holding a pointer to the
4645 corresponding input section. Otherwise, returns NULL. */
4646
4647 static asection **
4648 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4649 enum elf32_arm_stub_type stub_type)
4650 {
4651 if (stub_type >= max_stub_type)
4652 abort (); /* Should be unreachable. */
4653
4654 switch (stub_type)
4655 {
4656 case arm_stub_cmse_branch_thumb_only:
4657 return &htab->cmse_stub_sec;
4658
4659 default:
4660 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4661 return NULL;
4662 }
4663
4664 abort (); /* Should be unreachable. */
4665 }
4666
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL on failure (missing dedicated
   output section, allocation failure, or add_stub_section failure).  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated case (e.g. CMSE): the output section must already
	 exist (typically placed by the linker script).  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed case: attach the stub section to the head section
	 of this input section's stub group.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* NaCl needs 16-byte bundles (2^4); otherwise 8-byte (2^3).  */
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Section name is "<prefix>" STUB_SUFFIX.  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section under this input section's own group slot
     too, so later lookups hit directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4746
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  SECTION is the section branching into
   the veneer, or NULL for stub types living in a dedicated output
   section.  Returns NULL on failure.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      /* Use the stub section for the diagnostic when the caller passed
	 no input section.  */
      if (section == NULL)
	section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  /* Offset is assigned later, once stub sizes are known.  */
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
4782
4783 /* Store an Arm insn into an output section not processed by
4784 elf32_arm_write_section. */
4785
4786 static void
4787 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4788 bfd * output_bfd, bfd_vma val, void * ptr)
4789 {
4790 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4791 bfd_putl32 (val, ptr);
4792 else
4793 bfd_putb32 (val, ptr);
4794 }
4795
4796 /* Store a 16-bit Thumb insn into an output section not processed by
4797 elf32_arm_write_section. */
4798
4799 static void
4800 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4801 bfd * output_bfd, bfd_vma val, void * ptr)
4802 {
4803 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4804 bfd_putl16 (val, ptr);
4805 else
4806 bfd_putb16 (val, ptr);
4807 }
4808
4809 /* Store a Thumb2 insn into an output section not processed by
4810 elf32_arm_write_section. */
4811
4812 static void
4813 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4814 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4815 {
4816 /* T2 instructions are 16-bit streamed. */
4817 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4818 {
4819 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4820 bfd_putl16 ((val & 0xffff), ptr + 2);
4821 }
4822 else
4823 {
4824 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4825 bfd_putb16 ((val & 0xffff), ptr + 2);
4826 }
4827 }
4828
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  H is the symbol's hash entry,
   or NULL for a local symbol.  */

static unsigned
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  /* No relaxation in PIC links, or for undefined weak symbols.  */
  if (bfd_link_pic (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    /* TLS descriptor relocs relax to Local Exec for local symbols,
       Initial Exec for globals.  */
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
4855
4856 static bfd_reloc_status_type elf32_arm_final_link_relocate
4857 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4858 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4859 const char *, unsigned char, enum arm_st_branch_type,
4860 struct elf_link_hash_entry *, bfd_boolean *, char **);
4861
/* Return the required alignment, in bytes, of stubs of type STUB_TYPE.
   NOTE(review): NaCl stubs use 16 — presumably the NaCl bundle size;
   confirm against the NaCl stub templates.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 erratum veneers contain Thumb code only.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4900
4901 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4902 veneering (TRUE) or have their own symbol (FALSE). */
4903
4904 static bfd_boolean
4905 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4906 {
4907 if (stub_type >= max_stub_type)
4908 abort (); /* Should be unreachable. */
4909
4910 switch (stub_type)
4911 {
4912 case arm_stub_cmse_branch_thumb_only:
4913 return TRUE;
4914
4915 default:
4916 return FALSE;
4917 }
4918
4919 abort (); /* Should be unreachable. */
4920 }
4921
4922 /* Returns the padding needed for the dedicated section used stubs of type
4923 STUB_TYPE. */
4924
4925 static int
4926 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4927 {
4928 if (stub_type >= max_stub_type)
4929 abort (); /* Should be unreachable. */
4930
4931 switch (stub_type)
4932 {
4933 case arm_stub_cmse_branch_thumb_only:
4934 return 32;
4935
4936 default:
4937 return 0;
4938 }
4939
4940 abort (); /* Should be unreachable. */
4941 }
4942
4943 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4944 returns the address of the hash table field in HTAB holding the offset at
4945 which new veneers should be layed out in the stub section. */
4946
4947 static bfd_vma*
4948 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4949 enum elf32_arm_stub_type stub_type)
4950 {
4951 switch (stub_type)
4952 {
4953 case arm_stub_cmse_branch_thumb_only:
4954 return &htab->new_cmse_stub_offset;
4955
4956 default:
4957 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4958 return NULL;
4959 }
4960 }
4961
/* Build the stub described by GEN_ENTRY, a stub hash table entry, using
   the bfd_link_info passed as IN_ARG.  Called via bfd_hash_traverse.
   Emits the stub's instruction template into the stub section contents
   and applies the relocations needed to point the stub at its target.
   Returns FALSE on hard errors only.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): these initializers cover only 2 of MAXRELOCS (3)
     elements; the third is zero-initialized, not -1/0 as the pattern
     suggests.  Benign since NRELOCS bounds all accesses, but worth
     confirming the initializer was meant to be {-1, -1, -1}.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* Two-pass scheme: when fix_cortex_a8 < 0 only halfword-aligned
     (Cortex-A8) stubs are built on this traversal, otherwise only the
     more strictly aligned ones.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which entries need a
     relocation applied afterwards.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* Thumb-2 insns are emitted as two halfwords, most
	     significant first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal pool entries always need a relocation.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  A removed SG veneer is the one legitimate
     empty, relocation-free slot.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Apply the recorded relocations against the stub target.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
5130
5131 /* Calculate the template, template size and instruction size for a stub.
5132 Return value is the instruction size. */
5133
5134 static unsigned int
5135 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5136 const insn_sequence **stub_template,
5137 int *stub_template_size)
5138 {
5139 const insn_sequence *template_sequence = NULL;
5140 int template_size = 0, i;
5141 unsigned int size;
5142
5143 template_sequence = stub_definitions[stub_type].template_sequence;
5144 if (stub_template)
5145 *stub_template = template_sequence;
5146
5147 template_size = stub_definitions[stub_type].template_size;
5148 if (stub_template_size)
5149 *stub_template_size = template_size;
5150
5151 size = 0;
5152 for (i = 0; i < template_size; i++)
5153 {
5154 switch (template_sequence[i].type)
5155 {
5156 case THUMB16_TYPE:
5157 size += 2;
5158 break;
5159
5160 case ARM_TYPE:
5161 case THUMB32_TYPE:
5162 case DATA_TYPE:
5163 size += 4;
5164 break;
5165
5166 default:
5167 BFD_FAIL ();
5168 return 0;
5169 }
5170 }
5171
5172 return size;
5173 }
5174
5175 /* As above, but don't actually build the stub. Just bump offset so
5176 we know stub section sizes. */
5177
5178 static bfd_boolean
5179 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5180 void *in_arg ATTRIBUTE_UNUSED)
5181 {
5182 struct elf32_arm_stub_hash_entry *stub_entry;
5183 const insn_sequence *template_sequence;
5184 int template_size, size;
5185
5186 /* Massage our args to the form they really have. */
5187 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5188
5189 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5190 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5191
5192 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5193 &template_size);
5194
5195 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5196 if (stub_entry->stub_template_size)
5197 {
5198 stub_entry->stub_size = size;
5199 stub_entry->stub_template = template_sequence;
5200 stub_entry->stub_template_size = template_size;
5201 }
5202
5203 /* Already accounted for. */
5204 if (stub_entry->stub_offset != (bfd_vma) -1)
5205 return TRUE;
5206
5207 size = (size + 7) & ~7;
5208 stub_entry->stub_sec->size += size;
5209
5210 return TRUE;
5211 }
5212
5213 /* External entry points for sizing and building linker stubs. */
5214
5215 /* Set up various things so that we can make a list of input sections
5216 for each output section included in the link. Returns -1 on error,
5217 0 when no stubs will be needed, and 1 on success. */
5218
5219 int
5220 elf32_arm_setup_section_lists (bfd *output_bfd,
5221 struct bfd_link_info *info)
5222 {
5223 bfd *input_bfd;
5224 unsigned int bfd_count;
5225 unsigned int top_id, top_index;
5226 asection *section;
5227 asection **input_list, **list;
5228 bfd_size_type amt;
5229 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5230
5231 if (htab == NULL)
5232 return 0;
5233 if (! is_elf_hash_table (htab))
5234 return 0;
5235
5236 /* Count the number of input BFDs and find the top input section id. */
5237 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5238 input_bfd != NULL;
5239 input_bfd = input_bfd->link.next)
5240 {
5241 bfd_count += 1;
5242 for (section = input_bfd->sections;
5243 section != NULL;
5244 section = section->next)
5245 {
5246 if (top_id < section->id)
5247 top_id = section->id;
5248 }
5249 }
5250 htab->bfd_count = bfd_count;
5251
5252 amt = sizeof (struct map_stub) * (top_id + 1);
5253 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5254 if (htab->stub_group == NULL)
5255 return -1;
5256 htab->top_id = top_id;
5257
5258 /* We can't use output_bfd->section_count here to find the top output
5259 section index as some sections may have been removed, and
5260 _bfd_strip_section_from_output doesn't renumber the indices. */
5261 for (section = output_bfd->sections, top_index = 0;
5262 section != NULL;
5263 section = section->next)
5264 {
5265 if (top_index < section->index)
5266 top_index = section->index;
5267 }
5268
5269 htab->top_index = top_index;
5270 amt = sizeof (asection *) * (top_index + 1);
5271 input_list = (asection **) bfd_malloc (amt);
5272 htab->input_list = input_list;
5273 if (input_list == NULL)
5274 return -1;
5275
5276 /* For sections we aren't interested in, mark their entries with a
5277 value we can check later. */
5278 list = input_list + top_index;
5279 do
5280 *list = bfd_abs_section_ptr;
5281 while (list-- != input_list);
5282
5283 for (section = output_bfd->sections;
5284 section != NULL;
5285 section = section->next)
5286 {
5287 if ((section->flags & SEC_CODE) != 0)
5288 input_list[section->index] = NULL;
5289 }
5290
5291 return 1;
5292 }
5293
5294 /* The linker repeatedly calls this function for each input section,
5295 in the order that input sections are linked into output sections.
5296 Build lists of input sections to determine groupings between which
5297 we may insert linker stubs. */
5298
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  /* Indices above top_index were never entered into input_list by
     elf32_arm_setup_section_lists.  */
  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we are not tracking;
	 only code sections are collected.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5323
5324 /* See whether we can group stub sections together. Grouping stub
5325 sections may result in fewer stubs. More importantly, we need to
5326 put all .init* and .fini* stubs at the end of the .init or
5327 .fini output sections respectively, because glibc splits the
5328 _init and _fini functions into multiple parts. Putting a stub in
5329 the middle of a function is not a good idea. */
5330
/* Walk every tracked output section's input list (built by
   elf32_arm_next_input_section) and partition the input sections into
   groups of at most STUB_GROUP_SIZE bytes, recording each group's stub
   section (the last member, CURR) in htab->stub_group.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Entry marked as "not interesting" by setup_section_lists.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      /* After the reversal below the link_sec field holds the NEXT
	 pointer, hence the alias above.  */
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group from HEAD as far as the size budget
	     allows; CURR ends up as the group's last section.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5424
5425 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5426 erratum fix. */
5427
5428 static int
5429 a8_reloc_compare (const void *a, const void *b)
5430 {
5431 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5432 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5433
5434 if (ra->from < rb->from)
5435 return -1;
5436 else if (ra->from > rb->from)
5437 return 1;
5438 else
5439 return 0;
5440 }
5441
5442 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5443 const char *, char **);
5444
5445 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5446 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5447 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5448 otherwise. */
5449
static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan allocated, executable, kept code sections.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached contents when available; otherwise read (and later
	 free) them ourselves.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans of the section.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  /* Only Thumb code ('t' mapping symbol) is affected.  */
	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 instruction.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* Erratum trigger: a 32-bit branch whose first halfword
		 ends a 4KB page, preceded by a 32-bit non-branch
		 instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation recorded at this branch's
		     address (A8_RELOCS is sorted by `from').  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) 21-bit signed
			 offset and sign-extend it.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX 25-bit signed offset
			 (I1/I2 derived from J1/J2 XOR S) and
			 sign-extend it.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* A fix is only needed when the branch target lies
			 in the same 4KB page as the branch itself.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      /* NOTE(review): bfd_realloc result overwrites
				 a8_fixes without a NULL check; on failure
				 the old table leaks and the store below
				 dereferences NULL.  Consider a temporary.  */
			      a8_fixes = (struct a8_erratum_fix *)
			          bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Only free contents we read ourselves; cached contents belong
	 to the section data.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Write the (possibly reallocated) table back to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5755
5756 /* Create or update a stub entry depending on whether the stub can already be
5757 found in HTAB. The stub is identified by:
5758 - its type STUB_TYPE
5759 - its source branch (note that several can share the same stub) whose
5760 section and relocation (if any) are given by SECTION and IRELA
5761 respectively
5762 - its target symbol whose input section, hash, name, value and branch type
5763 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5764 respectively
5765
5766 If found, the value of the stub's target symbol is updated from SYM_VALUE
5767 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5768 TRUE and the stub entry is initialized.
5769
5770 Returns the stub that was created or updated, or NULL if an error
5771 occurred. */
5772
static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* Symbol-claiming stubs (SG veneers) are keyed by the symbol name
     itself; others get a synthesized name.  Note stub_name is only
     owned (and freed) by this function in the latter case.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  /* Pick the name under which the stub's own symbol will appear.  */
  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the three formats
	 used below, so this allocation fits any of them.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5867
5868 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5869 gateway veneer to transition from non secure to secure state and create them
5870 accordingly.
5871
5872 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5873 defines the conditions that govern Secure Gateway veneer creation for a
5874 given symbol <SYM> as follows:
5875 - it has function type
5876 - it has non local binding
5877 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5878 same type, binding and value as <SYM> (called normal symbol).
5879 An entry function can handle secure state transition itself in which case
5880 its special symbol would have a different value from the normal symbol.
5881
5882 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5883 entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.

   The return value is TRUE on success and FALSE if a stub failed to be
   allocated or an invalid symbol was found.  */
5888
static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  /* Symbols with index below sh_info have local binding; the rest are
     covered by SYM_HASHES.  */
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse cached local symbols if available, otherwise read them in.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
	    continue;
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  /* Special symbol with local binding.  Special symbols must be
	     global or weak, so this is always an error.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;

	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      /* Special symbols are only meaningful for ARMv8-M with the
	 Microcontroller profile; warn once per input file otherwise.  */
      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  if (i < ext_start)
	    continue;
	}

      /* Strip the special-symbol prefix to get the name of the associated
	 normal symbol and look it up in the global hash table.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* NOTE: cmse_hash is only valid here because the local-binding case
	 above always sets cmse_invalid and thus continues before reaching
	 this point.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Do not create veneers once an error has been diagnosed.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Free the symbols only if we read them in ourselves (ie they were not
     cached in symtab_hdr->contents).  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6066
6067 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6068 code entry function, ie can be called from non secure code without using a
6069 veneer. */
6070
6071 static bfd_boolean
6072 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6073 {
6074 bfd_byte contents[4];
6075 uint32_t first_insn;
6076 asection *section;
6077 file_ptr offset;
6078 bfd *abfd;
6079
6080 /* Defined symbol of function type. */
6081 if (hash->root.root.type != bfd_link_hash_defined
6082 && hash->root.root.type != bfd_link_hash_defweak)
6083 return FALSE;
6084 if (hash->root.type != STT_FUNC)
6085 return FALSE;
6086
6087 /* Read first instruction. */
6088 section = hash->root.root.u.def.section;
6089 abfd = section->owner;
6090 offset = hash->root.root.u.def.value - section->vma;
6091 if (!bfd_get_section_contents (abfd, section, contents, offset,
6092 sizeof (contents)))
6093 return FALSE;
6094
6095 first_insn = bfd_get_32 (abfd, contents);
6096
6097 /* Starts by SG instruction. */
6098 return first_insn == 0xe97fe97f;
6099 }
6100
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (ie. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6104
6105 static bfd_boolean
6106 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6107 {
6108 struct elf32_arm_stub_hash_entry *stub_entry;
6109 struct bfd_link_info *info;
6110
6111 /* Massage our args to the form they really have. */
6112 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6113 info = (struct bfd_link_info *) gen_info;
6114
6115 if (info->out_implib_bfd)
6116 return TRUE;
6117
6118 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6119 return TRUE;
6120
6121 if (stub_entry->stub_offset == (bfd_vma) -1)
6122 _bfd_error_handler (" %s", stub_entry->output_name);
6123
6124 return TRUE;
6125 }
6126
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns TRUE if no error occurred.  In that case,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function and HTAB->new_cmse_stub_offset is set to the biggest
   offset observed, for new veneers to be laid out after.  */
6139
6140 static bfd_boolean
6141 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6142 struct elf32_arm_link_hash_table *htab,
6143 int *cmse_stub_created)
6144 {
6145 long symsize;
6146 char *sym_name;
6147 flagword flags;
6148 long i, symcount;
6149 bfd *in_implib_bfd;
6150 asection *stub_out_sec;
6151 bfd_boolean ret = TRUE;
6152 Elf_Internal_Sym *intsym;
6153 const char *out_sec_name;
6154 bfd_size_type cmse_stub_size;
6155 asymbol **sympp = NULL, *sym;
6156 struct elf32_arm_link_hash_entry *hash;
6157 const insn_sequence *cmse_stub_template;
6158 struct elf32_arm_stub_hash_entry *stub_entry;
6159 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6160 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6161 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6162
6163 /* No input secure gateway import library. */
6164 if (!htab->in_implib_bfd)
6165 return TRUE;
6166
6167 in_implib_bfd = htab->in_implib_bfd;
6168 if (!htab->cmse_implib)
6169 {
6170 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6171 "Gateway import libraries"), in_implib_bfd);
6172 return FALSE;
6173 }
6174
6175 /* Get symbol table size. */
6176 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6177 if (symsize < 0)
6178 return FALSE;
6179
6180 /* Read in the input secure gateway import library's symbol table. */
6181 sympp = (asymbol **) xmalloc (symsize);
6182 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6183 if (symcount < 0)
6184 {
6185 ret = FALSE;
6186 goto free_sym_buf;
6187 }
6188
6189 htab->new_cmse_stub_offset = 0;
6190 cmse_stub_size =
6191 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6192 &cmse_stub_template,
6193 &cmse_stub_template_size);
6194 out_sec_name =
6195 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6196 stub_out_sec =
6197 bfd_get_section_by_name (htab->obfd, out_sec_name);
6198 if (stub_out_sec != NULL)
6199 cmse_stub_sec_vma = stub_out_sec->vma;
6200
6201 /* Set addresses of veneers mentionned in input secure gateway import
6202 library's symbol table. */
6203 for (i = 0; i < symcount; i++)
6204 {
6205 sym = sympp[i];
6206 flags = sym->flags;
6207 sym_name = (char *) bfd_asymbol_name (sym);
6208 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6209
6210 if (sym->section != bfd_abs_section_ptr
6211 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6212 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6213 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6214 != ST_BRANCH_TO_THUMB))
6215 {
6216 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6217 "symbol should be absolute, global and "
6218 "refer to Thumb functions"),
6219 in_implib_bfd, sym_name);
6220 ret = FALSE;
6221 continue;
6222 }
6223
6224 veneer_value = bfd_asymbol_value (sym);
6225 stub_offset = veneer_value - cmse_stub_sec_vma;
6226 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6227 FALSE, FALSE);
6228 hash = (struct elf32_arm_link_hash_entry *)
6229 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6230
6231 /* Stub entry should have been created by cmse_scan or the symbol be of
6232 a secure function callable from non secure code. */
6233 if (!stub_entry && !hash)
6234 {
6235 bfd_boolean new_stub;
6236
6237 _bfd_error_handler
6238 (_("entry function `%s' disappeared from secure code"), sym_name);
6239 hash = (struct elf32_arm_link_hash_entry *)
6240 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6241 stub_entry
6242 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6243 NULL, NULL, bfd_abs_section_ptr, hash,
6244 sym_name, veneer_value,
6245 ST_BRANCH_TO_THUMB, &new_stub);
6246 if (stub_entry == NULL)
6247 ret = FALSE;
6248 else
6249 {
6250 BFD_ASSERT (new_stub);
6251 new_cmse_stubs_created++;
6252 (*cmse_stub_created)++;
6253 }
6254 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6255 stub_entry->stub_offset = stub_offset;
6256 }
6257 /* Symbol found is not callable from non secure code. */
6258 else if (!stub_entry)
6259 {
6260 if (!cmse_entry_fct_p (hash))
6261 {
6262 _bfd_error_handler (_("`%s' refers to a non entry function"),
6263 sym_name);
6264 ret = FALSE;
6265 }
6266 continue;
6267 }
6268 else
6269 {
6270 /* Only stubs for SG veneers should have been created. */
6271 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6272
6273 /* Check visibility hasn't changed. */
6274 if (!!(flags & BSF_GLOBAL)
6275 != (hash->root.root.type == bfd_link_hash_defined))
6276 _bfd_error_handler
6277 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6278 sym_name);
6279
6280 stub_entry->stub_offset = stub_offset;
6281 }
6282
6283 /* Size should match that of a SG veneer. */
6284 if (intsym->st_size != cmse_stub_size)
6285 {
6286 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6287 in_implib_bfd, sym_name);
6288 ret = FALSE;
6289 }
6290
6291 /* Previous veneer address is before current SG veneer section. */
6292 if (veneer_value < cmse_stub_sec_vma)
6293 {
6294 /* Avoid offset underflow. */
6295 if (stub_entry)
6296 stub_entry->stub_offset = 0;
6297 stub_offset = 0;
6298 ret = FALSE;
6299 }
6300
6301 /* Complain if stub offset not a multiple of stub size. */
6302 if (stub_offset % cmse_stub_size)
6303 {
6304 _bfd_error_handler
6305 (_("offset of veneer for entry function `%s' not a multiple of "
6306 "its size"), sym_name);
6307 ret = FALSE;
6308 }
6309
6310 if (!ret)
6311 continue;
6312
6313 new_cmse_stubs_created--;
6314 if (veneer_value < cmse_stub_array_start)
6315 cmse_stub_array_start = veneer_value;
6316 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6317 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6318 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6319 }
6320
6321 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6322 {
6323 BFD_ASSERT (new_cmse_stubs_created > 0);
6324 _bfd_error_handler
6325 (_("new entry function(s) introduced but no output import library "
6326 "specified:"));
6327 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6328 }
6329
6330 if (cmse_stub_array_start != cmse_stub_sec_vma)
6331 {
6332 _bfd_error_handler
6333 (_("start address of `%s' is different from previous link"),
6334 out_sec_name);
6335 ret = FALSE;
6336 }
6337
6338 free_sym_buf:
6339 free (sympp);
6340 return ret;
6341 }
6342
6343 /* Determine and set the size of the stub section for a final link.
6344
6345 The basic idea here is to examine all the relocations looking for
6346 PC-relative calls to a target that is unreachable with a "bl"
6347 instruction. */
6348
bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_boolean ret = TRUE;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  /* Allocate initial (growable) tables for the Cortex-A8 erratum
     workaround.  */
  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Iterate until the stub sections' sizes stabilize: adding stubs moves
     code, which can bring new branches out of range and require further
     stubs.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      enum elf32_arm_stub_type stub_type;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      /* Walk over all input bfds, scanning their relocations for branches
	 that need a stub.  */
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit scan of symbols to object file whose profile is
	     Microcontroller to not hinder performance in the general case.  */
	  if (m_profile && first_veneer_scan)
	    {
	      struct elf_link_hash_entry **sym_hashes;

	      sym_hashes = elf_sym_hashes (input_bfd);
	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
			      &cmse_stub_created))
		goto error_ret_free_local;

	      if (cmse_stub_created != 0)
		stub_changed = TRUE;
	    }

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  /* Error exit labels live here so they can free the
		     resources acquired in the enclosing loops.  */
		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		    /* Fall through.  */
		    error_ret_free_local:
		      if (local_syms != NULL
			  && (symtab_hdr->contents
			      != (unsigned char *) local_syms))
			free (local_syms);
		      return FALSE;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			   (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      /* Read local symbols lazily, once per input bfd.  */
		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do/while(0) so the error path can break out of the
		     stub-creation sequence while still falling through to
		     the Cortex-A8 candidate check below.  */
		  do
		    {
		      bfd_boolean new_stub;
		      struct elf32_arm_stub_hash_entry *stub_entry;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      stub_entry =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      created_stub = stub_entry != NULL;
		      if (!created_stub)
			goto error_ret_free_internal;
		      else if (!new_stub)
			break;
		      else
			stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				  bfd_realloc (a8_relocs,
					       sizeof (struct a8_erratum_reloc)
					       * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }

	  /* Either free the local symbols we read in, or cache them on the
	     symtab header for later passes.  */
	  if (local_syms != NULL
	      && symtab_hdr->contents != (unsigned char *) local_syms)
	    {
	      if (!info->keep_memory)
		free (local_syms);
	      else
		symtab_hdr->contents = (unsigned char *) local_syms;
	    }
	}

      /* Pin SG veneers at the addresses recorded in the input import
	 library; only needed on the first pass.  */
      if (first_veneer_scan
	  && !set_cmse_veneer_addr_from_implib (info, htab,
						&cmse_stub_created))
	ret = FALSE;

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      /* Layout has stabilized: no new stubs on this pass.  */
      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      /* Add new SG veneers after those already in the input import
	 library.  */
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  bfd_vma *start_offset_p;
	  asection **stub_sec_p;

	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  if (start_offset_p == NULL)
	    continue;

	  BFD_ASSERT (stub_sec_p != NULL);
	  if (*stub_sec_p != NULL)
	    (*stub_sec_p)->size = *start_offset_p;
	}

      /* Compute stub section size, considering padding.  */
      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  int size, padding;
	  asection **stub_sec_p;

	  padding = arm_dedicated_stub_section_padding (stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  /* Skip if no stub input section or no stub section padding
	     required.  */
	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
	    continue;
	  /* Stub section padding required but no dedicated section.  */
	  BFD_ASSERT (stub_sec_p);

	  size = (*stub_sec_p)->size;
	  size = (size + padding - 1) & ~(padding - 1);
	  (*stub_sec_p)->size = size;
	}

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);

	    if (stub_sec == NULL)
	      return FALSE;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      /* The secure gateway veneer scan only needs to run once.  */
      first_veneer_scan = FALSE;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
				  section->owner, stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = (bfd_vma) -1;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return ret;
}
6943
6944 /* Build all the stubs associated with the current output file. The
6945 stubs are kept in a hash table attached to the main linker hash
6946 table. We also set up the .plt entries for statically linked PIC
6947 functions here. This function is called via arm_elf_finish in the
6948 linker. */
6949
6950 bfd_boolean
6951 elf32_arm_build_stubs (struct bfd_link_info *info)
6952 {
6953 asection *stub_sec;
6954 struct bfd_hash_table *table;
6955 enum elf32_arm_stub_type stub_type;
6956 struct elf32_arm_link_hash_table *htab;
6957
6958 htab = elf32_arm_hash_table (info);
6959 if (htab == NULL)
6960 return FALSE;
6961
6962 for (stub_sec = htab->stub_bfd->sections;
6963 stub_sec != NULL;
6964 stub_sec = stub_sec->next)
6965 {
6966 bfd_size_type size;
6967
6968 /* Ignore non-stub sections. */
6969 if (!strstr (stub_sec->name, STUB_SUFFIX))
6970 continue;
6971
6972 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6973 must at least be done for stub section requiring padding and for SG
6974 veneers to ensure that a non secure code branching to a removed SG
6975 veneer causes an error. */
6976 size = stub_sec->size;
6977 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6978 if (stub_sec->contents == NULL && size != 0)
6979 return FALSE;
6980
6981 stub_sec->size = 0;
6982 }
6983
6984 /* Add new SG veneers after those already in the input import library. */
6985 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6986 {
6987 bfd_vma *start_offset_p;
6988 asection **stub_sec_p;
6989
6990 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6991 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6992 if (start_offset_p == NULL)
6993 continue;
6994
6995 BFD_ASSERT (stub_sec_p != NULL);
6996 if (*stub_sec_p != NULL)
6997 (*stub_sec_p)->size = *start_offset_p;
6998 }
6999
7000 /* Build the stubs as directed by the stub hash table. */
7001 table = &htab->stub_hash_table;
7002 bfd_hash_traverse (table, arm_build_one_stub, info);
7003 if (htab->fix_cortex_a8)
7004 {
7005 /* Place the cortex a8 stubs last. */
7006 htab->fix_cortex_a8 = -1;
7007 bfd_hash_traverse (table, arm_build_one_stub, info);
7008 }
7009
7010 return TRUE;
7011 }
7012
7013 /* Locate the Thumb encoded calling stub for NAME. */
7014
7015 static struct elf_link_hash_entry *
7016 find_thumb_glue (struct bfd_link_info *link_info,
7017 const char *name,
7018 char **error_message)
7019 {
7020 char *tmp_name;
7021 struct elf_link_hash_entry *hash;
7022 struct elf32_arm_link_hash_table *hash_table;
7023
7024 /* We need a pointer to the armelf specific hash table. */
7025 hash_table = elf32_arm_hash_table (link_info);
7026 if (hash_table == NULL)
7027 return NULL;
7028
7029 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7030 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7031
7032 BFD_ASSERT (tmp_name);
7033
7034 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7035
7036 hash = elf_link_hash_lookup
7037 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7038
7039 if (hash == NULL
7040 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7041 "Thumb", tmp_name, name) == -1)
7042 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7043
7044 free (tmp_name);
7045
7046 return hash;
7047 }
7048
7049 /* Locate the ARM encoded calling stub for NAME. */
7050
7051 static struct elf_link_hash_entry *
7052 find_arm_glue (struct bfd_link_info *link_info,
7053 const char *name,
7054 char **error_message)
7055 {
7056 char *tmp_name;
7057 struct elf_link_hash_entry *myh;
7058 struct elf32_arm_link_hash_table *hash_table;
7059
7060 /* We need a pointer to the elfarm specific hash table. */
7061 hash_table = elf32_arm_hash_table (link_info);
7062 if (hash_table == NULL)
7063 return NULL;
7064
7065 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7066 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7067
7068 BFD_ASSERT (tmp_name);
7069
7070 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7071
7072 myh = elf_link_hash_lookup
7073 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7074
7075 if (myh == NULL
7076 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7077 "ARM", tmp_name, name) == -1)
7078 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7079
7080 free (tmp_name);
7081
7082 return myh;
7083 }
7084
7085 /* ARM->Thumb glue (static images):
7086
7087 .arm
7088 __func_from_arm:
7089 ldr r12, __func_addr
7090 bx r12
7091 __func_addr:
7092 .word func @ behave as if you saw a ARM_32 reloc.
7093
7094 (v5t static images)
7095 .arm
7096 __func_from_arm:
7097 ldr pc, __func_addr
7098 __func_addr:
7099 .word func @ behave as if you saw a ARM_32 reloc.
7100
7101 (relocatable images)
7102 .arm
7103 __func_from_arm:
7104 ldr r12, __func_offset
7105 add r12, r12, pc
7106 bx r12
7107 __func_offset:
7108 .word func - . */
7109
#define ARM2THUMB_STATIC_GLUE_SIZE 12
/* ldr r12, __func_addr  (first insn of the static glue sketched above).  */
static const insn32 a2t1_ldr_insn = 0xe59fc000;
/* bx r12.  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
/* Placeholder for the target address word; presumably the low bit marks a
   Thumb destination — confirm against where the word is patched.  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
/* ldr pc, __func_addr  (v5T can load directly into PC, see sketch).  */
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
/* Placeholder for the target address word, as above.  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
/* ldr r12, __func_offset.  */
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
/* add r12, r12, pc  (make the PC-relative offset absolute).  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
/* bx r12.  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				    ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				    __func_addr:
					.word	     func  */

#define THUMB2ARM_GLUE_SIZE 8
/* bx pc  (Thumb encoding; switches to the ARM code that follows).  */
static const insn16 t2a1_bx_pc_insn = 0x4778;
/* nop (mov r8, r8) — keeps the following ARM insn word-aligned.  */
static const insn16 t2a2_noop_insn = 0x46c0;
/* b func  (ARM branch; displacement patched in later).  */
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
/* tst rN, #1 — test the Thumb bit of the target address.  */
static const insn32 armbx1_tst_insn = 0xe3100001;
/* moveq pc, rN — plain ARM return when the bit is clear.  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;
/* bx rN.  */
static const insn32 armbx3_bx_insn = 0xe12fff10;
7153
7154 #ifndef ELFARM_NABI_C_INCLUDED
7155 static void
7156 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7157 {
7158 asection * s;
7159 bfd_byte * contents;
7160
7161 if (size == 0)
7162 {
7163 /* Do not include empty glue sections in the output. */
7164 if (abfd != NULL)
7165 {
7166 s = bfd_get_linker_section (abfd, name);
7167 if (s != NULL)
7168 s->flags |= SEC_EXCLUDE;
7169 }
7170 return;
7171 }
7172
7173 BFD_ASSERT (abfd != NULL);
7174
7175 s = bfd_get_linker_section (abfd, name);
7176 BFD_ASSERT (s != NULL);
7177
7178 contents = (bfd_byte *) bfd_alloc (abfd, size);
7179
7180 BFD_ASSERT (s->size == size);
7181 s->contents = contents;
7182 }
7183
7184 bfd_boolean
7185 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7186 {
7187 struct elf32_arm_link_hash_table * globals;
7188
7189 globals = elf32_arm_hash_table (info);
7190 BFD_ASSERT (globals != NULL);
7191
7192 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7193 globals->arm_glue_size,
7194 ARM2THUMB_GLUE_SECTION_NAME);
7195
7196 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7197 globals->thumb_glue_size,
7198 THUMB2ARM_GLUE_SECTION_NAME);
7199
7200 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7201 globals->vfp11_erratum_glue_size,
7202 VFP11_ERRATUM_VENEER_SECTION_NAME);
7203
7204 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7205 globals->stm32l4xx_erratum_glue_size,
7206 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7207
7208 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7209 globals->bx_glue_size,
7210 ARM_BX_GLUE_SECTION_NAME);
7211
7212 return TRUE;
7213 }
7214
7215 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7216 returns the symbol identifying the stub. */
7217
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Glue symbol name is ARM2THUMB_GLUE_ENTRY_NAME with the target's
     name substituted in.  NOTE(review): allocation only soft-checked.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Mark the new glue symbol as a local function so it is not exported.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue flavour: PIC glue for shared/relocatable output,
     shorter v5 glue when BLX is available, full static glue otherwise.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for this entry in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7287
7288 /* Allocate space for ARMv4 BX veneers. */
7289
static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  /* The veneer symbol must not exist yet: each register gets one veneer,
     guarded by bx_glue_offset above.  */
  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Keep the veneer symbol local to the output.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve room and record the veneer's offset.  NOTE(review): the |2
     presumably tags the offset as allocated-but-not-yet-emitted, by
     analogy with the +1 marker used by the ARM->Thumb glue — confirm at
     the point where bx_glue_offset is consumed.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
7344
7345
7346 /* Add an entry to the code/data map for section SEC. */
7347
7348 static void
7349 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7350 {
7351 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7352 unsigned int newidx;
7353
7354 if (sec_data->map == NULL)
7355 {
7356 sec_data->map = (elf32_arm_section_map *)
7357 bfd_malloc (sizeof (elf32_arm_section_map));
7358 sec_data->mapcount = 0;
7359 sec_data->mapsize = 1;
7360 }
7361
7362 newidx = sec_data->mapcount++;
7363
7364 if (sec_data->mapcount > sec_data->mapsize)
7365 {
7366 sec_data->mapsize *= 2;
7367 sec_data->map = (elf32_arm_section_map *)
7368 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7369 * sizeof (elf32_arm_section_map));
7370 }
7371
7372 if (sec_data->map)
7373 {
7374 sec_data->map[newidx].vma = vma;
7375 sec_data->map[newidx].type = type;
7376 }
7377 }
7378
7379
7380 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7381 veneers are handled for now. */
7382
7383 static bfd_vma
7384 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7385 elf32_vfp11_erratum_list *branch,
7386 bfd *branch_bfd,
7387 asection *branch_sec,
7388 unsigned int offset)
7389 {
7390 asection *s;
7391 struct elf32_arm_link_hash_table *hash_table;
7392 char *tmp_name;
7393 struct elf_link_hash_entry *myh;
7394 struct bfd_link_hash_entry *bh;
7395 bfd_vma val;
7396 struct _arm_elf_section_data *sec_data;
7397 elf32_vfp11_erratum_list *newerr;
7398
7399 hash_table = elf32_arm_hash_table (link_info);
7400 BFD_ASSERT (hash_table != NULL);
7401 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7402
7403 s = bfd_get_linker_section
7404 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7405
7406 sec_data = elf32_arm_section_data (s);
7407
7408 BFD_ASSERT (s != NULL);
7409
7410 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7411 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7412
7413 BFD_ASSERT (tmp_name);
7414
7415 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7416 hash_table->num_vfp11_fixes);
7417
7418 myh = elf_link_hash_lookup
7419 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7420
7421 BFD_ASSERT (myh == NULL);
7422
7423 bh = NULL;
7424 val = hash_table->vfp11_erratum_glue_size;
7425 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7426 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7427 NULL, TRUE, FALSE, &bh);
7428
7429 myh = (struct elf_link_hash_entry *) bh;
7430 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7431 myh->forced_local = 1;
7432
7433 /* Link veneer back to calling location. */
7434 sec_data->erratumcount += 1;
7435 newerr = (elf32_vfp11_erratum_list *)
7436 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7437
7438 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7439 newerr->vma = -1;
7440 newerr->u.v.branch = branch;
7441 newerr->u.v.id = hash_table->num_vfp11_fixes;
7442 branch->u.b.veneer = newerr;
7443
7444 newerr->next = sec_data->erratumlist;
7445 sec_data->erratumlist = newerr;
7446
7447 /* A symbol for the return from the veneer. */
7448 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7449 hash_table->num_vfp11_fixes);
7450
7451 myh = elf_link_hash_lookup
7452 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7453
7454 if (myh != NULL)
7455 abort ();
7456
7457 bh = NULL;
7458 val = offset + 4;
7459 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7460 branch_sec, val, NULL, TRUE, FALSE, &bh);
7461
7462 myh = (struct elf_link_hash_entry *) bh;
7463 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7464 myh->forced_local = 1;
7465
7466 free (tmp_name);
7467
7468 /* Generate a mapping symbol for the veneer section, and explicitly add an
7469 entry for that symbol to the code/data map for the section. */
7470 if (hash_table->vfp11_erratum_glue_size == 0)
7471 {
7472 bh = NULL;
7473 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7474 ever requires this erratum fix. */
7475 _bfd_generic_link_add_one_symbol (link_info,
7476 hash_table->bfd_of_glue_owner, "$a",
7477 BSF_LOCAL, s, 0, NULL,
7478 TRUE, FALSE, &bh);
7479
7480 myh = (struct elf_link_hash_entry *) bh;
7481 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7482 myh->forced_local = 1;
7483
7484 /* The elf32_arm_init_maps function only cares about symbols from input
7485 BFDs. We must make a note of this generated mapping symbol
7486 ourselves so that code byteswapping works properly in
7487 elf32_arm_write_section. */
7488 elf32_arm_section_map_add (s, 'a', 0);
7489 }
7490
7491 s->size += VFP11_ERRATUM_VENEER_SIZE;
7492 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7493 hash_table->num_vfp11_fixes++;
7494
7495 /* The offset of the veneer. */
7496 return val;
7497 }
7498
7499 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7500 veneers need to be handled because used only in Cortex-M. */
7501
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Name the veneer after the running fix counter.
     NOTE(review): allocation only soft-checked via BFD_ASSERT.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol lives just past the patched instruction in the
     caller's section.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Unlike the VFP11 case the veneer size varies with the instruction
     being replaced, so it is passed in by the caller.  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
7617
/* Section flags shared by every linker-created glue/veneer section.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7621
7622 /* Create a fake section for use by the ARM backend of the linker. */
7623
7624 static bfd_boolean
7625 arm_make_glue_section (bfd * abfd, const char * name)
7626 {
7627 asection * sec;
7628
7629 sec = bfd_get_linker_section (abfd, name);
7630 if (sec != NULL)
7631 /* Already made. */
7632 return TRUE;
7633
7634 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7635
7636 if (sec == NULL
7637 || !bfd_set_section_alignment (abfd, sec, 2))
7638 return FALSE;
7639
7640 /* Set the gc mark to prevent the section from being removed by garbage
7641 collection, despite the fact that no relocs refer to this section. */
7642 sec->gc_mark = 1;
7643
7644 return TRUE;
7645 }
7646
7647 /* Set size of .plt entries. This function is called from the
7648 linker scripts in ld/emultempl/{armelf}.em. */
7649
void
bfd_elf32_arm_use_long_plt (void)
{
  /* NOTE(review): flag presumably consulted where .plt entries are
     sized/emitted elsewhere in this file — confirm at point of use.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7655
7656 /* Add the glue sections to ABFD. This function is called from the
7657 linker scripts in ld/emultempl/{armelf}.em. */
7658
7659 bfd_boolean
7660 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7661 struct bfd_link_info *info)
7662 {
7663 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7664 bfd_boolean dostm32l4xx = globals
7665 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7666 bfd_boolean addglue;
7667
7668 /* If we are only performing a partial
7669 link do not bother adding the glue. */
7670 if (bfd_link_relocatable (info))
7671 return TRUE;
7672
7673 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7674 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7675 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7676 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7677
7678 if (!dostm32l4xx)
7679 return addglue;
7680
7681 return addglue
7682 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7683 }
7684
7685 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7686 ensures they are not marked for deletion by
7687 strip_excluded_output_sections () when veneers are going to be created
7688 later. Not doing so would trigger assert on empty section size in
7689 lang_size_sections_1 (). */
7690
7691 void
7692 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7693 {
7694 enum elf32_arm_stub_type stub_type;
7695
7696 /* If we are only performing a partial
7697 link do not bother adding the glue. */
7698 if (bfd_link_relocatable (info))
7699 return;
7700
7701 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7702 {
7703 asection *out_sec;
7704 const char *out_sec_name;
7705
7706 if (!arm_dedicated_stub_output_section_required (stub_type))
7707 continue;
7708
7709 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7710 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7711 if (out_sec != NULL)
7712 out_sec->flags |= SEC_KEEP;
7713 }
7714 }
7715
7716 /* Select a BFD to be used to hold the sections used by the glue code.
7717 This function is called from the linker scripts in ld/emultempl/
7718 {armelf/pe}.em. */
7719
7720 bfd_boolean
7721 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7722 {
7723 struct elf32_arm_link_hash_table *globals;
7724
7725 /* If we are only performing a partial link
7726 do not bother getting a bfd to hold the glue. */
7727 if (bfd_link_relocatable (info))
7728 return TRUE;
7729
7730 /* Make sure we don't attach the glue sections to a dynamic object. */
7731 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7732
7733 globals = elf32_arm_hash_table (info);
7734 BFD_ASSERT (globals != NULL);
7735
7736 if (globals->bfd_of_glue_owner != NULL)
7737 return TRUE;
7738
7739 /* Save the bfd for later use. */
7740 globals->bfd_of_glue_owner = abfd;
7741
7742 return TRUE;
7743 }
7744
7745 static void
7746 check_use_blx (struct elf32_arm_link_hash_table *globals)
7747 {
7748 int cpu_arch;
7749
7750 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7751 Tag_CPU_arch);
7752
7753 if (globals->fix_arm1176)
7754 {
7755 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7756 globals->use_blx = 1;
7757 }
7758 else
7759 {
7760 if (cpu_arch > TAG_CPU_ARCH_V4T)
7761 globals->use_blx = 1;
7762 }
7763 }
7764
/* Scan ABFD's relocations before section sizes are fixed and record any
   interworking glue (ARM->Thumb calls, ARMv4 BX veneers) that will be
   needed.  Returns FALSE on error (unreadable relocs/contents, or a BE8
   request on a little-endian bfd).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only interesting when veneered BX rewriting
	     (fix_v4bx >= 2) was requested.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register is in the low nibble of the
		 instruction word.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers we own (not the cached copies).  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  /* Same ownership rules as above: only free buffers that were not
     cached in the section data.  */
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7925 #endif
7926
7927
/* Initialise maps of ARM/Thumb/data for input BFDs.

   Scan the local ELF symbols of ABFD for ARM mapping symbols ($a, $t,
   $d, ...) and record each one in its containing section's map via
   elf32_arm_section_map_add.  Non-ARM and dynamic BFDs are skipped.  */

void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  /* Dynamic objects are not scanned for mapping symbols.  */
  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* Mapping symbol names are "$a", "$t", "$d", ...: the second
	     character is the map type recorded for the section.  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
  /* NOTE(review): ISYMBUF is allocated by bfd_elf_get_elf_syms (when passed
     a NULL buffer) and is not freed here; presumably it is intended to live
     for the duration of the link — confirm before adding a free.  */
}
7975
7976
7977 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7978 say what they wanted. */
7979
7980 void
7981 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7982 {
7983 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7984 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7985
7986 if (globals == NULL)
7987 return;
7988
7989 if (globals->fix_cortex_a8 == -1)
7990 {
7991 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7992 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7993 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7994 || out_attr[Tag_CPU_arch_profile].i == 0))
7995 globals->fix_cortex_a8 = 1;
7996 else
7997 globals->fix_cortex_a8 = 0;
7998 }
7999 }
8000
8001
8002 void
8003 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8004 {
8005 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8006 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8007
8008 if (globals == NULL)
8009 return;
8010 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8011 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8012 {
8013 switch (globals->vfp11_fix)
8014 {
8015 case BFD_ARM_VFP11_FIX_DEFAULT:
8016 case BFD_ARM_VFP11_FIX_NONE:
8017 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8018 break;
8019
8020 default:
8021 /* Give a warning, but do as the user requests anyway. */
8022 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8023 "workaround is not necessary for target architecture"), obfd);
8024 }
8025 }
8026 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8027 /* For earlier architectures, we might need the workaround, but do not
8028 enable it by default. If users is running with broken hardware, they
8029 must enable the erratum fix explicitly. */
8030 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8031 }
8032
8033 void
8034 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8035 {
8036 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8037 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8038
8039 if (globals == NULL)
8040 return;
8041
8042 /* We assume only Cortex-M4 may require the fix. */
8043 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8044 || out_attr[Tag_CPU_arch_profile].i != 'M')
8045 {
8046 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8047 /* Give a warning, but do as the user requests anyway. */
8048 _bfd_error_handler
8049 (_("%pB: warning: selected STM32L4XX erratum "
8050 "workaround is not necessary for target architecture"), obfd);
8051 }
8052 }
8053
/* Classification of a VFP11 instruction by the pipeline it issues to,
   as assigned by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (also simple arithmetic).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
8061
8062 /* Return a VFP register number. This is encoded as RX:X for single-precision
8063 registers, or X:RX for double-precision registers, where RX is the group of
8064 four bits in the instruction encoding and X is the single extension bit.
8065 RX and X fields are specified using their lowest (starting) bit. The return
8066 value is:
8067
8068 0...31: single-precision registers s0...s31
8069 32...63: double-precision registers d0...d31.
8070
8071 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8072 encounter VFP3 instructions, so we allow the full range for DP registers. */
8073
8074 static unsigned int
8075 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8076 unsigned int x)
8077 {
8078 if (is_double)
8079 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8080 else
8081 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8082 }
8083
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  A double-precision register dN marks both of
   its single-precision halves.  Ignore d16-d31 (REG >= 48).

   Fix: use unsigned shift operands.  The previous `1 << reg' and
   `3 << ((reg - 32) * 2)' left-shifted signed literals into the sign
   bit for s31 (REG == 31) and d15 (REG == 47), which is undefined
   behaviour in C.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
8095
8096 /* Return TRUE if WMASK overwrites anything in REGS. */
8097
8098 static bfd_boolean
8099 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8100 {
8101 int i;
8102
8103 for (i = 0; i < numregs; i++)
8104 {
8105 unsigned int reg = regs[i];
8106
8107 if (reg < 32 && (wmask & (1 << reg)) != 0)
8108 return TRUE;
8109
8110 reg -= 32;
8111
8112 if (reg >= 16)
8113 continue;
8114
8115 if ((wmask & (3 << (reg * 2))) != 0)
8116 return TRUE;
8117 }
8118
8119 return FALSE;
8120 }
8121
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   Decode INSN and return the VFP11 pipeline it issues to, or VFP11_BAD if
   it is not recognised.  On return *DESTMASK holds the set of registers the
   instruction writes (per bfd_arm_vfp11_write_mask numbering) and, for
   FMAC/DS candidates, regs[0 .. *NUMREGS-1] hold its input registers.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0b1011 selects the double-precision encodings.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* The accumulate forms read Fd as well as Fn and Fm.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Common tail for the two-input forms: Fd is written, Fn and
	     Fm are the inputs.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear: transfer towards the FP registers, so they are
	 written.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits P, U, W select the load variant.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    unsigned int i, offset = insn & 0xff;

	    /* The offset field counts words; DP registers take two.  */
	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8313
8314
8315 static int elf32_arm_compare_mapping (const void * a, const void * b);
8316
8317
8318 /* Look for potentially-troublesome code sequences which might trigger the
8319 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8320 (available from ARM) for details of the erratum. A short version is
8321 described in ld.texinfo. */
8322
8323 bfd_boolean
8324 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8325 {
8326 asection *sec;
8327 bfd_byte *contents = NULL;
8328 int state = 0;
8329 int regs[3], numregs = 0;
8330 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8331 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8332
8333 if (globals == NULL)
8334 return FALSE;
8335
8336 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8337 The states transition as follows:
8338
8339 0 -> 1 (vector) or 0 -> 2 (scalar)
8340 A VFP FMAC-pipeline instruction has been seen. Fill
8341 regs[0]..regs[numregs-1] with its input operands. Remember this
8342 instruction in 'first_fmac'.
8343
8344 1 -> 2
8345 Any instruction, except for a VFP instruction which overwrites
8346 regs[*].
8347
8348 1 -> 3 [ -> 0 ] or
8349 2 -> 3 [ -> 0 ]
8350 A VFP instruction has been seen which overwrites any of regs[*].
8351 We must make a veneer! Reset state to 0 before examining next
8352 instruction.
8353
8354 2 -> 0
8355 If we fail to match anything in state 2, reset to state 0 and reset
8356 the instruction pointer to the instruction after 'first_fmac'.
8357
8358 If the VFP11 vector mode is in use, there must be at least two unrelated
8359 instructions between anti-dependent VFP11 instructions to properly avoid
8360 triggering the erratum, hence the use of the extra state 1. */
8361
8362 /* If we are only performing a partial link do not bother
8363 to construct any glue. */
8364 if (bfd_link_relocatable (link_info))
8365 return TRUE;
8366
8367 /* Skip if this bfd does not correspond to an ELF image. */
8368 if (! is_arm_elf (abfd))
8369 return TRUE;
8370
8371 /* We should have chosen a fix type by the time we get here. */
8372 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8373
8374 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8375 return TRUE;
8376
8377 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8378 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8379 return TRUE;
8380
8381 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8382 {
8383 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8384 struct _arm_elf_section_data *sec_data;
8385
8386 /* If we don't have executable progbits, we're not interested in this
8387 section. Also skip if section is to be excluded. */
8388 if (elf_section_type (sec) != SHT_PROGBITS
8389 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8390 || (sec->flags & SEC_EXCLUDE) != 0
8391 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8392 || sec->output_section == bfd_abs_section_ptr
8393 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8394 continue;
8395
8396 sec_data = elf32_arm_section_data (sec);
8397
8398 if (sec_data->mapcount == 0)
8399 continue;
8400
8401 if (elf_section_data (sec)->this_hdr.contents != NULL)
8402 contents = elf_section_data (sec)->this_hdr.contents;
8403 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8404 goto error_return;
8405
8406 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8407 elf32_arm_compare_mapping);
8408
8409 for (span = 0; span < sec_data->mapcount; span++)
8410 {
8411 unsigned int span_start = sec_data->map[span].vma;
8412 unsigned int span_end = (span == sec_data->mapcount - 1)
8413 ? sec->size : sec_data->map[span + 1].vma;
8414 char span_type = sec_data->map[span].type;
8415
8416 /* FIXME: Only ARM mode is supported at present. We may need to
8417 support Thumb-2 mode also at some point. */
8418 if (span_type != 'a')
8419 continue;
8420
8421 for (i = span_start; i < span_end;)
8422 {
8423 unsigned int next_i = i + 4;
8424 unsigned int insn = bfd_big_endian (abfd)
8425 ? (contents[i] << 24)
8426 | (contents[i + 1] << 16)
8427 | (contents[i + 2] << 8)
8428 | contents[i + 3]
8429 : (contents[i + 3] << 24)
8430 | (contents[i + 2] << 16)
8431 | (contents[i + 1] << 8)
8432 | contents[i];
8433 unsigned int writemask = 0;
8434 enum bfd_arm_vfp11_pipe vpipe;
8435
8436 switch (state)
8437 {
8438 case 0:
8439 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8440 &numregs);
8441 /* I'm assuming the VFP11 erratum can trigger with denorm
8442 operands on either the FMAC or the DS pipeline. This might
8443 lead to slightly overenthusiastic veneer insertion. */
8444 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8445 {
8446 state = use_vector ? 1 : 2;
8447 first_fmac = i;
8448 veneer_of_insn = insn;
8449 }
8450 break;
8451
8452 case 1:
8453 {
8454 int other_regs[3], other_numregs;
8455 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8456 other_regs,
8457 &other_numregs);
8458 if (vpipe != VFP11_BAD
8459 && bfd_arm_vfp11_antidependency (writemask, regs,
8460 numregs))
8461 state = 3;
8462 else
8463 state = 2;
8464 }
8465 break;
8466
8467 case 2:
8468 {
8469 int other_regs[3], other_numregs;
8470 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8471 other_regs,
8472 &other_numregs);
8473 if (vpipe != VFP11_BAD
8474 && bfd_arm_vfp11_antidependency (writemask, regs,
8475 numregs))
8476 state = 3;
8477 else
8478 {
8479 state = 0;
8480 next_i = first_fmac + 4;
8481 }
8482 }
8483 break;
8484
8485 case 3:
8486 abort (); /* Should be unreachable. */
8487 }
8488
8489 if (state == 3)
8490 {
8491 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8492 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8493
8494 elf32_arm_section_data (sec)->erratumcount += 1;
8495
8496 newerr->u.b.vfp_insn = veneer_of_insn;
8497
8498 switch (span_type)
8499 {
8500 case 'a':
8501 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8502 break;
8503
8504 default:
8505 abort ();
8506 }
8507
8508 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8509 first_fmac);
8510
8511 newerr->vma = -1;
8512
8513 newerr->next = sec_data->erratumlist;
8514 sec_data->erratumlist = newerr;
8515
8516 state = 0;
8517 }
8518
8519 i = next_i;
8520 }
8521 }
8522
8523 if (contents != NULL
8524 && elf_section_data (sec)->this_hdr.contents != contents)
8525 free (contents);
8526 contents = NULL;
8527 }
8528
8529 return TRUE;
8530
8531 error_return:
8532 if (contents != NULL
8533 && elf_section_data (sec)->this_hdr.contents != contents)
8534 free (contents);
8535
8536 return FALSE;
8537 }
8538
8539 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8540 after sections have been laid out, using specially-named symbols. */
8541
8542 void
8543 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8544 struct bfd_link_info *link_info)
8545 {
8546 asection *sec;
8547 struct elf32_arm_link_hash_table *globals;
8548 char *tmp_name;
8549
8550 if (bfd_link_relocatable (link_info))
8551 return;
8552
8553 /* Skip if this bfd does not correspond to an ELF image. */
8554 if (! is_arm_elf (abfd))
8555 return;
8556
8557 globals = elf32_arm_hash_table (link_info);
8558 if (globals == NULL)
8559 return;
8560
8561 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8562 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8563
8564 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8565 {
8566 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8567 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8568
8569 for (; errnode != NULL; errnode = errnode->next)
8570 {
8571 struct elf_link_hash_entry *myh;
8572 bfd_vma vma;
8573
8574 switch (errnode->type)
8575 {
8576 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8577 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8578 /* Find veneer symbol. */
8579 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8580 errnode->u.b.veneer->u.v.id);
8581
8582 myh = elf_link_hash_lookup
8583 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8584
8585 if (myh == NULL)
8586 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8587 abfd, "VFP11", tmp_name);
8588
8589 vma = myh->root.u.def.section->output_section->vma
8590 + myh->root.u.def.section->output_offset
8591 + myh->root.u.def.value;
8592
8593 errnode->u.b.veneer->vma = vma;
8594 break;
8595
8596 case VFP11_ERRATUM_ARM_VENEER:
8597 case VFP11_ERRATUM_THUMB_VENEER:
8598 /* Find return location. */
8599 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8600 errnode->u.v.id);
8601
8602 myh = elf_link_hash_lookup
8603 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8604
8605 if (myh == NULL)
8606 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8607 abfd, "VFP11", tmp_name);
8608
8609 vma = myh->root.u.def.section->output_section->vma
8610 + myh->root.u.def.section->output_offset
8611 + myh->root.u.def.value;
8612
8613 errnode->u.v.branch->vma = vma;
8614 break;
8615
8616 default:
8617 abort ();
8618 }
8619 }
8620 }
8621
8622 free (tmp_name);
8623 }
8624
8625 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8626 return locations after sections have been laid out, using
8627 specially-named symbols. */
8628
8629 void
8630 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8631 struct bfd_link_info *link_info)
8632 {
8633 asection *sec;
8634 struct elf32_arm_link_hash_table *globals;
8635 char *tmp_name;
8636
8637 if (bfd_link_relocatable (link_info))
8638 return;
8639
8640 /* Skip if this bfd does not correspond to an ELF image. */
8641 if (! is_arm_elf (abfd))
8642 return;
8643
8644 globals = elf32_arm_hash_table (link_info);
8645 if (globals == NULL)
8646 return;
8647
8648 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8649 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8650
8651 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8652 {
8653 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8654 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8655
8656 for (; errnode != NULL; errnode = errnode->next)
8657 {
8658 struct elf_link_hash_entry *myh;
8659 bfd_vma vma;
8660
8661 switch (errnode->type)
8662 {
8663 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8664 /* Find veneer symbol. */
8665 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8666 errnode->u.b.veneer->u.v.id);
8667
8668 myh = elf_link_hash_lookup
8669 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8670
8671 if (myh == NULL)
8672 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8673 abfd, "STM32L4XX", tmp_name);
8674
8675 vma = myh->root.u.def.section->output_section->vma
8676 + myh->root.u.def.section->output_offset
8677 + myh->root.u.def.value;
8678
8679 errnode->u.b.veneer->vma = vma;
8680 break;
8681
8682 case STM32L4XX_ERRATUM_VENEER:
8683 /* Find return location. */
8684 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8685 errnode->u.v.id);
8686
8687 myh = elf_link_hash_lookup
8688 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8689
8690 if (myh == NULL)
8691 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8692 abfd, "STM32L4XX", tmp_name);
8693
8694 vma = myh->root.u.def.section->output_section->vma
8695 + myh->root.u.def.section->output_offset
8696 + myh->root.u.def.value;
8697
8698 errnode->u.v.branch->vma = vma;
8699 break;
8700
8701 default:
8702 abort ();
8703 }
8704 }
8705 }
8706
8707 free (tmp_name);
8708 }
8709
8710 static inline bfd_boolean
8711 is_thumb2_ldmia (const insn32 insn)
8712 {
8713 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8714 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8715 return (insn & 0xffd02000) == 0xe8900000;
8716 }
8717
8718 static inline bfd_boolean
8719 is_thumb2_ldmdb (const insn32 insn)
8720 {
8721 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8722 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8723 return (insn & 0xffd02000) == 0xe9100000;
8724 }
8725
8726 static inline bfd_boolean
8727 is_thumb2_vldm (const insn32 insn)
8728 {
8729 /* A6.5 Extension register load or store instruction
8730 A7.7.229
8731 We look for SP 32-bit and DP 64-bit registers.
8732 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8733 <list> is consecutive 64-bit registers
8734 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8735 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8736 <list> is consecutive 32-bit registers
8737 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8738 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8739 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8740 return
8741 (((insn & 0xfe100f00) == 0xec100b00) ||
8742 ((insn & 0xfe100f00) == 0xec100a00))
8743 && /* (IA without !). */
8744 (((((insn << 7) >> 28) & 0xd) == 0x4)
8745 /* (IA with !), includes VPOP (when reg number is SP). */
8746 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8747 /* (DB with !). */
8748 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8749 }
8750
8751 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8752 VLDM opcode and:
8753 - computes the number and the mode of memory accesses
8754 - decides if the replacement should be done:
8755 . replaces only if > 8-word accesses
8756 . or (testing purposes only) replaces all accesses. */
8757
8758 static bfd_boolean
8759 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8760 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8761 {
8762 int nb_words = 0;
8763
8764 /* The field encoding the register list is the same for both LDMIA
8765 and LDMDB encodings. */
8766 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8767 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8768 else if (is_thumb2_vldm (insn))
8769 nb_words = (insn & 0xff);
8770
8771 /* DEFAULT mode accounts for the real bug condition situation,
8772 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8773 return
8774 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8775 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8776 }
8777
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.

   Scan the Thumb code in every executable section of ABFD for LDM/VLDM
   instructions that need a replacing veneer, and record one for each.
   Returns TRUE on success (including when there is nothing to do) and
   FALSE on error.  */

bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
8973
/* Set target relocation values needed during linking.

   Copy the linker command-line/emulation parameters in PARAMS into the
   ARM link hash table for LINK_INFO, and the enum/wchar size-warning
   suppression flags into OUTPUT_BFD's tdata.  Note that FDPIC output
   overrides both the TARGET2 relocation type and the PIC-veneer
   setting.  */

void
bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 struct elf32_arm_params *params)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = params->target1_is_rel;

  /* FDPIC forces a GOT-based TARGET2 relocation, overriding the
     requested type.  */
  if (globals->fdpic_p)
    globals->target2_reloc = R_ARM_GOT32;
  else if (strcmp (params->target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (params->target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (params->target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      /* Unknown type: report it and leave target2_reloc unchanged.  */
      _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
			  params->target2_type);
    }
  globals->fix_v4bx = params->fix_v4bx;
  /* use_blx is sticky: it may already have been enabled elsewhere.  */
  globals->use_blx |= params->use_blx;
  globals->vfp11_fix = params->vfp11_denorm_fix;
  globals->stm32l4xx_fix = params->stm32l4xx_fix;
  /* FDPIC always uses position-independent veneers.  */
  if (globals->fdpic_p)
    globals->pic_veneer = 1;
  else
    globals->pic_veneer = params->pic_veneer;
  globals->fix_cortex_a8 = params->fix_cortex_a8;
  globals->fix_arm1176 = params->fix_arm1176;
  globals->cmse_implib = params->cmse_implib;
  globals->in_implib_bfd = params->in_implib_bfd;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning
    = params->no_enum_size_warning;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning
    = params->no_wchar_size_warning;
}
9020
9021 /* Replace the target offset of a Thumb bl or b.w instruction. */
9022
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Branch targets are halfword-aligned; bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  /* The S bit of the encoding: set for negative displacements.  */
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: keep the opcode bits (15:11), insert imm10
     (offset bits 21:12) in bits 9:0 and S in bit 10.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: the mask ~0x2fff preserves bits 15:14 and bit 12
     (bit 12 is what distinguishes BL from B.W, so it must survive).
     Insert J1 = NOT(I1) EOR S in bit 13, J2 = NOT(I2) EOR S in bit 11
     (I1/I2 being offset bits 23/22), and imm11 (offset bits 11:1) in
     bits 10:0, per the Thumb-2 BL/B.W encodings.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
9045
9046 /* Thumb code calling an ARM function. */
9047
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol previously recorded for NAME.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 of the glue symbol's value flags a stub whose contents have
     not been emitted yet; write them out on first use.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      /* Clear the flag bit and record the real stub offset.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: bx pc; nop; b <target> (the bx switches to ARM
	 state, the final ARM branch reaches the real destination).  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biasing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9146
9147 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9148
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol previously recorded for NAME.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 of the glue symbol's value flags a stub whose contents have
     not been emitted yet; write them out on first use.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn, but still emit the stub (unlike the Thumb->ARM case,
	     which gives up here).  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      /* Clear the flag bit and record the real stub offset.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  The | 1 marks the target
	     as a Thumb address so the final bx switches state.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* With BLX available, a single ldr pc, [pc, ...] through an
	     absolute address literal suffices.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* Pre-v5 absolute variant: ldr r12, <literal>; bx r12.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9241
9242 /* Arm code calling a Thumb function. */
9243
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Make sure the glue stub for NAME exists and has been written.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit displacement field is recomputed below.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Insert the word displacement into the branch's 24-bit field.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9298
9299 /* Populate Arm stub for an exported Thumb function. */
9300
static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  Symbols with
     no recorded export glue need nothing done here.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the glue code for this symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9342
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
9344
9345 static bfd_vma
9346 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9347 {
9348 bfd_byte *p;
9349 bfd_vma glue_addr;
9350 asection *s;
9351 struct elf32_arm_link_hash_table *globals;
9352
9353 globals = elf32_arm_hash_table (info);
9354 BFD_ASSERT (globals != NULL);
9355 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9356
9357 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9358 ARM_BX_GLUE_SECTION_NAME);
9359 BFD_ASSERT (s != NULL);
9360 BFD_ASSERT (s->contents != NULL);
9361 BFD_ASSERT (s->output_section != NULL);
9362
9363 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9364
9365 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9366
9367 if ((globals->bx_glue_offset[reg] & 1) == 0)
9368 {
9369 p = s->contents + glue_addr;
9370 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9371 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9372 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9373 globals->bx_glue_offset[reg] |= 1;
9374 }
9375
9376 return glue_addr + s->output_section->vma + s->output_offset;
9377 }
9378
9379 /* Generate Arm stubs for exported Thumb symbols. */
9380 static void
9381 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9382 struct bfd_link_info *link_info)
9383 {
9384 struct elf32_arm_link_hash_table * globals;
9385
9386 if (link_info == NULL)
9387 /* Ignore this if we are not called by the ELF backend linker. */
9388 return;
9389
9390 globals = elf32_arm_hash_table (link_info);
9391 if (globals == NULL)
9392 return;
9393
9394 /* If blx is available then exported Thumb symbols are OK and there is
9395 nothing to do. */
9396 if (globals->use_blx)
9397 return;
9398
9399 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9400 link_info);
9401 }
9402
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
9405
9406 static void
9407 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9408 bfd_size_type count)
9409 {
9410 struct elf32_arm_link_hash_table *htab;
9411
9412 htab = elf32_arm_hash_table (info);
9413 BFD_ASSERT (htab->root.dynamic_sections_created);
9414 if (sreloc == NULL)
9415 abort ();
9416 sreloc->size += RELOC_SIZE (htab) * count;
9417 }
9418
9419 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9420 dynamic, the relocations should go in SRELOC, otherwise they should
9421 go in the special .rel.iplt section. */
9422
9423 static void
9424 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9425 bfd_size_type count)
9426 {
9427 struct elf32_arm_link_hash_table *htab;
9428
9429 htab = elf32_arm_hash_table (info);
9430 if (!htab->root.dynamic_sections_created)
9431 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9432 else
9433 {
9434 BFD_ASSERT (sreloc != NULL);
9435 sreloc->size += RELOC_SIZE (htab) * count;
9436 }
9437 }
9438
9439 /* Add relocation REL to the end of relocation section SRELOC. */
9440
9441 static void
9442 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9443 asection *sreloc, Elf_Internal_Rela *rel)
9444 {
9445 bfd_byte *loc;
9446 struct elf32_arm_link_hash_table *htab;
9447
9448 htab = elf32_arm_hash_table (info);
9449 if (!htab->root.dynamic_sections_created
9450 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9451 sreloc = htab->root.irelplt;
9452 if (sreloc == NULL)
9453 abort ();
9454 loc = sreloc->contents;
9455 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9456 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9457 abort ();
9458 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9459 }
9460
9461 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9462 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9463 to .plt. */
9464
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in
	     .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     ROOT_PLT records where this symbol's entry will start.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* Symbian targets get no .got.plt entry here.  */
  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9540
9541 static bfd_vma
9542 arm_movw_immediate (bfd_vma value)
9543 {
9544 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9545 }
9546
9547 static bfd_vma
9548 arm_movt_immediate (bfd_vma value)
9549 {
9550 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9551 }
9552
9553 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9554 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9555 Otherwise, DYNINDX is the index of the symbol in the dynamic
9556 symbol table and SYM_VALUE is undefined.
9557
9558 ROOT_PLT points to the offset of the PLT entry from the start of its
9559 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9560 bookkeeping information.
9561
9562 Returns FALSE if there was a problem. */
9563
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The & -2 masks off the low
	 flag bit of the recorded offset.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      if (htab->fdpic_p)
	/* Function descriptor takes 8 bytes.  */
	plt_index = (got_offset - got_header_size) / 8;
      else
	plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared PLT: words 2 and 5 are data (GOT offset and
	     relocation offset), the rest are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: word 4 is a PC-relative branch back
	     to the PLT header, words 2 and 5 are data.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt load the GOT displacement, then the tail branch.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (htab->fdpic_p)
	{
	  const bfd_vma *plt_entry = using_thumb_only(htab)
	    ? elf32_arm_fdpic_thumb_plt_entry
	    : elf32_arm_fdpic_plt_entry;

	  /* Fill-up Thumb stub if needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }
	  /* As we are using 32 bit instructions even for the Thumb
	     version, we have to use 'put_arm_insn' instead of
	     'put_thumb_insn'.  */
	  put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
	  put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
	  put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
	  put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
	  bfd_put_32 (output_bfd, got_offset, ptr + 16);

	  if (!(info->flags & DF_BIND_NOW))
	    {
	      /* funcdesc_value_reloc_offset.  */
	      bfd_put_32 (output_bfd,
			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
			  ptr + 20);
	      put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
	      put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
	      put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
	      put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
	    }
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The shifts scatter the
	     displacement into the Thumb-2 movw/movt immediate fields.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short form splits the displacement over three
		 8/8/12-bit rotated immediates, so it must fit in
		 28 bits.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* The long form covers a full 32-bit displacement with a
		 fourth immediate chunk.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	     used by PLT entry.  */
	  if (htab->fdpic_p)
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	      initial_got_entry = 0;
	    }
	  else
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	      initial_got_entry = (splt->output_section->vma
				   + splt->output_offset);
	    }
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);

      if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
	{
	  /* Setup initial funcdesc value.  */
	  /* FIXME: we don't support lazy binding because there is a
	     race condition between both words getting written and
	     some other thread attempting to read them.  The ARM
	     architecture does not have an atomic 64 bit load/store
	     instruction that could be used to prevent it; it is
	     recommended that threaded FDPIC applications run with the
	     LD_BIND_NOW environment variable set.  */
	  bfd_put_32(output_bfd, plt_address + 0x18,
		     sgot->contents + got_offset);
	  bfd_put_32(output_bfd, -1 /*TODO*/,
		     sgot->contents + got_offset + 4);
	}
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
9962
9963 /* Some relocations map to different relocations depending on the
9964 target. Return the real relocation. */
9965
9966 static int
9967 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9968 int r_type)
9969 {
9970 switch (r_type)
9971 {
9972 case R_ARM_TARGET1:
9973 if (globals->target1_is_rel)
9974 return R_ARM_REL32;
9975 else
9976 return R_ARM_ABS32;
9977
9978 case R_ARM_TARGET2:
9979 return globals->target2_reloc;
9980
9981 default:
9982 return r_type;
9983 }
9984 }
9985
9986 /* Return the base VMA address which should be subtracted from real addresses
9987 when resolving @dtpoff relocation.
9988 This is PT_TLS segment p_vaddr. */
9989
9990 static bfd_vma
9991 dtpoff_base (struct bfd_link_info *info)
9992 {
9993 /* If tls_sec is NULL, we should have signalled an error already. */
9994 if (elf_hash_table (info)->tls_sec == NULL)
9995 return 0;
9996 return elf_hash_table (info)->tls_sec->vma;
9997 }
9998
9999 /* Return the relocation value for @tpoff relocation
10000 if STT_TLS virtual address is ADDRESS. */
10001
10002 static bfd_vma
10003 tpoff (struct bfd_link_info *info, bfd_vma address)
10004 {
10005 struct elf_link_hash_table *htab = elf_hash_table (info);
10006 bfd_vma base;
10007
10008 /* If tls_sec is NULL, we should have signalled an error already. */
10009 if (htab->tls_sec == NULL)
10010 return 0;
10011 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10012 return address - htab->tls_sec->vma + base;
10013 }
10014
10015 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10016 VALUE is the relocation value. */
10017
10018 static bfd_reloc_status_type
10019 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10020 {
10021 if (value > 0xfff)
10022 return bfd_reloc_overflow;
10023
10024 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10025 bfd_put_32 (abfd, value, data);
10026 return bfd_reloc_ok;
10027 }
10028
10029 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10030 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10031 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10032
10033 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10034 is to then call final_link_relocate. Return other values in the
10035 case of error.
10036
10037 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10038 the pre-relaxed code. It would be nice if the relocs were updated
10039 to match the optimization. */
10040
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Patch the 32-bit word addressed by the relocation.  For a local
	 symbol it is simply zeroed; otherwise the word is adjusted by
	 -5 (Thumb) or -8 (ARM), presumably to pre-compensate the
	 PC-relative bias of the consuming instruction — TODO confirm
	 against the TLS descriptor sequence definition.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller must still apply the ordinary relocation.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Rewrite one Thumb instruction of the TLS descriptor sequence in
	 place; which replacement is used depends on the instruction
	 matched and on whether the symbol resolved locally.  */
      /* Thumb insn.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  /* Unrecognized sequence: report an error, fetching the second
	     halfword first if this looks like a 32-bit Thumb encoding.  */
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* Same rewriting as above, but for the ARM-state descriptor
	 sequence (32-bit instructions).  */
      /* arm insn.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Store the replacement as two 16-bit halfwords, high half first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10179
10180 /* For a given value of n, calculate the value of G_n as required to
10181 deal with group relocations. We return it in the form of an
10182 encoded constant-and-rotation, together with the final residual. If n is
10183 specified as less than zero, then final_residual is filled with the
10184 input value and no further action is performed. */
10185
10186 static bfd_vma
10187 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10188 {
10189 int current_n;
10190 bfd_vma g_n;
10191 bfd_vma encoded_g_n = 0;
10192 bfd_vma residual = value; /* Also known as Y_n. */
10193
10194 for (current_n = 0; current_n <= n; current_n++)
10195 {
10196 int shift;
10197
10198 /* Calculate which part of the value to mask. */
10199 if (residual == 0)
10200 shift = 0;
10201 else
10202 {
10203 int msb;
10204
10205 /* Determine the most significant bit in the residual and
10206 align the resulting value to a 2-bit boundary. */
10207 for (msb = 30; msb >= 0; msb -= 2)
10208 if (residual & (3 << msb))
10209 break;
10210
10211 /* The desired shift is now (msb - 6), or zero, whichever
10212 is the greater. */
10213 shift = msb - 6;
10214 if (shift < 0)
10215 shift = 0;
10216 }
10217
10218 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10219 g_n = residual & (0xff << shift);
10220 encoded_g_n = (g_n >> shift)
10221 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10222
10223 /* Calculate the residual for the next time around. */
10224 residual &= ~g_n;
10225 }
10226
10227 *final_residual = residual;
10228
10229 return encoded_g_n;
10230 }
10231
10232 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10233 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10234
10235 static int
10236 identify_add_or_sub (bfd_vma insn)
10237 {
10238 int opcode = insn & 0x1e00000;
10239
10240 if (opcode == 1 << 23) /* ADD */
10241 return 1;
10242
10243 if (opcode == 1 << 22) /* SUB */
10244 return -1;
10245
10246 return 0;
10247 }
10248
10249 /* Perform a relocation as part of a final link. */
10250
10251 static bfd_reloc_status_type
10252 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10253 bfd * input_bfd,
10254 bfd * output_bfd,
10255 asection * input_section,
10256 bfd_byte * contents,
10257 Elf_Internal_Rela * rel,
10258 bfd_vma value,
10259 struct bfd_link_info * info,
10260 asection * sym_sec,
10261 const char * sym_name,
10262 unsigned char st_type,
10263 enum arm_st_branch_type branch_type,
10264 struct elf_link_hash_entry * h,
10265 bfd_boolean * unresolved_reloc_p,
10266 char ** error_message)
10267 {
10268 unsigned long r_type = howto->type;
10269 unsigned long r_symndx;
10270 bfd_byte * hit_data = contents + rel->r_offset;
10271 bfd_vma * local_got_offsets;
10272 bfd_vma * local_tlsdesc_gotents;
10273 asection * sgot;
10274 asection * splt;
10275 asection * sreloc = NULL;
10276 asection * srelgot;
10277 bfd_vma addend;
10278 bfd_signed_vma signed_addend;
10279 unsigned char dynreloc_st_type;
10280 bfd_vma dynreloc_value;
10281 struct elf32_arm_link_hash_table * globals;
10282 struct elf32_arm_link_hash_entry *eh;
10283 union gotplt_union *root_plt;
10284 struct arm_plt_info *arm_plt;
10285 bfd_vma plt_offset;
10286 bfd_vma gotplt_offset;
10287 bfd_boolean has_iplt_entry;
10288 bfd_boolean resolved_to_zero;
10289
10290 globals = elf32_arm_hash_table (info);
10291 if (globals == NULL)
10292 return bfd_reloc_notsupported;
10293
10294 BFD_ASSERT (is_arm_elf (input_bfd));
10295 BFD_ASSERT (howto != NULL);
10296
10297 /* Some relocation types map to different relocations depending on the
10298 target. We pick the right one here. */
10299 r_type = arm_real_reloc_type (globals, r_type);
10300
10301 /* It is possible to have linker relaxations on some TLS access
10302 models. Update our information here. */
10303 r_type = elf32_arm_tls_transition (info, r_type, h);
10304
10305 if (r_type != howto->type)
10306 howto = elf32_arm_howto_from_type (r_type);
10307
10308 eh = (struct elf32_arm_link_hash_entry *) h;
10309 sgot = globals->root.sgot;
10310 local_got_offsets = elf_local_got_offsets (input_bfd);
10311 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10312
10313 if (globals->root.dynamic_sections_created)
10314 srelgot = globals->root.srelgot;
10315 else
10316 srelgot = NULL;
10317
10318 r_symndx = ELF32_R_SYM (rel->r_info);
10319
10320 if (globals->use_rel)
10321 {
10322 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10323
10324 if (addend & ((howto->src_mask + 1) >> 1))
10325 {
10326 signed_addend = -1;
10327 signed_addend &= ~ howto->src_mask;
10328 signed_addend |= addend;
10329 }
10330 else
10331 signed_addend = addend;
10332 }
10333 else
10334 addend = signed_addend = rel->r_addend;
10335
10336 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10337 are resolving a function call relocation. */
10338 if (using_thumb_only (globals)
10339 && (r_type == R_ARM_THM_CALL
10340 || r_type == R_ARM_THM_JUMP24)
10341 && branch_type == ST_BRANCH_TO_ARM)
10342 branch_type = ST_BRANCH_TO_THUMB;
10343
10344 /* Record the symbol information that should be used in dynamic
10345 relocations. */
10346 dynreloc_st_type = st_type;
10347 dynreloc_value = value;
10348 if (branch_type == ST_BRANCH_TO_THUMB)
10349 dynreloc_value |= 1;
10350
10351 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10352 VALUE appropriately for relocations that we resolve at link time. */
10353 has_iplt_entry = FALSE;
10354 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10355 &arm_plt)
10356 && root_plt->offset != (bfd_vma) -1)
10357 {
10358 plt_offset = root_plt->offset;
10359 gotplt_offset = arm_plt->got_offset;
10360
10361 if (h == NULL || eh->is_iplt)
10362 {
10363 has_iplt_entry = TRUE;
10364 splt = globals->root.iplt;
10365
10366 /* Populate .iplt entries here, because not all of them will
10367 be seen by finish_dynamic_symbol. The lower bit is set if
10368 we have already populated the entry. */
10369 if (plt_offset & 1)
10370 plt_offset--;
10371 else
10372 {
10373 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10374 -1, dynreloc_value))
10375 root_plt->offset |= 1;
10376 else
10377 return bfd_reloc_notsupported;
10378 }
10379
10380 /* Static relocations always resolve to the .iplt entry. */
10381 st_type = STT_FUNC;
10382 value = (splt->output_section->vma
10383 + splt->output_offset
10384 + plt_offset);
10385 branch_type = ST_BRANCH_TO_ARM;
10386
10387 /* If there are non-call relocations that resolve to the .iplt
10388 entry, then all dynamic ones must too. */
10389 if (arm_plt->noncall_refcount != 0)
10390 {
10391 dynreloc_st_type = st_type;
10392 dynreloc_value = value;
10393 }
10394 }
10395 else
10396 /* We populate the .plt entry in finish_dynamic_symbol. */
10397 splt = globals->root.splt;
10398 }
10399 else
10400 {
10401 splt = NULL;
10402 plt_offset = (bfd_vma) -1;
10403 gotplt_offset = (bfd_vma) -1;
10404 }
10405
10406 resolved_to_zero = (h != NULL
10407 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10408
10409 switch (r_type)
10410 {
10411 case R_ARM_NONE:
10412 /* We don't need to find a value for this symbol. It's just a
10413 marker. */
10414 *unresolved_reloc_p = FALSE;
10415 return bfd_reloc_ok;
10416
10417 case R_ARM_ABS12:
10418 if (!globals->vxworks_p)
10419 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10420 /* Fall through. */
10421
10422 case R_ARM_PC24:
10423 case R_ARM_ABS32:
10424 case R_ARM_ABS32_NOI:
10425 case R_ARM_REL32:
10426 case R_ARM_REL32_NOI:
10427 case R_ARM_CALL:
10428 case R_ARM_JUMP24:
10429 case R_ARM_XPC25:
10430 case R_ARM_PREL31:
10431 case R_ARM_PLT32:
10432 /* Handle relocations which should use the PLT entry. ABS32/REL32
10433 will use the symbol's value, which may point to a PLT entry, but we
10434 don't need to handle that here. If we created a PLT entry, all
10435 branches in this object should go to it, except if the PLT is too
10436 far away, in which case a long branch stub should be inserted. */
10437 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10438 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10439 && r_type != R_ARM_CALL
10440 && r_type != R_ARM_JUMP24
10441 && r_type != R_ARM_PLT32)
10442 && plt_offset != (bfd_vma) -1)
10443 {
10444 /* If we've created a .plt section, and assigned a PLT entry
10445 to this function, it must either be a STT_GNU_IFUNC reference
10446 or not be known to bind locally. In other cases, we should
10447 have cleared the PLT entry by now. */
10448 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10449
10450 value = (splt->output_section->vma
10451 + splt->output_offset
10452 + plt_offset);
10453 *unresolved_reloc_p = FALSE;
10454 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10455 contents, rel->r_offset, value,
10456 rel->r_addend);
10457 }
10458
10459 /* When generating a shared object or relocatable executable, these
10460 relocations are copied into the output file to be resolved at
10461 run time. */
10462 if ((bfd_link_pic (info)
10463 || globals->root.is_relocatable_executable
10464 || globals->fdpic_p)
10465 && (input_section->flags & SEC_ALLOC)
10466 && !(globals->vxworks_p
10467 && strcmp (input_section->output_section->name,
10468 ".tls_vars") == 0)
10469 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10470 || !SYMBOL_CALLS_LOCAL (info, h))
10471 && !(input_bfd == globals->stub_bfd
10472 && strstr (input_section->name, STUB_SUFFIX))
10473 && (h == NULL
10474 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10475 && !resolved_to_zero)
10476 || h->root.type != bfd_link_hash_undefweak)
10477 && r_type != R_ARM_PC24
10478 && r_type != R_ARM_CALL
10479 && r_type != R_ARM_JUMP24
10480 && r_type != R_ARM_PREL31
10481 && r_type != R_ARM_PLT32)
10482 {
10483 Elf_Internal_Rela outrel;
10484 bfd_boolean skip, relocate;
10485 int isrofixup = 0;
10486
10487 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10488 && !h->def_regular)
10489 {
10490 char *v = _("shared object");
10491
10492 if (bfd_link_executable (info))
10493 v = _("PIE executable");
10494
10495 _bfd_error_handler
10496 (_("%pB: relocation %s against external or undefined symbol `%s'"
10497 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10498 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10499 return bfd_reloc_notsupported;
10500 }
10501
10502 *unresolved_reloc_p = FALSE;
10503
10504 if (sreloc == NULL && globals->root.dynamic_sections_created)
10505 {
10506 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10507 ! globals->use_rel);
10508
10509 if (sreloc == NULL)
10510 return bfd_reloc_notsupported;
10511 }
10512
10513 skip = FALSE;
10514 relocate = FALSE;
10515
10516 outrel.r_addend = addend;
10517 outrel.r_offset =
10518 _bfd_elf_section_offset (output_bfd, info, input_section,
10519 rel->r_offset);
10520 if (outrel.r_offset == (bfd_vma) -1)
10521 skip = TRUE;
10522 else if (outrel.r_offset == (bfd_vma) -2)
10523 skip = TRUE, relocate = TRUE;
10524 outrel.r_offset += (input_section->output_section->vma
10525 + input_section->output_offset);
10526
10527 if (skip)
10528 memset (&outrel, 0, sizeof outrel);
10529 else if (h != NULL
10530 && h->dynindx != -1
10531 && (!bfd_link_pic (info)
10532 || !(bfd_link_pie (info)
10533 || SYMBOLIC_BIND (info, h))
10534 || !h->def_regular))
10535 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10536 else
10537 {
10538 int symbol;
10539
10540 /* This symbol is local, or marked to become local. */
10541 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10542 || (globals->fdpic_p && !bfd_link_pic(info)));
10543 if (globals->symbian_p)
10544 {
10545 asection *osec;
10546
10547 /* On Symbian OS, the data segment and text segement
10548 can be relocated independently. Therefore, we
10549 must indicate the segment to which this
10550 relocation is relative. The BPABI allows us to
10551 use any symbol in the right segment; we just use
10552 the section symbol as it is convenient. (We
10553 cannot use the symbol given by "h" directly as it
10554 will not appear in the dynamic symbol table.)
10555
10556 Note that the dynamic linker ignores the section
10557 symbol value, so we don't subtract osec->vma
10558 from the emitted reloc addend. */
10559 if (sym_sec)
10560 osec = sym_sec->output_section;
10561 else
10562 osec = input_section->output_section;
10563 symbol = elf_section_data (osec)->dynindx;
10564 if (symbol == 0)
10565 {
10566 struct elf_link_hash_table *htab = elf_hash_table (info);
10567
10568 if ((osec->flags & SEC_READONLY) == 0
10569 && htab->data_index_section != NULL)
10570 osec = htab->data_index_section;
10571 else
10572 osec = htab->text_index_section;
10573 symbol = elf_section_data (osec)->dynindx;
10574 }
10575 BFD_ASSERT (symbol != 0);
10576 }
10577 else
10578 /* On SVR4-ish systems, the dynamic loader cannot
10579 relocate the text and data segments independently,
10580 so the symbol does not matter. */
10581 symbol = 0;
10582 if (dynreloc_st_type == STT_GNU_IFUNC)
10583 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10584 to the .iplt entry. Instead, every non-call reference
10585 must use an R_ARM_IRELATIVE relocation to obtain the
10586 correct run-time address. */
10587 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10588 else if (globals->fdpic_p && !bfd_link_pic(info))
10589 isrofixup = 1;
10590 else
10591 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10592 if (globals->use_rel)
10593 relocate = TRUE;
10594 else
10595 outrel.r_addend += dynreloc_value;
10596 }
10597
10598 if (isrofixup)
10599 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10600 else
10601 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10602
10603 /* If this reloc is against an external symbol, we do not want to
10604 fiddle with the addend. Otherwise, we need to include the symbol
10605 value so that it becomes an addend for the dynamic reloc. */
10606 if (! relocate)
10607 return bfd_reloc_ok;
10608
10609 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10610 contents, rel->r_offset,
10611 dynreloc_value, (bfd_vma) 0);
10612 }
10613 else switch (r_type)
10614 {
10615 case R_ARM_ABS12:
10616 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10617
10618 case R_ARM_XPC25: /* Arm BLX instruction. */
10619 case R_ARM_CALL:
10620 case R_ARM_JUMP24:
10621 case R_ARM_PC24: /* Arm B/BL instruction. */
10622 case R_ARM_PLT32:
10623 {
10624 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10625
10626 if (r_type == R_ARM_XPC25)
10627 {
10628 /* Check for Arm calling Arm function. */
10629 /* FIXME: Should we translate the instruction into a BL
10630 instruction instead ? */
10631 if (branch_type != ST_BRANCH_TO_THUMB)
10632 _bfd_error_handler
10633 (_("\%pB: warning: %s BLX instruction targets"
10634 " %s function '%s'"),
10635 input_bfd, "ARM",
10636 "ARM", h ? h->root.root.string : "(local)");
10637 }
10638 else if (r_type == R_ARM_PC24)
10639 {
10640 /* Check for Arm calling Thumb function. */
10641 if (branch_type == ST_BRANCH_TO_THUMB)
10642 {
10643 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10644 output_bfd, input_section,
10645 hit_data, sym_sec, rel->r_offset,
10646 signed_addend, value,
10647 error_message))
10648 return bfd_reloc_ok;
10649 else
10650 return bfd_reloc_dangerous;
10651 }
10652 }
10653
10654 /* Check if a stub has to be inserted because the
10655 destination is too far or we are changing mode. */
10656 if ( r_type == R_ARM_CALL
10657 || r_type == R_ARM_JUMP24
10658 || r_type == R_ARM_PLT32)
10659 {
10660 enum elf32_arm_stub_type stub_type = arm_stub_none;
10661 struct elf32_arm_link_hash_entry *hash;
10662
10663 hash = (struct elf32_arm_link_hash_entry *) h;
10664 stub_type = arm_type_of_stub (info, input_section, rel,
10665 st_type, &branch_type,
10666 hash, value, sym_sec,
10667 input_bfd, sym_name);
10668
10669 if (stub_type != arm_stub_none)
10670 {
10671 /* The target is out of reach, so redirect the
10672 branch to the local stub for this function. */
10673 stub_entry = elf32_arm_get_stub_entry (input_section,
10674 sym_sec, h,
10675 rel, globals,
10676 stub_type);
10677 {
10678 if (stub_entry != NULL)
10679 value = (stub_entry->stub_offset
10680 + stub_entry->stub_sec->output_offset
10681 + stub_entry->stub_sec->output_section->vma);
10682
10683 if (plt_offset != (bfd_vma) -1)
10684 *unresolved_reloc_p = FALSE;
10685 }
10686 }
10687 else
10688 {
10689 /* If the call goes through a PLT entry, make sure to
10690 check distance to the right destination address. */
10691 if (plt_offset != (bfd_vma) -1)
10692 {
10693 value = (splt->output_section->vma
10694 + splt->output_offset
10695 + plt_offset);
10696 *unresolved_reloc_p = FALSE;
10697 /* The PLT entry is in ARM mode, regardless of the
10698 target function. */
10699 branch_type = ST_BRANCH_TO_ARM;
10700 }
10701 }
10702 }
10703
10704 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10705 where:
10706 S is the address of the symbol in the relocation.
10707 P is address of the instruction being relocated.
10708 A is the addend (extracted from the instruction) in bytes.
10709
10710 S is held in 'value'.
10711 P is the base address of the section containing the
10712 instruction plus the offset of the reloc into that
10713 section, ie:
10714 (input_section->output_section->vma +
10715 input_section->output_offset +
10716 rel->r_offset).
10717 A is the addend, converted into bytes, ie:
10718 (signed_addend * 4)
10719
10720 Note: None of these operations have knowledge of the pipeline
10721 size of the processor, thus it is up to the assembler to
10722 encode this information into the addend. */
10723 value -= (input_section->output_section->vma
10724 + input_section->output_offset);
10725 value -= rel->r_offset;
10726 if (globals->use_rel)
10727 value += (signed_addend << howto->size);
10728 else
10729 /* RELA addends do not have to be adjusted by howto->size. */
10730 value += signed_addend;
10731
10732 signed_addend = value;
10733 signed_addend >>= howto->rightshift;
10734
10735 /* A branch to an undefined weak symbol is turned into a jump to
10736 the next instruction unless a PLT entry will be created.
10737 Do the same for local undefined symbols (but not for STN_UNDEF).
10738 The jump to the next instruction is optimized as a NOP depending
10739 on the architecture. */
10740 if (h ? (h->root.type == bfd_link_hash_undefweak
10741 && plt_offset == (bfd_vma) -1)
10742 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10743 {
10744 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10745
10746 if (arch_has_arm_nop (globals))
10747 value |= 0x0320f000;
10748 else
10749 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10750 }
10751 else
10752 {
10753 /* Perform a signed range check. */
10754 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10755 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10756 return bfd_reloc_overflow;
10757
10758 addend = (value & 2);
10759
10760 value = (signed_addend & howto->dst_mask)
10761 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10762
10763 if (r_type == R_ARM_CALL)
10764 {
10765 /* Set the H bit in the BLX instruction. */
10766 if (branch_type == ST_BRANCH_TO_THUMB)
10767 {
10768 if (addend)
10769 value |= (1 << 24);
10770 else
10771 value &= ~(bfd_vma)(1 << 24);
10772 }
10773
10774 /* Select the correct instruction (BL or BLX). */
10775 /* Only if we are not handling a BL to a stub. In this
10776 case, mode switching is performed by the stub. */
10777 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10778 value |= (1 << 28);
10779 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10780 {
10781 value &= ~(bfd_vma)(1 << 28);
10782 value |= (1 << 24);
10783 }
10784 }
10785 }
10786 }
10787 break;
10788
10789 case R_ARM_ABS32:
10790 value += addend;
10791 if (branch_type == ST_BRANCH_TO_THUMB)
10792 value |= 1;
10793 break;
10794
10795 case R_ARM_ABS32_NOI:
10796 value += addend;
10797 break;
10798
10799 case R_ARM_REL32:
10800 value += addend;
10801 if (branch_type == ST_BRANCH_TO_THUMB)
10802 value |= 1;
10803 value -= (input_section->output_section->vma
10804 + input_section->output_offset + rel->r_offset);
10805 break;
10806
10807 case R_ARM_REL32_NOI:
10808 value += addend;
10809 value -= (input_section->output_section->vma
10810 + input_section->output_offset + rel->r_offset);
10811 break;
10812
10813 case R_ARM_PREL31:
10814 value -= (input_section->output_section->vma
10815 + input_section->output_offset + rel->r_offset);
10816 value += signed_addend;
10817 if (! h || h->root.type != bfd_link_hash_undefweak)
10818 {
10819 /* Check for overflow. */
10820 if ((value ^ (value >> 1)) & (1 << 30))
10821 return bfd_reloc_overflow;
10822 }
10823 value &= 0x7fffffff;
10824 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10825 if (branch_type == ST_BRANCH_TO_THUMB)
10826 value |= 1;
10827 break;
10828 }
10829
10830 bfd_put_32 (input_bfd, value, hit_data);
10831 return bfd_reloc_ok;
10832
10833 case R_ARM_ABS8:
10834 /* PR 16202: Refectch the addend using the correct size. */
10835 if (globals->use_rel)
10836 addend = bfd_get_8 (input_bfd, hit_data);
10837 value += addend;
10838
10839 /* There is no way to tell whether the user intended to use a signed or
10840 unsigned addend. When checking for overflow we accept either,
10841 as specified by the AAELF. */
10842 if ((long) value > 0xff || (long) value < -0x80)
10843 return bfd_reloc_overflow;
10844
10845 bfd_put_8 (input_bfd, value, hit_data);
10846 return bfd_reloc_ok;
10847
10848 case R_ARM_ABS16:
10849 /* PR 16202: Refectch the addend using the correct size. */
10850 if (globals->use_rel)
10851 addend = bfd_get_16 (input_bfd, hit_data);
10852 value += addend;
10853
10854 /* See comment for R_ARM_ABS8. */
10855 if ((long) value > 0xffff || (long) value < -0x8000)
10856 return bfd_reloc_overflow;
10857
10858 bfd_put_16 (input_bfd, value, hit_data);
10859 return bfd_reloc_ok;
10860
10861 case R_ARM_THM_ABS5:
10862 /* Support ldr and str instructions for the thumb. */
10863 if (globals->use_rel)
10864 {
10865 /* Need to refetch addend. */
10866 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10867 /* ??? Need to determine shift amount from operand size. */
10868 addend >>= howto->rightshift;
10869 }
10870 value += addend;
10871
10872 /* ??? Isn't value unsigned? */
10873 if ((long) value > 0x1f || (long) value < -0x10)
10874 return bfd_reloc_overflow;
10875
10876 /* ??? Value needs to be properly shifted into place first. */
10877 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10878 bfd_put_16 (input_bfd, value, hit_data);
10879 return bfd_reloc_ok;
10880
10881 case R_ARM_THM_ALU_PREL_11_0:
10882 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10883 {
10884 bfd_vma insn;
10885 bfd_signed_vma relocation;
10886
10887 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10888 | bfd_get_16 (input_bfd, hit_data + 2);
10889
10890 if (globals->use_rel)
10891 {
10892 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10893 | ((insn & (1 << 26)) >> 15);
10894 if (insn & 0xf00000)
10895 signed_addend = -signed_addend;
10896 }
10897
10898 relocation = value + signed_addend;
10899 relocation -= Pa (input_section->output_section->vma
10900 + input_section->output_offset
10901 + rel->r_offset);
10902
10903 /* PR 21523: Use an absolute value. The user of this reloc will
10904 have already selected an ADD or SUB insn appropriately. */
10905 value = labs (relocation);
10906
10907 if (value >= 0x1000)
10908 return bfd_reloc_overflow;
10909
10910 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10911 if (branch_type == ST_BRANCH_TO_THUMB)
10912 value |= 1;
10913
10914 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10915 | ((value & 0x700) << 4)
10916 | ((value & 0x800) << 15);
10917 if (relocation < 0)
10918 insn |= 0xa00000;
10919
10920 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10921 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10922
10923 return bfd_reloc_ok;
10924 }
10925
10926 case R_ARM_THM_PC8:
10927 /* PR 10073: This reloc is not generated by the GNU toolchain,
10928 but it is supported for compatibility with third party libraries
10929 generated by other compilers, specifically the ARM/IAR. */
10930 {
10931 bfd_vma insn;
10932 bfd_signed_vma relocation;
10933
10934 insn = bfd_get_16 (input_bfd, hit_data);
10935
10936 if (globals->use_rel)
10937 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10938
10939 relocation = value + addend;
10940 relocation -= Pa (input_section->output_section->vma
10941 + input_section->output_offset
10942 + rel->r_offset);
10943
10944 value = relocation;
10945
10946 /* We do not check for overflow of this reloc. Although strictly
10947 speaking this is incorrect, it appears to be necessary in order
10948 to work with IAR generated relocs. Since GCC and GAS do not
10949 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10950 a problem for them. */
10951 value &= 0x3fc;
10952
10953 insn = (insn & 0xff00) | (value >> 2);
10954
10955 bfd_put_16 (input_bfd, insn, hit_data);
10956
10957 return bfd_reloc_ok;
10958 }
10959
10960 case R_ARM_THM_PC12:
10961 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10962 {
10963 bfd_vma insn;
10964 bfd_signed_vma relocation;
10965
10966 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10967 | bfd_get_16 (input_bfd, hit_data + 2);
10968
10969 if (globals->use_rel)
10970 {
10971 signed_addend = insn & 0xfff;
10972 if (!(insn & (1 << 23)))
10973 signed_addend = -signed_addend;
10974 }
10975
10976 relocation = value + signed_addend;
10977 relocation -= Pa (input_section->output_section->vma
10978 + input_section->output_offset
10979 + rel->r_offset);
10980
10981 value = relocation;
10982
10983 if (value >= 0x1000)
10984 return bfd_reloc_overflow;
10985
10986 insn = (insn & 0xff7ff000) | value;
10987 if (relocation >= 0)
10988 insn |= (1 << 23);
10989
10990 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10991 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10992
10993 return bfd_reloc_ok;
10994 }
10995
    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  Resolves Thumb BL/BLX/B.W
	 branches, inserting stubs, redirecting to the PLT, and flipping
	 BL<->BLX for mode changes as required.  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		/* 0xf3af 0x8000 is the 32-bit NOP.W encoding.  */
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		/* Thumb-1: a 16-bit branch over a 16-bit NOP, filling
		   the 4 bytes the BL occupied.  */
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    /* I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S) per the ARM ARM.  */
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if (( r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    /* No BLX available: fall back to an interworking
		       glue stub, or fail if one cannot be made.  */
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissable maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11225
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction (B<cond>.W): a 21-bit
	 signed, halfword-scaled displacement spread across S, J1, J2,
	 imm6 and imm11 fields.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* +/-1 MiB range, even displacements only.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* Redirect through a long-branch stub when the target is out of
	   range or requires a mode change.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11317
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  16-bit unconditional/conditional
	 branches and CBZ/CBNZ; field layout is taken from the reloc's
	 howto masks so one body serves all three.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Manually sign-extend using the top bit of src_mask.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* CBZ/CBNZ split the offset into i:imm5 fields.  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	/* Merge with the untouched opcode bits.  */
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
11372
11373 case R_ARM_ALU_PCREL7_0:
11374 case R_ARM_ALU_PCREL15_8:
11375 case R_ARM_ALU_PCREL23_15:
11376 {
11377 bfd_vma insn;
11378 bfd_vma relocation;
11379
11380 insn = bfd_get_32 (input_bfd, hit_data);
11381 if (globals->use_rel)
11382 {
11383 /* Extract the addend. */
11384 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11385 signed_addend = addend;
11386 }
11387 relocation = value + signed_addend;
11388
11389 relocation -= (input_section->output_section->vma
11390 + input_section->output_offset
11391 + rel->r_offset);
11392 insn = (insn & ~0xfff)
11393 | ((howto->bitpos << 7) & 0xf00)
11394 | ((relocation >> howto->bitpos) & 0xff);
11395 bfd_put_32 (input_bfd, value, hit_data);
11396 }
11397 return bfd_reloc_ok;
11398
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are marker relocations used for vtable garbage collection;
	 they patch nothing in the output.  */
      return bfd_reloc_ok;
11402
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11426
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  Resolves references to
	 _GLOBAL_OFFSET_TABLE_ itself, so the reloc is never left for the
	 dynamic linker.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11439
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  Allocates/initializes the GOT slot on
	 first use (LSB of the recorded offset marks "already done"),
	 emitting GLOB_DAT/RELATIVE/IRELATIVE dynamic relocs or FDPIC
	 rofixups as appropriate.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processsed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else if (globals->fdpic_p)
		    isrofixup = 1;
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel || isrofixup))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0 && !isrofixup)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      else if (isrofixup)
		{
		  /* FDPIC static link: record the GOT slot's address for
		     the runtime rofixup pass instead of a dynamic reloc.  */
		  arm_elf_add_rofixup(output_bfd,
				      elf32_arm_hash_table(info)->srofixup,
				      sgot->output_section->vma
				      + sgot->output_offset + off);
		}
	      /* Mark the slot as processed.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      else if (globals->fdpic_p)
		{
		  /* For FDPIC executables, we use rofixup to fix
		     address at runtime.  */
		  arm_elf_add_rofixup(output_bfd, globals->srofixup,
				      sgot->output_section->vma + sgot->output_offset
				      + off);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT32 resolves to the GOT-relative offset; the others
	 need the absolute address of the slot.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11591
    case R_ARM_TLS_LDO32:
      /* Offset of the symbol within the TLS block (dtpoff); pairs with
	 the module handle obtained via R_ARM_TLS_LDM32.  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11598
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: one shared GOT slot holding the module ID for
	 the whole link, created lazily (LSB of the cached offset marks
	 "already emitted").  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static/executable link: the module is always 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    /* FDPIC: resolve to the slot's offset from the start of the
	       GOT rather than a PC-relative address.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    value = sgot->output_section->vma + sgot->output_offset + off
		    - (input_section->output_section->vma
		       + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11658
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic / initial-exec / TLS-descriptor handling: fills
	 the GOT (and, for descriptors, the GOTPLT) entries for the
	 symbol on first use, emits the matching dynamic relocs, and
	 resolves the insn to the appropriate GOT-relative value.  Also
	 materializes the branch for (THM_)TLS_CALL trampolines.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* GD needs a (module, dtpoff) pair: DTPMOD32 for the
		       first word, DTPOFF32 (or a direct value when the
		       symbol binds locally) for the second.  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* When both GD and IE entries exist, a non-GD reloc resolves to
	   the IE entry, which sits after the 8-byte GD pair.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM BL/BLX to the trampoline: PC bias is 8.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		/* 32-bit Thumb insn?  Fetch the second halfword.  */
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
				 r_type == R_ARM_TLS_IE32_FDPIC))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
12029
    case R_ARM_TLS_LE32:
      /* Local-exec TLS: a fixed offset from the thread pointer, which is
	 only meaningful when the module is the executable itself — hence
	 the hard error for shared objects.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
12046
12047 case R_ARM_V4BX:
12048 if (globals->fix_v4bx)
12049 {
12050 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12051
12052 /* Ensure that we have a BX instruction. */
12053 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12054
12055 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12056 {
12057 /* Branch to veneer. */
12058 bfd_vma glue_addr;
12059 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12060 glue_addr -= input_section->output_section->vma
12061 + input_section->output_offset
12062 + rel->r_offset + 8;
12063 insn = (insn & 0xf0000000) | 0x0a000000
12064 | ((glue_addr >> 2) & 0x00ffffff);
12065 }
12066 else
12067 {
12068 /* Preserve Rm (lowest four bits) and the condition code
12069 (highest four bits). Other bits encode MOV PC,Rm. */
12070 insn = (insn & 0xf000000f) | 0x01a0f000;
12071 }
12072
12073 bfd_put_32 (input_bfd, insn, hit_data);
12074 }
12075 return bfd_reloc_ok;
12076
12077 case R_ARM_MOVW_ABS_NC:
12078 case R_ARM_MOVT_ABS:
12079 case R_ARM_MOVW_PREL_NC:
12080 case R_ARM_MOVT_PREL:
12081 /* Until we properly support segment-base-relative addressing then
12082 we assume the segment base to be zero, as for the group relocations.
12083 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12084 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12085 case R_ARM_MOVW_BREL_NC:
12086 case R_ARM_MOVW_BREL:
12087 case R_ARM_MOVT_BREL:
12088 {
12089 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12090
12091 if (globals->use_rel)
12092 {
12093 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12094 signed_addend = (addend ^ 0x8000) - 0x8000;
12095 }
12096
12097 value += signed_addend;
12098
12099 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12100 value -= (input_section->output_section->vma
12101 + input_section->output_offset + rel->r_offset);
12102
12103 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12104 return bfd_reloc_overflow;
12105
12106 if (branch_type == ST_BRANCH_TO_THUMB)
12107 value |= 1;
12108
12109 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12110 || r_type == R_ARM_MOVT_BREL)
12111 value >>= 16;
12112
12113 insn &= 0xfff0f000;
12114 insn |= value & 0xfff;
12115 insn |= (value & 0xf000) << 4;
12116 bfd_put_32 (input_bfd, insn, hit_data);
12117 }
12118 return bfd_reloc_ok;
12119
12120 case R_ARM_THM_MOVW_ABS_NC:
12121 case R_ARM_THM_MOVT_ABS:
12122 case R_ARM_THM_MOVW_PREL_NC:
12123 case R_ARM_THM_MOVT_PREL:
12124 /* Until we properly support segment-base-relative addressing then
12125 we assume the segment base to be zero, as for the above relocations.
12126 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12127 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12128 as R_ARM_THM_MOVT_ABS. */
12129 case R_ARM_THM_MOVW_BREL_NC:
12130 case R_ARM_THM_MOVW_BREL:
12131 case R_ARM_THM_MOVT_BREL:
12132 {
12133 bfd_vma insn;
12134
12135 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12136 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12137
12138 if (globals->use_rel)
12139 {
12140 addend = ((insn >> 4) & 0xf000)
12141 | ((insn >> 15) & 0x0800)
12142 | ((insn >> 4) & 0x0700)
12143 | (insn & 0x00ff);
12144 signed_addend = (addend ^ 0x8000) - 0x8000;
12145 }
12146
12147 value += signed_addend;
12148
12149 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12150 value -= (input_section->output_section->vma
12151 + input_section->output_offset + rel->r_offset);
12152
12153 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12154 return bfd_reloc_overflow;
12155
12156 if (branch_type == ST_BRANCH_TO_THUMB)
12157 value |= 1;
12158
12159 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12160 || r_type == R_ARM_THM_MOVT_BREL)
12161 value >>= 16;
12162
12163 insn &= 0xfbf08f00;
12164 insn |= (value & 0xf000) << 4;
12165 insn |= (value & 0x0800) << 15;
12166 insn |= (value & 0x0700) << 4;
12167 insn |= (value & 0x00ff);
12168
12169 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12170 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12171 }
12172 return bfd_reloc_ok;
12173
12174 case R_ARM_ALU_PC_G0_NC:
12175 case R_ARM_ALU_PC_G1_NC:
12176 case R_ARM_ALU_PC_G0:
12177 case R_ARM_ALU_PC_G1:
12178 case R_ARM_ALU_PC_G2:
12179 case R_ARM_ALU_SB_G0_NC:
12180 case R_ARM_ALU_SB_G1_NC:
12181 case R_ARM_ALU_SB_G0:
12182 case R_ARM_ALU_SB_G1:
12183 case R_ARM_ALU_SB_G2:
12184 {
12185 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12186 bfd_vma pc = input_section->output_section->vma
12187 + input_section->output_offset + rel->r_offset;
12188 /* sb is the origin of the *segment* containing the symbol. */
12189 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12190 bfd_vma residual;
12191 bfd_vma g_n;
12192 bfd_signed_vma signed_value;
12193 int group = 0;
12194
12195 /* Determine which group of bits to select. */
12196 switch (r_type)
12197 {
12198 case R_ARM_ALU_PC_G0_NC:
12199 case R_ARM_ALU_PC_G0:
12200 case R_ARM_ALU_SB_G0_NC:
12201 case R_ARM_ALU_SB_G0:
12202 group = 0;
12203 break;
12204
12205 case R_ARM_ALU_PC_G1_NC:
12206 case R_ARM_ALU_PC_G1:
12207 case R_ARM_ALU_SB_G1_NC:
12208 case R_ARM_ALU_SB_G1:
12209 group = 1;
12210 break;
12211
12212 case R_ARM_ALU_PC_G2:
12213 case R_ARM_ALU_SB_G2:
12214 group = 2;
12215 break;
12216
12217 default:
12218 abort ();
12219 }
12220
12221 /* If REL, extract the addend from the insn. If RELA, it will
12222 have already been fetched for us. */
12223 if (globals->use_rel)
12224 {
12225 int negative;
12226 bfd_vma constant = insn & 0xff;
12227 bfd_vma rotation = (insn & 0xf00) >> 8;
12228
12229 if (rotation == 0)
12230 signed_addend = constant;
12231 else
12232 {
12233 /* Compensate for the fact that in the instruction, the
12234 rotation is stored in multiples of 2 bits. */
12235 rotation *= 2;
12236
12237 /* Rotate "constant" right by "rotation" bits. */
12238 signed_addend = (constant >> rotation) |
12239 (constant << (8 * sizeof (bfd_vma) - rotation));
12240 }
12241
12242 /* Determine if the instruction is an ADD or a SUB.
12243 (For REL, this determines the sign of the addend.) */
12244 negative = identify_add_or_sub (insn);
12245 if (negative == 0)
12246 {
12247 _bfd_error_handler
12248 /* xgettext:c-format */
12249 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12250 "are allowed for ALU group relocations"),
12251 input_bfd, input_section, (uint64_t) rel->r_offset);
12252 return bfd_reloc_overflow;
12253 }
12254
12255 signed_addend *= negative;
12256 }
12257
12258 /* Compute the value (X) to go in the place. */
12259 if (r_type == R_ARM_ALU_PC_G0_NC
12260 || r_type == R_ARM_ALU_PC_G1_NC
12261 || r_type == R_ARM_ALU_PC_G0
12262 || r_type == R_ARM_ALU_PC_G1
12263 || r_type == R_ARM_ALU_PC_G2)
12264 /* PC relative. */
12265 signed_value = value - pc + signed_addend;
12266 else
12267 /* Section base relative. */
12268 signed_value = value - sb + signed_addend;
12269
12270 /* If the target symbol is a Thumb function, then set the
12271 Thumb bit in the address. */
12272 if (branch_type == ST_BRANCH_TO_THUMB)
12273 signed_value |= 1;
12274
12275 /* Calculate the value of the relevant G_n, in encoded
12276 constant-with-rotation format. */
12277 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12278 group, &residual);
12279
12280 /* Check for overflow if required. */
12281 if ((r_type == R_ARM_ALU_PC_G0
12282 || r_type == R_ARM_ALU_PC_G1
12283 || r_type == R_ARM_ALU_PC_G2
12284 || r_type == R_ARM_ALU_SB_G0
12285 || r_type == R_ARM_ALU_SB_G1
12286 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12287 {
12288 _bfd_error_handler
12289 /* xgettext:c-format */
12290 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12291 "splitting %#" PRIx64 " for group relocation %s"),
12292 input_bfd, input_section, (uint64_t) rel->r_offset,
12293 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12294 howto->name);
12295 return bfd_reloc_overflow;
12296 }
12297
12298 /* Mask out the value and the ADD/SUB part of the opcode; take care
12299 not to destroy the S bit. */
12300 insn &= 0xff1ff000;
12301
12302 /* Set the opcode according to whether the value to go in the
12303 place is negative. */
12304 if (signed_value < 0)
12305 insn |= 1 << 22;
12306 else
12307 insn |= 1 << 23;
12308
12309 /* Encode the offset. */
12310 insn |= g_n;
12311
12312 bfd_put_32 (input_bfd, insn, hit_data);
12313 }
12314 return bfd_reloc_ok;
12315
12316 case R_ARM_LDR_PC_G0:
12317 case R_ARM_LDR_PC_G1:
12318 case R_ARM_LDR_PC_G2:
12319 case R_ARM_LDR_SB_G0:
12320 case R_ARM_LDR_SB_G1:
12321 case R_ARM_LDR_SB_G2:
12322 {
12323 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12324 bfd_vma pc = input_section->output_section->vma
12325 + input_section->output_offset + rel->r_offset;
12326 /* sb is the origin of the *segment* containing the symbol. */
12327 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12328 bfd_vma residual;
12329 bfd_signed_vma signed_value;
12330 int group = 0;
12331
12332 /* Determine which groups of bits to calculate. */
12333 switch (r_type)
12334 {
12335 case R_ARM_LDR_PC_G0:
12336 case R_ARM_LDR_SB_G0:
12337 group = 0;
12338 break;
12339
12340 case R_ARM_LDR_PC_G1:
12341 case R_ARM_LDR_SB_G1:
12342 group = 1;
12343 break;
12344
12345 case R_ARM_LDR_PC_G2:
12346 case R_ARM_LDR_SB_G2:
12347 group = 2;
12348 break;
12349
12350 default:
12351 abort ();
12352 }
12353
12354 /* If REL, extract the addend from the insn. If RELA, it will
12355 have already been fetched for us. */
12356 if (globals->use_rel)
12357 {
12358 int negative = (insn & (1 << 23)) ? 1 : -1;
12359 signed_addend = negative * (insn & 0xfff);
12360 }
12361
12362 /* Compute the value (X) to go in the place. */
12363 if (r_type == R_ARM_LDR_PC_G0
12364 || r_type == R_ARM_LDR_PC_G1
12365 || r_type == R_ARM_LDR_PC_G2)
12366 /* PC relative. */
12367 signed_value = value - pc + signed_addend;
12368 else
12369 /* Section base relative. */
12370 signed_value = value - sb + signed_addend;
12371
12372 /* Calculate the value of the relevant G_{n-1} to obtain
12373 the residual at that stage. */
12374 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12375 group - 1, &residual);
12376
12377 /* Check for overflow. */
12378 if (residual >= 0x1000)
12379 {
12380 _bfd_error_handler
12381 /* xgettext:c-format */
12382 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12383 "splitting %#" PRIx64 " for group relocation %s"),
12384 input_bfd, input_section, (uint64_t) rel->r_offset,
12385 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12386 howto->name);
12387 return bfd_reloc_overflow;
12388 }
12389
12390 /* Mask out the value and U bit. */
12391 insn &= 0xff7ff000;
12392
12393 /* Set the U bit if the value to go in the place is non-negative. */
12394 if (signed_value >= 0)
12395 insn |= 1 << 23;
12396
12397 /* Encode the offset. */
12398 insn |= residual;
12399
12400 bfd_put_32 (input_bfd, insn, hit_data);
12401 }
12402 return bfd_reloc_ok;
12403
12404 case R_ARM_LDRS_PC_G0:
12405 case R_ARM_LDRS_PC_G1:
12406 case R_ARM_LDRS_PC_G2:
12407 case R_ARM_LDRS_SB_G0:
12408 case R_ARM_LDRS_SB_G1:
12409 case R_ARM_LDRS_SB_G2:
12410 {
12411 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12412 bfd_vma pc = input_section->output_section->vma
12413 + input_section->output_offset + rel->r_offset;
12414 /* sb is the origin of the *segment* containing the symbol. */
12415 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12416 bfd_vma residual;
12417 bfd_signed_vma signed_value;
12418 int group = 0;
12419
12420 /* Determine which groups of bits to calculate. */
12421 switch (r_type)
12422 {
12423 case R_ARM_LDRS_PC_G0:
12424 case R_ARM_LDRS_SB_G0:
12425 group = 0;
12426 break;
12427
12428 case R_ARM_LDRS_PC_G1:
12429 case R_ARM_LDRS_SB_G1:
12430 group = 1;
12431 break;
12432
12433 case R_ARM_LDRS_PC_G2:
12434 case R_ARM_LDRS_SB_G2:
12435 group = 2;
12436 break;
12437
12438 default:
12439 abort ();
12440 }
12441
12442 /* If REL, extract the addend from the insn. If RELA, it will
12443 have already been fetched for us. */
12444 if (globals->use_rel)
12445 {
12446 int negative = (insn & (1 << 23)) ? 1 : -1;
12447 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12448 }
12449
12450 /* Compute the value (X) to go in the place. */
12451 if (r_type == R_ARM_LDRS_PC_G0
12452 || r_type == R_ARM_LDRS_PC_G1
12453 || r_type == R_ARM_LDRS_PC_G2)
12454 /* PC relative. */
12455 signed_value = value - pc + signed_addend;
12456 else
12457 /* Section base relative. */
12458 signed_value = value - sb + signed_addend;
12459
12460 /* Calculate the value of the relevant G_{n-1} to obtain
12461 the residual at that stage. */
12462 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12463 group - 1, &residual);
12464
12465 /* Check for overflow. */
12466 if (residual >= 0x100)
12467 {
12468 _bfd_error_handler
12469 /* xgettext:c-format */
12470 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12471 "splitting %#" PRIx64 " for group relocation %s"),
12472 input_bfd, input_section, (uint64_t) rel->r_offset,
12473 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12474 howto->name);
12475 return bfd_reloc_overflow;
12476 }
12477
12478 /* Mask out the value and U bit. */
12479 insn &= 0xff7ff0f0;
12480
12481 /* Set the U bit if the value to go in the place is non-negative. */
12482 if (signed_value >= 0)
12483 insn |= 1 << 23;
12484
12485 /* Encode the offset. */
12486 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12487
12488 bfd_put_32 (input_bfd, insn, hit_data);
12489 }
12490 return bfd_reloc_ok;
12491
12492 case R_ARM_LDC_PC_G0:
12493 case R_ARM_LDC_PC_G1:
12494 case R_ARM_LDC_PC_G2:
12495 case R_ARM_LDC_SB_G0:
12496 case R_ARM_LDC_SB_G1:
12497 case R_ARM_LDC_SB_G2:
12498 {
12499 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12500 bfd_vma pc = input_section->output_section->vma
12501 + input_section->output_offset + rel->r_offset;
12502 /* sb is the origin of the *segment* containing the symbol. */
12503 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12504 bfd_vma residual;
12505 bfd_signed_vma signed_value;
12506 int group = 0;
12507
12508 /* Determine which groups of bits to calculate. */
12509 switch (r_type)
12510 {
12511 case R_ARM_LDC_PC_G0:
12512 case R_ARM_LDC_SB_G0:
12513 group = 0;
12514 break;
12515
12516 case R_ARM_LDC_PC_G1:
12517 case R_ARM_LDC_SB_G1:
12518 group = 1;
12519 break;
12520
12521 case R_ARM_LDC_PC_G2:
12522 case R_ARM_LDC_SB_G2:
12523 group = 2;
12524 break;
12525
12526 default:
12527 abort ();
12528 }
12529
12530 /* If REL, extract the addend from the insn. If RELA, it will
12531 have already been fetched for us. */
12532 if (globals->use_rel)
12533 {
12534 int negative = (insn & (1 << 23)) ? 1 : -1;
12535 signed_addend = negative * ((insn & 0xff) << 2);
12536 }
12537
12538 /* Compute the value (X) to go in the place. */
12539 if (r_type == R_ARM_LDC_PC_G0
12540 || r_type == R_ARM_LDC_PC_G1
12541 || r_type == R_ARM_LDC_PC_G2)
12542 /* PC relative. */
12543 signed_value = value - pc + signed_addend;
12544 else
12545 /* Section base relative. */
12546 signed_value = value - sb + signed_addend;
12547
12548 /* Calculate the value of the relevant G_{n-1} to obtain
12549 the residual at that stage. */
12550 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12551 group - 1, &residual);
12552
12553 /* Check for overflow. (The absolute value to go in the place must be
12554 divisible by four and, after having been divided by four, must
12555 fit in eight bits.) */
12556 if ((residual & 0x3) != 0 || residual >= 0x400)
12557 {
12558 _bfd_error_handler
12559 /* xgettext:c-format */
12560 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12561 "splitting %#" PRIx64 " for group relocation %s"),
12562 input_bfd, input_section, (uint64_t) rel->r_offset,
12563 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12564 howto->name);
12565 return bfd_reloc_overflow;
12566 }
12567
12568 /* Mask out the value and U bit. */
12569 insn &= 0xff7fff00;
12570
12571 /* Set the U bit if the value to go in the place is non-negative. */
12572 if (signed_value >= 0)
12573 insn |= 1 << 23;
12574
12575 /* Encode the offset. */
12576 insn |= residual >> 2;
12577
12578 bfd_put_32 (input_bfd, insn, hit_data);
12579 }
12580 return bfd_reloc_ok;
12581
12582 case R_ARM_THM_ALU_ABS_G0_NC:
12583 case R_ARM_THM_ALU_ABS_G1_NC:
12584 case R_ARM_THM_ALU_ABS_G2_NC:
12585 case R_ARM_THM_ALU_ABS_G3_NC:
12586 {
12587 const int shift_array[4] = {0, 8, 16, 24};
12588 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12589 bfd_vma addr = value;
12590 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12591
12592 /* Compute address. */
12593 if (globals->use_rel)
12594 signed_addend = insn & 0xff;
12595 addr += signed_addend;
12596 if (branch_type == ST_BRANCH_TO_THUMB)
12597 addr |= 1;
12598 /* Clean imm8 insn. */
12599 insn &= 0xff00;
12600 /* And update with correct part of address. */
12601 insn |= (addr >> shift) & 0xff;
12602 /* Update insn. */
12603 bfd_put_16 (input_bfd, insn, hit_data);
12604 }
12605
12606 *unresolved_reloc_p = FALSE;
12607 return bfd_reloc_ok;
12608
12609 case R_ARM_GOTOFFFUNCDESC:
12610 {
12611 if (h == NULL)
12612 {
12613 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12614 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12615 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12616 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12617 bfd_vma seg = -1;
12618
12619 if (bfd_link_pic(info) && dynindx == 0)
12620 abort();
12621
12622 /* Resolve relocation. */
12623 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12624 , contents + rel->r_offset);
12625 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12626 not done yet. */
12627 arm_elf_fill_funcdesc(output_bfd, info,
12628 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12629 dynindx, offset, addr, dynreloc_value, seg);
12630 }
12631 else
12632 {
12633 int dynindx;
12634 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12635 bfd_vma addr;
12636 bfd_vma seg = -1;
12637
12638 /* For static binaries, sym_sec can be null. */
12639 if (sym_sec)
12640 {
12641 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12642 addr = dynreloc_value - sym_sec->output_section->vma;
12643 }
12644 else
12645 {
12646 dynindx = 0;
12647 addr = 0;
12648 }
12649
12650 if (bfd_link_pic(info) && dynindx == 0)
12651 abort();
12652
12653 /* This case cannot occur since funcdesc is allocated by
12654 the dynamic loader so we cannot resolve the relocation. */
12655 if (h->dynindx != -1)
12656 abort();
12657
12658 /* Resolve relocation. */
12659 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12660 contents + rel->r_offset);
12661 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12662 arm_elf_fill_funcdesc(output_bfd, info,
12663 &eh->fdpic_cnts.funcdesc_offset,
12664 dynindx, offset, addr, dynreloc_value, seg);
12665 }
12666 }
12667 *unresolved_reloc_p = FALSE;
12668 return bfd_reloc_ok;
12669
12670 case R_ARM_GOTFUNCDESC:
12671 {
12672 if (h != NULL)
12673 {
12674 Elf_Internal_Rela outrel;
12675
12676 /* Resolve relocation. */
12677 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12678 + sgot->output_offset),
12679 contents + rel->r_offset);
12680 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12681 if(h->dynindx == -1)
12682 {
12683 int dynindx;
12684 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12685 bfd_vma addr;
12686 bfd_vma seg = -1;
12687
12688 /* For static binaries sym_sec can be null. */
12689 if (sym_sec)
12690 {
12691 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12692 addr = dynreloc_value - sym_sec->output_section->vma;
12693 }
12694 else
12695 {
12696 dynindx = 0;
12697 addr = 0;
12698 }
12699
12700 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12701 arm_elf_fill_funcdesc(output_bfd, info,
12702 &eh->fdpic_cnts.funcdesc_offset,
12703 dynindx, offset, addr, dynreloc_value, seg);
12704 }
12705
12706 /* Add a dynamic relocation on GOT entry if not already done. */
12707 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12708 {
12709 if (h->dynindx == -1)
12710 {
12711 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12712 if (h->root.type == bfd_link_hash_undefweak)
12713 bfd_put_32(output_bfd, 0, sgot->contents
12714 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12715 else
12716 bfd_put_32(output_bfd, sgot->output_section->vma
12717 + sgot->output_offset
12718 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12719 sgot->contents
12720 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12721 }
12722 else
12723 {
12724 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12725 }
12726 outrel.r_offset = sgot->output_section->vma
12727 + sgot->output_offset
12728 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12729 outrel.r_addend = 0;
12730 if (h->dynindx == -1 && !bfd_link_pic(info))
12731 if (h->root.type == bfd_link_hash_undefweak)
12732 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12733 else
12734 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12735 else
12736 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12737 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12738 }
12739 }
12740 else
12741 {
12742 /* Such relocation on static function should not have been
12743 emitted by the compiler. */
12744 abort();
12745 }
12746 }
12747 *unresolved_reloc_p = FALSE;
12748 return bfd_reloc_ok;
12749
12750 case R_ARM_FUNCDESC:
12751 {
12752 if (h == NULL)
12753 {
12754 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12755 Elf_Internal_Rela outrel;
12756 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12757 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12758 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12759 bfd_vma seg = -1;
12760
12761 if (bfd_link_pic(info) && dynindx == 0)
12762 abort();
12763
12764 /* Replace static FUNCDESC relocation with a
12765 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12766 executable. */
12767 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12768 outrel.r_offset = input_section->output_section->vma
12769 + input_section->output_offset + rel->r_offset;
12770 outrel.r_addend = 0;
12771 if (bfd_link_pic(info))
12772 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12773 else
12774 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12775
12776 bfd_put_32 (input_bfd, sgot->output_section->vma
12777 + sgot->output_offset + offset, hit_data);
12778
12779 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12780 arm_elf_fill_funcdesc(output_bfd, info,
12781 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12782 dynindx, offset, addr, dynreloc_value, seg);
12783 }
12784 else
12785 {
12786 if (h->dynindx == -1)
12787 {
12788 int dynindx;
12789 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12790 bfd_vma addr;
12791 bfd_vma seg = -1;
12792 Elf_Internal_Rela outrel;
12793
12794 /* For static binaries sym_sec can be null. */
12795 if (sym_sec)
12796 {
12797 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12798 addr = dynreloc_value - sym_sec->output_section->vma;
12799 }
12800 else
12801 {
12802 dynindx = 0;
12803 addr = 0;
12804 }
12805
12806 if (bfd_link_pic(info) && dynindx == 0)
12807 abort();
12808
12809 /* Replace static FUNCDESC relocation with a
12810 R_ARM_RELATIVE dynamic relocation. */
12811 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12812 outrel.r_offset = input_section->output_section->vma
12813 + input_section->output_offset + rel->r_offset;
12814 outrel.r_addend = 0;
12815 if (bfd_link_pic(info))
12816 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12817 else
12818 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12819
12820 bfd_put_32 (input_bfd, sgot->output_section->vma
12821 + sgot->output_offset + offset, hit_data);
12822
12823 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12824 arm_elf_fill_funcdesc(output_bfd, info,
12825 &eh->fdpic_cnts.funcdesc_offset,
12826 dynindx, offset, addr, dynreloc_value, seg);
12827 }
12828 else
12829 {
12830 Elf_Internal_Rela outrel;
12831
12832 /* Add a dynamic relocation. */
12833 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12834 outrel.r_offset = input_section->output_section->vma
12835 + input_section->output_offset + rel->r_offset;
12836 outrel.r_addend = 0;
12837 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12838 }
12839 }
12840 }
12841 *unresolved_reloc_p = FALSE;
12842 return bfd_reloc_ok;
12843
12844 default:
12845 return bfd_reloc_notsupported;
12846 }
12847 }
12848
12849 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12850 static void
12851 arm_add_to_rel (bfd * abfd,
12852 bfd_byte * address,
12853 reloc_howto_type * howto,
12854 bfd_signed_vma increment)
12855 {
12856 bfd_signed_vma addend;
12857
12858 if (howto->type == R_ARM_THM_CALL
12859 || howto->type == R_ARM_THM_JUMP24)
12860 {
12861 int upper_insn, lower_insn;
12862 int upper, lower;
12863
12864 upper_insn = bfd_get_16 (abfd, address);
12865 lower_insn = bfd_get_16 (abfd, address + 2);
12866 upper = upper_insn & 0x7ff;
12867 lower = lower_insn & 0x7ff;
12868
12869 addend = (upper << 12) | (lower << 1);
12870 addend += increment;
12871 addend >>= 1;
12872
12873 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12874 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12875
12876 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12877 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12878 }
12879 else
12880 {
12881 bfd_vma contents;
12882
12883 contents = bfd_get_32 (abfd, address);
12884
12885 /* Get the (signed) value from the instruction. */
12886 addend = contents & howto->src_mask;
12887 if (addend & ((howto->src_mask + 1) >> 1))
12888 {
12889 bfd_signed_vma mask;
12890
12891 mask = -1;
12892 mask &= ~ howto->src_mask;
12893 addend |= mask;
12894 }
12895
12896 /* Add in the increment, (which is a byte value). */
12897 switch (howto->type)
12898 {
12899 default:
12900 addend += increment;
12901 break;
12902
12903 case R_ARM_PC24:
12904 case R_ARM_PLT32:
12905 case R_ARM_CALL:
12906 case R_ARM_JUMP24:
12907 addend <<= howto->size;
12908 addend += increment;
12909
12910 /* Should we check for overflow here ? */
12911
12912 /* Drop any undesired bits. */
12913 addend >>= howto->rightshift;
12914 break;
12915 }
12916
12917 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12918
12919 bfd_put_32 (abfd, contents, address);
12920 }
12921 }
12922
12923 #define IS_ARM_TLS_RELOC(R_TYPE) \
12924 ((R_TYPE) == R_ARM_TLS_GD32 \
12925 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
12926 || (R_TYPE) == R_ARM_TLS_LDO32 \
12927 || (R_TYPE) == R_ARM_TLS_LDM32 \
12928 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
12929 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
12930 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
12931 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
12932 || (R_TYPE) == R_ARM_TLS_LE32 \
12933 || (R_TYPE) == R_ARM_TLS_IE32 \
12934 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
12935 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
12936
12937 /* Specific set of relocations for the GNU TLS dialect.  */
12938 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
12939 ((R_TYPE) == R_ARM_TLS_GOTDESC \
12940 || (R_TYPE) == R_ARM_TLS_CALL \
12941 || (R_TYPE) == R_ARM_THM_TLS_CALL \
12942 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
12943 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12944
12945 /* Relocate an ARM ELF section. */
12946
12947 static bfd_boolean
12948 elf32_arm_relocate_section (bfd * output_bfd,
12949 struct bfd_link_info * info,
12950 bfd * input_bfd,
12951 asection * input_section,
12952 bfd_byte * contents,
12953 Elf_Internal_Rela * relocs,
12954 Elf_Internal_Sym * local_syms,
12955 asection ** local_sections)
12956 {
12957 Elf_Internal_Shdr *symtab_hdr;
12958 struct elf_link_hash_entry **sym_hashes;
12959 Elf_Internal_Rela *rel;
12960 Elf_Internal_Rela *relend;
12961 const char *name;
12962 struct elf32_arm_link_hash_table * globals;
12963
12964 globals = elf32_arm_hash_table (info);
12965 if (globals == NULL)
12966 return FALSE;
12967
12968 symtab_hdr = & elf_symtab_hdr (input_bfd);
12969 sym_hashes = elf_sym_hashes (input_bfd);
12970
12971 rel = relocs;
12972 relend = relocs + input_section->reloc_count;
12973 for (; rel < relend; rel++)
12974 {
12975 int r_type;
12976 reloc_howto_type * howto;
12977 unsigned long r_symndx;
12978 Elf_Internal_Sym * sym;
12979 asection * sec;
12980 struct elf_link_hash_entry * h;
12981 bfd_vma relocation;
12982 bfd_reloc_status_type r;
12983 arelent bfd_reloc;
12984 char sym_type;
12985 bfd_boolean unresolved_reloc = FALSE;
12986 char *error_message = NULL;
12987
12988 r_symndx = ELF32_R_SYM (rel->r_info);
12989 r_type = ELF32_R_TYPE (rel->r_info);
12990 r_type = arm_real_reloc_type (globals, r_type);
12991
12992 if ( r_type == R_ARM_GNU_VTENTRY
12993 || r_type == R_ARM_GNU_VTINHERIT)
12994 continue;
12995
12996 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
12997
12998 if (howto == NULL)
12999 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13000
13001 h = NULL;
13002 sym = NULL;
13003 sec = NULL;
13004
13005 if (r_symndx < symtab_hdr->sh_info)
13006 {
13007 sym = local_syms + r_symndx;
13008 sym_type = ELF32_ST_TYPE (sym->st_info);
13009 sec = local_sections[r_symndx];
13010
13011 /* An object file might have a reference to a local
13012 undefined symbol. This is a daft object file, but we
13013 should at least do something about it. V4BX & NONE
13014 relocations do not use the symbol and are explicitly
13015 allowed to use the undefined symbol, so allow those.
13016 Likewise for relocations against STN_UNDEF. */
13017 if (r_type != R_ARM_V4BX
13018 && r_type != R_ARM_NONE
13019 && r_symndx != STN_UNDEF
13020 && bfd_is_und_section (sec)
13021 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13022 (*info->callbacks->undefined_symbol)
13023 (info, bfd_elf_string_from_elf_section
13024 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13025 input_bfd, input_section,
13026 rel->r_offset, TRUE);
13027
13028 if (globals->use_rel)
13029 {
13030 relocation = (sec->output_section->vma
13031 + sec->output_offset
13032 + sym->st_value);
13033 if (!bfd_link_relocatable (info)
13034 && (sec->flags & SEC_MERGE)
13035 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13036 {
13037 asection *msec;
13038 bfd_vma addend, value;
13039
13040 switch (r_type)
13041 {
13042 case R_ARM_MOVW_ABS_NC:
13043 case R_ARM_MOVT_ABS:
13044 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13045 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13046 addend = (addend ^ 0x8000) - 0x8000;
13047 break;
13048
13049 case R_ARM_THM_MOVW_ABS_NC:
13050 case R_ARM_THM_MOVT_ABS:
13051 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13052 << 16;
13053 value |= bfd_get_16 (input_bfd,
13054 contents + rel->r_offset + 2);
13055 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13056 | ((value & 0x04000000) >> 15);
13057 addend = (addend ^ 0x8000) - 0x8000;
13058 break;
13059
13060 default:
13061 if (howto->rightshift
13062 || (howto->src_mask & (howto->src_mask + 1)))
13063 {
13064 _bfd_error_handler
13065 /* xgettext:c-format */
13066 (_("%pB(%pA+%#" PRIx64 "): "
13067 "%s relocation against SEC_MERGE section"),
13068 input_bfd, input_section,
13069 (uint64_t) rel->r_offset, howto->name);
13070 return FALSE;
13071 }
13072
13073 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13074
13075 /* Get the (signed) value from the instruction. */
13076 addend = value & howto->src_mask;
13077 if (addend & ((howto->src_mask + 1) >> 1))
13078 {
13079 bfd_signed_vma mask;
13080
13081 mask = -1;
13082 mask &= ~ howto->src_mask;
13083 addend |= mask;
13084 }
13085 break;
13086 }
13087
13088 msec = sec;
13089 addend =
13090 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13091 - relocation;
13092 addend += msec->output_section->vma + msec->output_offset;
13093
13094 /* Cases here must match those in the preceding
13095 switch statement. */
13096 switch (r_type)
13097 {
13098 case R_ARM_MOVW_ABS_NC:
13099 case R_ARM_MOVT_ABS:
13100 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13101 | (addend & 0xfff);
13102 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13103 break;
13104
13105 case R_ARM_THM_MOVW_ABS_NC:
13106 case R_ARM_THM_MOVT_ABS:
13107 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13108 | (addend & 0xff) | ((addend & 0x0800) << 15);
13109 bfd_put_16 (input_bfd, value >> 16,
13110 contents + rel->r_offset);
13111 bfd_put_16 (input_bfd, value,
13112 contents + rel->r_offset + 2);
13113 break;
13114
13115 default:
13116 value = (value & ~ howto->dst_mask)
13117 | (addend & howto->dst_mask);
13118 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13119 break;
13120 }
13121 }
13122 }
13123 else
13124 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13125 }
13126 else
13127 {
13128 bfd_boolean warned, ignored;
13129
13130 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13131 r_symndx, symtab_hdr, sym_hashes,
13132 h, sec, relocation,
13133 unresolved_reloc, warned, ignored);
13134
13135 sym_type = h->type;
13136 }
13137
13138 if (sec != NULL && discarded_section (sec))
13139 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13140 rel, 1, relend, howto, 0, contents);
13141
13142 if (bfd_link_relocatable (info))
13143 {
13144 /* This is a relocatable link. We don't have to change
13145 anything, unless the reloc is against a section symbol,
13146 in which case we have to adjust according to where the
13147 section symbol winds up in the output section. */
13148 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13149 {
13150 if (globals->use_rel)
13151 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13152 howto, (bfd_signed_vma) sec->output_offset);
13153 else
13154 rel->r_addend += sec->output_offset;
13155 }
13156 continue;
13157 }
13158
13159 if (h != NULL)
13160 name = h->root.root.string;
13161 else
13162 {
13163 name = (bfd_elf_string_from_elf_section
13164 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13165 if (name == NULL || *name == '\0')
13166 name = bfd_section_name (input_bfd, sec);
13167 }
13168
13169 if (r_symndx != STN_UNDEF
13170 && r_type != R_ARM_NONE
13171 && (h == NULL
13172 || h->root.type == bfd_link_hash_defined
13173 || h->root.type == bfd_link_hash_defweak)
13174 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13175 {
13176 _bfd_error_handler
13177 ((sym_type == STT_TLS
13178 /* xgettext:c-format */
13179 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13180 /* xgettext:c-format */
13181 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13182 input_bfd,
13183 input_section,
13184 (uint64_t) rel->r_offset,
13185 howto->name,
13186 name);
13187 }
13188
13189 /* We call elf32_arm_final_link_relocate unless we're completely
13190 done, i.e., the relaxation produced the final output we want,
13191 and we won't let anybody mess with it. Also, we have to do
13192 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13193 both in relaxed and non-relaxed cases. */
13194 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13195 || (IS_ARM_TLS_GNU_RELOC (r_type)
13196 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13197 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13198 & GOT_TLS_GDESC)))
13199 {
13200 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13201 contents, rel, h == NULL);
13202 /* This may have been marked unresolved because it came from
13203 a shared library. But we've just dealt with that. */
13204 unresolved_reloc = 0;
13205 }
13206 else
13207 r = bfd_reloc_continue;
13208
13209 if (r == bfd_reloc_continue)
13210 {
13211 unsigned char branch_type =
13212 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13213 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13214
13215 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13216 input_section, contents, rel,
13217 relocation, info, sec, name,
13218 sym_type, branch_type, h,
13219 &unresolved_reloc,
13220 &error_message);
13221 }
13222
13223 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13224 because such sections are not SEC_ALLOC and thus ld.so will
13225 not process them. */
13226 if (unresolved_reloc
13227 && !((input_section->flags & SEC_DEBUGGING) != 0
13228 && h->def_dynamic)
13229 && _bfd_elf_section_offset (output_bfd, info, input_section,
13230 rel->r_offset) != (bfd_vma) -1)
13231 {
13232 _bfd_error_handler
13233 /* xgettext:c-format */
13234 (_("%pB(%pA+%#" PRIx64 "): "
13235 "unresolvable %s relocation against symbol `%s'"),
13236 input_bfd,
13237 input_section,
13238 (uint64_t) rel->r_offset,
13239 howto->name,
13240 h->root.root.string);
13241 return FALSE;
13242 }
13243
13244 if (r != bfd_reloc_ok)
13245 {
13246 switch (r)
13247 {
13248 case bfd_reloc_overflow:
13249 /* If the overflowing reloc was to an undefined symbol,
13250 we have already printed one error message and there
13251 is no point complaining again. */
13252 if (!h || h->root.type != bfd_link_hash_undefined)
13253 (*info->callbacks->reloc_overflow)
13254 (info, (h ? &h->root : NULL), name, howto->name,
13255 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13256 break;
13257
13258 case bfd_reloc_undefined:
13259 (*info->callbacks->undefined_symbol)
13260 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13261 break;
13262
13263 case bfd_reloc_outofrange:
13264 error_message = _("out of range");
13265 goto common_error;
13266
13267 case bfd_reloc_notsupported:
13268 error_message = _("unsupported relocation");
13269 goto common_error;
13270
13271 case bfd_reloc_dangerous:
13272 /* error_message should already be set. */
13273 goto common_error;
13274
13275 default:
13276 error_message = _("unknown error");
13277 /* Fall through. */
13278
13279 common_error:
13280 BFD_ASSERT (error_message != NULL);
13281 (*info->callbacks->reloc_dangerous)
13282 (info, error_message, input_bfd, input_section, rel->r_offset);
13283 break;
13284 }
13285 }
13286 }
13287
13288 return TRUE;
13289 }
13290
13291 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13292 adds the edit to the start of the list. (The list must be built in order of
13293 ascending TINDEX: the function's callers are primarily responsible for
13294 maintaining that condition). */
13295
13296 static void
13297 add_unwind_table_edit (arm_unwind_table_edit **head,
13298 arm_unwind_table_edit **tail,
13299 arm_unwind_edit_type type,
13300 asection *linked_section,
13301 unsigned int tindex)
13302 {
13303 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13304 xmalloc (sizeof (arm_unwind_table_edit));
13305
13306 new_edit->type = type;
13307 new_edit->linked_section = linked_section;
13308 new_edit->index = tindex;
13309
13310 if (tindex > 0)
13311 {
13312 new_edit->next = NULL;
13313
13314 if (*tail)
13315 (*tail)->next = new_edit;
13316
13317 (*tail) = new_edit;
13318
13319 if (!*head)
13320 (*head) = new_edit;
13321 }
13322 else
13323 {
13324 new_edit->next = *head;
13325
13326 if (!*tail)
13327 *tail = new_edit;
13328
13329 *head = new_edit;
13330 }
13331 }
13332
13333 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13334
13335 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13336 static void
13337 adjust_exidx_size(asection *exidx_sec, int adjust)
13338 {
13339 asection *out_sec;
13340
13341 if (!exidx_sec->rawsize)
13342 exidx_sec->rawsize = exidx_sec->size;
13343
13344 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13345 out_sec = exidx_sec->output_section;
13346 /* Adjust size of output section. */
13347 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
13348 }
13349
13350 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13351 static void
13352 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13353 {
13354 struct _arm_elf_section_data *exidx_arm_data;
13355
13356 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13357 add_unwind_table_edit (
13358 &exidx_arm_data->u.exidx.unwind_edit_list,
13359 &exidx_arm_data->u.exidx.unwind_edit_tail,
13360 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13361
13362 exidx_arm_data->additional_reloc_count++;
13363
13364 adjust_exidx_size(exidx_sec, 8);
13365 }
13366
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Unwind state of the previously-seen entry: -1 = none yet,
     0 = EXIDX_CANTUNWIND, 1 = inlined unwind data, 2 = table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  /* Only .ARM.exidx sections are of interest here.  */
	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  /* Mark the gap in coverage with a CANTUNWIND entry after the
	     previous covered section.  */
	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      /* Use cached contents if available, else read the section.  */
      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table 8 bytes (one entry) at a time; the second word of
	 each entry tells us what kind of entry it is.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13551
13552 static bfd_boolean
13553 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13554 bfd *ibfd, const char *name)
13555 {
13556 asection *sec, *osec;
13557
13558 sec = bfd_get_linker_section (ibfd, name);
13559 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13560 return TRUE;
13561
13562 osec = sec->output_section;
13563 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13564 return TRUE;
13565
13566 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13567 sec->output_offset, sec->size))
13568 return FALSE;
13569
13570 return TRUE;
13571 }
13572
13573 static bfd_boolean
13574 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13575 {
13576 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13577 asection *sec, *osec;
13578
13579 if (globals == NULL)
13580 return FALSE;
13581
13582 /* Invoke the regular ELF backend linker to do all the work. */
13583 if (!bfd_elf_final_link (abfd, info))
13584 return FALSE;
13585
13586 /* Process stub sections (eg BE8 encoding, ...). */
13587 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13588 unsigned int i;
13589 for (i=0; i<htab->top_id; i++)
13590 {
13591 sec = htab->stub_group[i].stub_sec;
13592 /* Only process it once, in its link_sec slot. */
13593 if (sec && i == htab->stub_group[i].link_sec->id)
13594 {
13595 osec = sec->output_section;
13596 elf32_arm_write_section (abfd, info, sec, sec->contents);
13597 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13598 sec->output_offset, sec->size))
13599 return FALSE;
13600 }
13601 }
13602
13603 /* Write out any glue sections now that we have created all the
13604 stubs. */
13605 if (globals->bfd_of_glue_owner != NULL)
13606 {
13607 if (! elf32_arm_output_glue_section (info, abfd,
13608 globals->bfd_of_glue_owner,
13609 ARM2THUMB_GLUE_SECTION_NAME))
13610 return FALSE;
13611
13612 if (! elf32_arm_output_glue_section (info, abfd,
13613 globals->bfd_of_glue_owner,
13614 THUMB2ARM_GLUE_SECTION_NAME))
13615 return FALSE;
13616
13617 if (! elf32_arm_output_glue_section (info, abfd,
13618 globals->bfd_of_glue_owner,
13619 VFP11_ERRATUM_VENEER_SECTION_NAME))
13620 return FALSE;
13621
13622 if (! elf32_arm_output_glue_section (info, abfd,
13623 globals->bfd_of_glue_owner,
13624 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13625 return FALSE;
13626
13627 if (! elf32_arm_output_glue_section (info, abfd,
13628 globals->bfd_of_glue_owner,
13629 ARM_BX_GLUE_SECTION_NAME))
13630 return FALSE;
13631 }
13632
13633 return TRUE;
13634 }
13635
13636 /* Return a best guess for the machine number based on the attributes. */
13637
13638 static unsigned int
13639 bfd_arm_get_mach_from_attributes (bfd * abfd)
13640 {
13641 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13642
13643 switch (arch)
13644 {
13645 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13646 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13647 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13648 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13649
13650 case TAG_CPU_ARCH_V5TE:
13651 {
13652 char * name;
13653
13654 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13655 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13656
13657 if (name)
13658 {
13659 if (strcmp (name, "IWMMXT2") == 0)
13660 return bfd_mach_arm_iWMMXt2;
13661
13662 if (strcmp (name, "IWMMXT") == 0)
13663 return bfd_mach_arm_iWMMXt;
13664
13665 if (strcmp (name, "XSCALE") == 0)
13666 {
13667 int wmmx;
13668
13669 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13670 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13671 switch (wmmx)
13672 {
13673 case 1: return bfd_mach_arm_iWMMXt;
13674 case 2: return bfd_mach_arm_iWMMXt2;
13675 default: return bfd_mach_arm_XScale;
13676 }
13677 }
13678 }
13679
13680 return bfd_mach_arm_5TE;
13681 }
13682
13683 case TAG_CPU_ARCH_V5TEJ:
13684 return bfd_mach_arm_5TEJ;
13685 case TAG_CPU_ARCH_V6:
13686 return bfd_mach_arm_6;
13687 case TAG_CPU_ARCH_V6KZ:
13688 return bfd_mach_arm_6KZ;
13689 case TAG_CPU_ARCH_V6T2:
13690 return bfd_mach_arm_6T2;
13691 case TAG_CPU_ARCH_V6K:
13692 return bfd_mach_arm_6K;
13693 case TAG_CPU_ARCH_V7:
13694 return bfd_mach_arm_7;
13695 case TAG_CPU_ARCH_V6_M:
13696 return bfd_mach_arm_6M;
13697 case TAG_CPU_ARCH_V6S_M:
13698 return bfd_mach_arm_6SM;
13699 case TAG_CPU_ARCH_V7E_M:
13700 return bfd_mach_arm_7EM;
13701 case TAG_CPU_ARCH_V8:
13702 return bfd_mach_arm_8;
13703 case TAG_CPU_ARCH_V8R:
13704 return bfd_mach_arm_8R;
13705 case TAG_CPU_ARCH_V8M_BASE:
13706 return bfd_mach_arm_8M_BASE;
13707 case TAG_CPU_ARCH_V8M_MAIN:
13708 return bfd_mach_arm_8M_MAIN;
13709
13710 default:
13711 /* Force entry to be added for any new known Tag_CPU_arch value. */
13712 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13713
13714 /* Unknown Tag_CPU_arch value. */
13715 return bfd_mach_arm_unknown;
13716 }
13717 }
13718
13719 /* Set the right machine number. */
13720
13721 static bfd_boolean
13722 elf32_arm_object_p (bfd *abfd)
13723 {
13724 unsigned int mach;
13725
13726 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13727
13728 if (mach == bfd_mach_arm_unknown)
13729 {
13730 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13731 mach = bfd_mach_arm_ep9312;
13732 else
13733 mach = bfd_arm_get_mach_from_attributes (abfd);
13734 }
13735
13736 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13737 return TRUE;
13738 }
13739
13740 /* Function to keep ARM specific flags in the ELF header. */
13741
13742 static bfd_boolean
13743 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13744 {
13745 if (elf_flags_init (abfd)
13746 && elf_elfheader (abfd)->e_flags != flags)
13747 {
13748 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13749 {
13750 if (flags & EF_ARM_INTERWORK)
13751 _bfd_error_handler
13752 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13753 abfd);
13754 else
13755 _bfd_error_handler
13756 (_("warning: clearing the interworking flag of %pB due to outside request"),
13757 abfd);
13758 }
13759 }
13760 else
13761 {
13762 elf_elfheader (abfd)->e_flags = flags;
13763 elf_flags_init (abfd) = TRUE;
13764 }
13765
13766 return TRUE;
13767 }
13768
13769 /* Copy backend specific data from one object module to another. */
13770
13771 static bfd_boolean
13772 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13773 {
13774 flagword in_flags;
13775 flagword out_flags;
13776
13777 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13778 return TRUE;
13779
13780 in_flags = elf_elfheader (ibfd)->e_flags;
13781 out_flags = elf_elfheader (obfd)->e_flags;
13782
13783 if (elf_flags_init (obfd)
13784 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13785 && in_flags != out_flags)
13786 {
13787 /* Cannot mix APCS26 and APCS32 code. */
13788 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13789 return FALSE;
13790
13791 /* Cannot mix float APCS and non-float APCS code. */
13792 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13793 return FALSE;
13794
13795 /* If the src and dest have different interworking flags
13796 then turn off the interworking bit. */
13797 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13798 {
13799 if (out_flags & EF_ARM_INTERWORK)
13800 _bfd_error_handler
13801 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13802 obfd, ibfd);
13803
13804 in_flags &= ~EF_ARM_INTERWORK;
13805 }
13806
13807 /* Likewise for PIC, though don't warn for this case. */
13808 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13809 in_flags &= ~EF_ARM_PIC;
13810 }
13811
13812 elf_elfheader (obfd)->e_flags = in_flags;
13813 elf_flags_init (obfd) = TRUE;
13814
13815 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13816 }
13817
/* Values for Tag_ABI_PCS_R9_use (see the ARM EABI addenda).  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary callee-saved register (v6).  */
  AEABI_R9_SB,		/* R9 used as SB, the static base register.  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used at all by the code.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW static data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW static data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW static data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW static data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* No enumeration values used.  */
  AEABI_enum_short,		/* Enums packed into their smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits wide.  */
  AEABI_enum_forced_wide	/* Enums forced to 32 bits by the ABI.  */
};
13844
13845 /* Determine whether an object attribute tag takes an integer, a
13846 string or both. */
13847
13848 static int
13849 elf32_arm_obj_attrs_arg_type (int tag)
13850 {
13851 if (tag == Tag_compatibility)
13852 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13853 else if (tag == Tag_nodefaults)
13854 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13855 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13856 return ATTR_TYPE_FLAG_STR_VAL;
13857 else if (tag < 32)
13858 return ATTR_TYPE_FLAG_INT_VAL;
13859 else
13860 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13861 }
13862
13863 /* The ABI defines that Tag_conformance should be emitted first, and that
13864 Tag_nodefaults should be second (if either is defined). This sets those
13865 two positions, and bumps up the position of all the remaining tags to
13866 compensate. */
13867 static int
13868 elf32_arm_obj_attrs_order (int num)
13869 {
13870 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13871 return Tag_conformance;
13872 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13873 return Tag_nodefaults;
13874 if ((num - 2) < Tag_nodefaults)
13875 return num - 2;
13876 if ((num - 1) < Tag_conformance)
13877 return num - 1;
13878 return num;
13879 }
13880
13881 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13882 static bfd_boolean
13883 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13884 {
13885 if ((tag & 127) < 64)
13886 {
13887 _bfd_error_handler
13888 (_("%pB: unknown mandatory EABI object attribute %d"),
13889 abfd, tag);
13890 bfd_set_error (bfd_error_bad_value);
13891 return FALSE;
13892 }
13893 else
13894 {
13895 _bfd_error_handler
13896 (_("warning: %pB: unknown EABI object attribute %d"),
13897 abfd, tag);
13898 return TRUE;
13899 }
13900 }
13901
13902 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13903 Returns -1 if no architecture could be read. */
13904
13905 static int
13906 get_secondary_compatible_arch (bfd *abfd)
13907 {
13908 obj_attribute *attr =
13909 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13910
13911 /* Note: the tag and its argument below are uleb128 values, though
13912 currently-defined values fit in one byte for each. */
13913 if (attr->s
13914 && attr->s[0] == Tag_CPU_arch
13915 && (attr->s[1] & 128) != 128
13916 && attr->s[2] == 0)
13917 return attr->s[1];
13918
13919 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13920 return -1;
13921 }
13922
13923 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13924 The tag is removed if ARCH is -1. */
13925
13926 static void
13927 set_secondary_compatible_arch (bfd *abfd, int arch)
13928 {
13929 obj_attribute *attr =
13930 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13931
13932 if (arch == -1)
13933 {
13934 attr->s = NULL;
13935 return;
13936 }
13937
13938 /* Note: the tag and its argument below are uleb128 values, though
13939 currently-defined values fit in one byte for each. */
13940 if (!attr->s)
13941 attr->s = (char *) bfd_alloc (abfd, 3);
13942 attr->s[0] = Tag_CPU_arch;
13943 attr->s[1] = arch;
13944 attr->s[2] = '\0';
13945 }
13946
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  Returns the merged tag, or -1 (after reporting an error)
   if OLDTAG and NEWTAG describe incompatible architectures.
   *SECONDARY_COMPAT_OUT is updated to the merged
   Tag_also_compatible_with architecture, or -1 if none applies.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vNN table below gives, for "new architecture vNN" (the higher
     of the two tags), the merged result for every possible lower tag;
     -1 marks an incompatible combination.  Entry order must match the
     TAG_CPU_ARCH_* numbering.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      -1,          /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,          /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* V8R.  */
      T(V8M_BASE)  /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* V8R.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN)  /* V8-M MAINLINE.  */
    };
  /* Pseudo-architecture used when an object is compatible with both V4T
     and V6_M via Tag_also_compatible_with.  */
  const int v4t_plus_v6_m[] =
    {
      -1,              /* PRE_V4.  */
      -1,              /* V4.  */
      T(V4T),          /* V4T.  */
      T(V5T),          /* V5T.  */
      T(V5TE),         /* V5TE.  */
      T(V5TEJ),        /* V5TEJ.  */
      T(V6),           /* V6.  */
      T(V6KZ),         /* V6KZ.  */
      T(V6T2),         /* V6T2.  */
      T(V6K),          /* V6K.  */
      T(V7),           /* V7.  */
      T(V6_M),         /* V6_M.  */
      T(V6S_M),        /* V6S_M.  */
      T(V7E_M),        /* V7E_M.  */
      T(V8),           /* V8.  */
      -1,              /* V8R.  */
      T(V8M_BASE),     /* V8-M BASELINE.  */
      T(V8M_MAIN),     /* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Dispatch table: one merge table per tag value above V6KZ, indexed
     by tagh - T(V6T2) below.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Look up the merge of the two tags in the table for the higher one.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14209
14210 /* Query attributes object to see if integer divide instructions may be
14211 present in an object. */
14212 static bfd_boolean
14213 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14214 {
14215 int arch = attr[Tag_CPU_arch].i;
14216 int profile = attr[Tag_CPU_arch_profile].i;
14217
14218 switch (attr[Tag_DIV_use].i)
14219 {
14220 case 0:
14221 /* Integer divide allowed if instruction contained in archetecture. */
14222 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14223 return TRUE;
14224 else if (arch >= TAG_CPU_ARCH_V7E_M)
14225 return TRUE;
14226 else
14227 return FALSE;
14228
14229 case 1:
14230 /* Integer divide explicitly prohibited. */
14231 return FALSE;
14232
14233 default:
14234 /* Unrecognised case - treat as allowing divide everywhere. */
14235 case 2:
14236 /* Integer divide allowed in ARM state. */
14237 return TRUE;
14238 }
14239 }
14240
14241 /* Query attributes object to see if integer divide instructions are
14242 forbidden to be in the object. This is not the inverse of
14243 elf32_arm_attributes_accept_div. */
14244 static bfd_boolean
14245 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14246 {
14247 return attr[Tag_DIV_use].i == 1;
14248 }
14249
14250 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14251 are conflicting attributes. */
14252
14253 static bfd_boolean
14254 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14255 {
14256 bfd *obfd = info->output_bfd;
14257 obj_attribute *in_attr;
14258 obj_attribute *out_attr;
14259 /* Some tags have 0 = don't care, 1 = strong requirement,
14260 2 = weak requirement. */
14261 static const int order_021[3] = {0, 2, 1};
14262 int i;
14263 bfd_boolean result = TRUE;
14264 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14265
14266 /* Skip the linker stubs file. This preserves previous behavior
14267 of accepting unknown attributes in the first input file - but
14268 is that a bug? */
14269 if (ibfd->flags & BFD_LINKER_CREATED)
14270 return TRUE;
14271
14272 /* Skip any input that hasn't attribute section.
14273 This enables to link object files without attribute section with
14274 any others. */
14275 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14276 return TRUE;
14277
14278 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14279 {
14280 /* This is the first object. Copy the attributes. */
14281 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14282
14283 out_attr = elf_known_obj_attributes_proc (obfd);
14284
14285 /* Use the Tag_null value to indicate the attributes have been
14286 initialized. */
14287 out_attr[0].i = 1;
14288
14289 /* We do not output objects with Tag_MPextension_use_legacy - we move
14290 the attribute's value to Tag_MPextension_use. */
14291 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14292 {
14293 if (out_attr[Tag_MPextension_use].i != 0
14294 && out_attr[Tag_MPextension_use_legacy].i
14295 != out_attr[Tag_MPextension_use].i)
14296 {
14297 _bfd_error_handler
14298 (_("Error: %pB has both the current and legacy "
14299 "Tag_MPextension_use attributes"), ibfd);
14300 result = FALSE;
14301 }
14302
14303 out_attr[Tag_MPextension_use] =
14304 out_attr[Tag_MPextension_use_legacy];
14305 out_attr[Tag_MPextension_use_legacy].type = 0;
14306 out_attr[Tag_MPextension_use_legacy].i = 0;
14307 }
14308
14309 return result;
14310 }
14311
14312 in_attr = elf_known_obj_attributes_proc (ibfd);
14313 out_attr = elf_known_obj_attributes_proc (obfd);
14314 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14315 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14316 {
14317 /* Ignore mismatches if the object doesn't use floating point or is
14318 floating point ABI independent. */
14319 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14320 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14321 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14322 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14323 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14324 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14325 {
14326 _bfd_error_handler
14327 (_("error: %pB uses VFP register arguments, %pB does not"),
14328 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14329 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14330 result = FALSE;
14331 }
14332 }
14333
14334 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14335 {
14336 /* Merge this attribute with existing attributes. */
14337 switch (i)
14338 {
14339 case Tag_CPU_raw_name:
14340 case Tag_CPU_name:
14341 /* These are merged after Tag_CPU_arch. */
14342 break;
14343
14344 case Tag_ABI_optimization_goals:
14345 case Tag_ABI_FP_optimization_goals:
14346 /* Use the first value seen. */
14347 break;
14348
14349 case Tag_CPU_arch:
14350 {
14351 int secondary_compat = -1, secondary_compat_out = -1;
14352 unsigned int saved_out_attr = out_attr[i].i;
14353 int arch_attr;
14354 static const char *name_table[] =
14355 {
14356 /* These aren't real CPU names, but we can't guess
14357 that from the architecture version alone. */
14358 "Pre v4",
14359 "ARM v4",
14360 "ARM v4T",
14361 "ARM v5T",
14362 "ARM v5TE",
14363 "ARM v5TEJ",
14364 "ARM v6",
14365 "ARM v6KZ",
14366 "ARM v6T2",
14367 "ARM v6K",
14368 "ARM v7",
14369 "ARM v6-M",
14370 "ARM v6S-M",
14371 "ARM v8",
14372 "",
14373 "ARM v8-M.baseline",
14374 "ARM v8-M.mainline",
14375 };
14376
14377 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14378 secondary_compat = get_secondary_compatible_arch (ibfd);
14379 secondary_compat_out = get_secondary_compatible_arch (obfd);
14380 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14381 &secondary_compat_out,
14382 in_attr[i].i,
14383 secondary_compat);
14384
14385 /* Return with error if failed to merge. */
14386 if (arch_attr == -1)
14387 return FALSE;
14388
14389 out_attr[i].i = arch_attr;
14390
14391 set_secondary_compatible_arch (obfd, secondary_compat_out);
14392
14393 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14394 if (out_attr[i].i == saved_out_attr)
14395 ; /* Leave the names alone. */
14396 else if (out_attr[i].i == in_attr[i].i)
14397 {
14398 /* The output architecture has been changed to match the
14399 input architecture. Use the input names. */
14400 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14401 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14402 : NULL;
14403 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14404 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14405 : NULL;
14406 }
14407 else
14408 {
14409 out_attr[Tag_CPU_name].s = NULL;
14410 out_attr[Tag_CPU_raw_name].s = NULL;
14411 }
14412
14413 /* If we still don't have a value for Tag_CPU_name,
14414 make one up now. Tag_CPU_raw_name remains blank. */
14415 if (out_attr[Tag_CPU_name].s == NULL
14416 && out_attr[i].i < ARRAY_SIZE (name_table))
14417 out_attr[Tag_CPU_name].s =
14418 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14419 }
14420 break;
14421
14422 case Tag_ARM_ISA_use:
14423 case Tag_THUMB_ISA_use:
14424 case Tag_WMMX_arch:
14425 case Tag_Advanced_SIMD_arch:
14426 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14427 case Tag_ABI_FP_rounding:
14428 case Tag_ABI_FP_exceptions:
14429 case Tag_ABI_FP_user_exceptions:
14430 case Tag_ABI_FP_number_model:
14431 case Tag_FP_HP_extension:
14432 case Tag_CPU_unaligned_access:
14433 case Tag_T2EE_use:
14434 case Tag_MPextension_use:
14435 /* Use the largest value specified. */
14436 if (in_attr[i].i > out_attr[i].i)
14437 out_attr[i].i = in_attr[i].i;
14438 break;
14439
14440 case Tag_ABI_align_preserved:
14441 case Tag_ABI_PCS_RO_data:
14442 /* Use the smallest value specified. */
14443 if (in_attr[i].i < out_attr[i].i)
14444 out_attr[i].i = in_attr[i].i;
14445 break;
14446
14447 case Tag_ABI_align_needed:
14448 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14449 && (in_attr[Tag_ABI_align_preserved].i == 0
14450 || out_attr[Tag_ABI_align_preserved].i == 0))
14451 {
14452 /* This error message should be enabled once all non-conformant
14453 binaries in the toolchain have had the attributes set
14454 properly.
14455 _bfd_error_handler
14456 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14457 obfd, ibfd);
14458 result = FALSE; */
14459 }
14460 /* Fall through. */
14461 case Tag_ABI_FP_denormal:
14462 case Tag_ABI_PCS_GOT_use:
14463 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14464 value if greater than 2 (for future-proofing). */
14465 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14466 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14467 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14468 out_attr[i].i = in_attr[i].i;
14469 break;
14470
14471 case Tag_Virtualization_use:
14472 /* The virtualization tag effectively stores two bits of
14473 information: the intended use of TrustZone (in bit 0), and the
14474 intended use of Virtualization (in bit 1). */
14475 if (out_attr[i].i == 0)
14476 out_attr[i].i = in_attr[i].i;
14477 else if (in_attr[i].i != 0
14478 && in_attr[i].i != out_attr[i].i)
14479 {
14480 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14481 out_attr[i].i = 3;
14482 else
14483 {
14484 _bfd_error_handler
14485 (_("error: %pB: unable to merge virtualization attributes "
14486 "with %pB"),
14487 obfd, ibfd);
14488 result = FALSE;
14489 }
14490 }
14491 break;
14492
14493 case Tag_CPU_arch_profile:
14494 if (out_attr[i].i != in_attr[i].i)
14495 {
14496 /* 0 will merge with anything.
14497 'A' and 'S' merge to 'A'.
14498 'R' and 'S' merge to 'R'.
14499 'M' and 'A|R|S' is an error. */
14500 if (out_attr[i].i == 0
14501 || (out_attr[i].i == 'S'
14502 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14503 out_attr[i].i = in_attr[i].i;
14504 else if (in_attr[i].i == 0
14505 || (in_attr[i].i == 'S'
14506 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14507 ; /* Do nothing. */
14508 else
14509 {
14510 _bfd_error_handler
14511 (_("error: %pB: conflicting architecture profiles %c/%c"),
14512 ibfd,
14513 in_attr[i].i ? in_attr[i].i : '0',
14514 out_attr[i].i ? out_attr[i].i : '0');
14515 result = FALSE;
14516 }
14517 }
14518 break;
14519
14520 case Tag_DSP_extension:
14521 /* No need to change output value if any of:
14522 - pre (<=) ARMv5T input architecture (do not have DSP)
14523 - M input profile not ARMv7E-M and do not have DSP. */
14524 if (in_attr[Tag_CPU_arch].i <= 3
14525 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14526 && in_attr[Tag_CPU_arch].i != 13
14527 && in_attr[i].i == 0))
14528 ; /* Do nothing. */
14529 /* Output value should be 0 if DSP part of architecture, ie.
14530 - post (>=) ARMv5te architecture output
14531 - A, R or S profile output or ARMv7E-M output architecture. */
14532 else if (out_attr[Tag_CPU_arch].i >= 4
14533 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14534 || out_attr[Tag_CPU_arch_profile].i == 'R'
14535 || out_attr[Tag_CPU_arch_profile].i == 'S'
14536 || out_attr[Tag_CPU_arch].i == 13))
14537 out_attr[i].i = 0;
14538 /* Otherwise, DSP instructions are added and not part of output
14539 architecture. */
14540 else
14541 out_attr[i].i = 1;
14542 break;
14543
14544 case Tag_FP_arch:
14545 {
14546 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14547 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14548 when it's 0. It might mean absence of FP hardware if
14549 Tag_FP_arch is zero. */
14550
14551 #define VFP_VERSION_COUNT 9
14552 static const struct
14553 {
14554 int ver;
14555 int regs;
14556 } vfp_versions[VFP_VERSION_COUNT] =
14557 {
14558 {0, 0},
14559 {1, 16},
14560 {2, 16},
14561 {3, 32},
14562 {3, 16},
14563 {4, 32},
14564 {4, 16},
14565 {8, 32},
14566 {8, 16}
14567 };
14568 int ver;
14569 int regs;
14570 int newval;
14571
14572 /* If the output has no requirement about FP hardware,
14573 follow the requirement of the input. */
14574 if (out_attr[i].i == 0)
14575 {
14576 /* This assert is still reasonable, we shouldn't
14577 produce the suspicious build attribute
14578 combination (See below for in_attr). */
14579 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14580 out_attr[i].i = in_attr[i].i;
14581 out_attr[Tag_ABI_HardFP_use].i
14582 = in_attr[Tag_ABI_HardFP_use].i;
14583 break;
14584 }
14585 /* If the input has no requirement about FP hardware, do
14586 nothing. */
14587 else if (in_attr[i].i == 0)
14588 {
14589 /* We used to assert that Tag_ABI_HardFP_use was
14590 zero here, but we should never assert when
14591 consuming an object file that has suspicious
14592 build attributes. The single precision variant
14593 of 'no FP architecture' is still 'no FP
14594 architecture', so we just ignore the tag in this
14595 case. */
14596 break;
14597 }
14598
14599 /* Both the input and the output have nonzero Tag_FP_arch.
14600 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14601
14602 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14603 do nothing. */
14604 if (in_attr[Tag_ABI_HardFP_use].i == 0
14605 && out_attr[Tag_ABI_HardFP_use].i == 0)
14606 ;
14607 /* If the input and the output have different Tag_ABI_HardFP_use,
14608 the combination of them is 0 (implied by Tag_FP_arch). */
14609 else if (in_attr[Tag_ABI_HardFP_use].i
14610 != out_attr[Tag_ABI_HardFP_use].i)
14611 out_attr[Tag_ABI_HardFP_use].i = 0;
14612
14613 /* Now we can handle Tag_FP_arch. */
14614
14615 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14616 pick the biggest. */
14617 if (in_attr[i].i >= VFP_VERSION_COUNT
14618 && in_attr[i].i > out_attr[i].i)
14619 {
14620 out_attr[i] = in_attr[i];
14621 break;
14622 }
14623 /* The output uses the superset of input features
14624 (ISA version) and registers. */
14625 ver = vfp_versions[in_attr[i].i].ver;
14626 if (ver < vfp_versions[out_attr[i].i].ver)
14627 ver = vfp_versions[out_attr[i].i].ver;
14628 regs = vfp_versions[in_attr[i].i].regs;
14629 if (regs < vfp_versions[out_attr[i].i].regs)
14630 regs = vfp_versions[out_attr[i].i].regs;
14631 /* This assumes all possible supersets are also a valid
14632 options. */
14633 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14634 {
14635 if (regs == vfp_versions[newval].regs
14636 && ver == vfp_versions[newval].ver)
14637 break;
14638 }
14639 out_attr[i].i = newval;
14640 }
14641 break;
14642 case Tag_PCS_config:
14643 if (out_attr[i].i == 0)
14644 out_attr[i].i = in_attr[i].i;
14645 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14646 {
14647 /* It's sometimes ok to mix different configs, so this is only
14648 a warning. */
14649 _bfd_error_handler
14650 (_("warning: %pB: conflicting platform configuration"), ibfd);
14651 }
14652 break;
14653 case Tag_ABI_PCS_R9_use:
14654 if (in_attr[i].i != out_attr[i].i
14655 && out_attr[i].i != AEABI_R9_unused
14656 && in_attr[i].i != AEABI_R9_unused)
14657 {
14658 _bfd_error_handler
14659 (_("error: %pB: conflicting use of R9"), ibfd);
14660 result = FALSE;
14661 }
14662 if (out_attr[i].i == AEABI_R9_unused)
14663 out_attr[i].i = in_attr[i].i;
14664 break;
14665 case Tag_ABI_PCS_RW_data:
14666 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14667 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14668 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14669 {
14670 _bfd_error_handler
14671 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14672 ibfd);
14673 result = FALSE;
14674 }
14675 /* Use the smallest value specified. */
14676 if (in_attr[i].i < out_attr[i].i)
14677 out_attr[i].i = in_attr[i].i;
14678 break;
14679 case Tag_ABI_PCS_wchar_t:
14680 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14681 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14682 {
14683 _bfd_error_handler
14684 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14685 ibfd, in_attr[i].i, out_attr[i].i);
14686 }
14687 else if (in_attr[i].i && !out_attr[i].i)
14688 out_attr[i].i = in_attr[i].i;
14689 break;
14690 case Tag_ABI_enum_size:
14691 if (in_attr[i].i != AEABI_enum_unused)
14692 {
14693 if (out_attr[i].i == AEABI_enum_unused
14694 || out_attr[i].i == AEABI_enum_forced_wide)
14695 {
14696 /* The existing object is compatible with anything.
14697 Use whatever requirements the new object has. */
14698 out_attr[i].i = in_attr[i].i;
14699 }
14700 else if (in_attr[i].i != AEABI_enum_forced_wide
14701 && out_attr[i].i != in_attr[i].i
14702 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14703 {
14704 static const char *aeabi_enum_names[] =
14705 { "", "variable-size", "32-bit", "" };
14706 const char *in_name =
14707 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14708 ? aeabi_enum_names[in_attr[i].i]
14709 : "<unknown>";
14710 const char *out_name =
14711 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14712 ? aeabi_enum_names[out_attr[i].i]
14713 : "<unknown>";
14714 _bfd_error_handler
14715 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14716 ibfd, in_name, out_name);
14717 }
14718 }
14719 break;
14720 case Tag_ABI_VFP_args:
14721 /* Aready done. */
14722 break;
14723 case Tag_ABI_WMMX_args:
14724 if (in_attr[i].i != out_attr[i].i)
14725 {
14726 _bfd_error_handler
14727 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14728 ibfd, obfd);
14729 result = FALSE;
14730 }
14731 break;
14732 case Tag_compatibility:
14733 /* Merged in target-independent code. */
14734 break;
14735 case Tag_ABI_HardFP_use:
14736 /* This is handled along with Tag_FP_arch. */
14737 break;
14738 case Tag_ABI_FP_16bit_format:
14739 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14740 {
14741 if (in_attr[i].i != out_attr[i].i)
14742 {
14743 _bfd_error_handler
14744 (_("error: fp16 format mismatch between %pB and %pB"),
14745 ibfd, obfd);
14746 result = FALSE;
14747 }
14748 }
14749 if (in_attr[i].i != 0)
14750 out_attr[i].i = in_attr[i].i;
14751 break;
14752
14753 case Tag_DIV_use:
14754 /* A value of zero on input means that the divide instruction may
14755 be used if available in the base architecture as specified via
14756 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14757 the user did not want divide instructions. A value of 2
14758 explicitly means that divide instructions were allowed in ARM
14759 and Thumb state. */
14760 if (in_attr[i].i == out_attr[i].i)
14761 /* Do nothing. */ ;
14762 else if (elf32_arm_attributes_forbid_div (in_attr)
14763 && !elf32_arm_attributes_accept_div (out_attr))
14764 out_attr[i].i = 1;
14765 else if (elf32_arm_attributes_forbid_div (out_attr)
14766 && elf32_arm_attributes_accept_div (in_attr))
14767 out_attr[i].i = in_attr[i].i;
14768 else if (in_attr[i].i == 2)
14769 out_attr[i].i = in_attr[i].i;
14770 break;
14771
14772 case Tag_MPextension_use_legacy:
14773 /* We don't output objects with Tag_MPextension_use_legacy - we
14774 move the value to Tag_MPextension_use. */
14775 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14776 {
14777 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14778 {
14779 _bfd_error_handler
14780 (_("%pB has both the current and legacy "
14781 "Tag_MPextension_use attributes"),
14782 ibfd);
14783 result = FALSE;
14784 }
14785 }
14786
14787 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14788 out_attr[Tag_MPextension_use] = in_attr[i];
14789
14790 break;
14791
14792 case Tag_nodefaults:
14793 /* This tag is set if it exists, but the value is unused (and is
14794 typically zero). We don't actually need to do anything here -
14795 the merge happens automatically when the type flags are merged
14796 below. */
14797 break;
14798 case Tag_also_compatible_with:
14799 /* Already done in Tag_CPU_arch. */
14800 break;
14801 case Tag_conformance:
14802 /* Keep the attribute if it matches. Throw it away otherwise.
14803 No attribute means no claim to conform. */
14804 if (!in_attr[i].s || !out_attr[i].s
14805 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14806 out_attr[i].s = NULL;
14807 break;
14808
14809 default:
14810 result
14811 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14812 }
14813
14814 /* If out_attr was copied from in_attr then it won't have a type yet. */
14815 if (in_attr[i].type && !out_attr[i].type)
14816 out_attr[i].type = in_attr[i].type;
14817 }
14818
14819 /* Merge Tag_compatibility attributes and any common GNU ones. */
14820 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14821 return FALSE;
14822
14823 /* Check for any attributes not known on ARM. */
14824 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14825
14826 return result;
14827 }
14828
14829
14830 /* Return TRUE if the two EABI versions are incompatible. */
14831
14832 static bfd_boolean
14833 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14834 {
14835 /* v4 and v5 are the same spec before and after it was released,
14836 so allow mixing them. */
14837 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14838 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14839 return TRUE;
14840
14841 return (iver == over);
14842 }
14843
14844 /* Merge backend specific data from an object file to the output
14845 object file when linking. */
14846
14847 static bfd_boolean
14848 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14849
/* Display the flags field.  PTR is the FILE to print to.  Decodes
   e_flags according to the EABI version encoded in them; each case
   clears the bits it has printed so that any leftover unknown bits
   can be reported at the end.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* The meaning of the flag bits depends on which EABI version the
     object was built for.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear everything decoded above so unknown bits are caught at
	 the end.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* v4 shares the BE8/LE8 decoding with v5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      /* Flag bits common to v4 and v5; VER4 jumps here.  */
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* Flags valid regardless of EABI version.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Any bits still set were not recognised above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
14994
14995 static int
14996 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
14997 {
14998 switch (ELF_ST_TYPE (elf_sym->st_info))
14999 {
15000 case STT_ARM_TFUNC:
15001 return ELF_ST_TYPE (elf_sym->st_info);
15002
15003 case STT_ARM_16BIT:
15004 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15005 This allows us to distinguish between data used by Thumb instructions
15006 and non-data (which is probably code) inside Thumb regions of an
15007 executable. */
15008 if (type != STT_OBJECT && type != STT_TLS)
15009 return ELF_ST_TYPE (elf_sym->st_info);
15010 break;
15011
15012 default:
15013 break;
15014 }
15015
15016 return type;
15017 }
15018
15019 static asection *
15020 elf32_arm_gc_mark_hook (asection *sec,
15021 struct bfd_link_info *info,
15022 Elf_Internal_Rela *rel,
15023 struct elf_link_hash_entry *h,
15024 Elf_Internal_Sym *sym)
15025 {
15026 if (h != NULL)
15027 switch (ELF32_R_TYPE (rel->r_info))
15028 {
15029 case R_ARM_GNU_VTINHERIT:
15030 case R_ARM_GNU_VTENTRY:
15031 return NULL;
15032 }
15033
15034 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15035 }
15036
15037 /* Look through the relocs for a section during the first phase. */
15038
15039 static bfd_boolean
15040 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15041 asection *sec, const Elf_Internal_Rela *relocs)
15042 {
15043 Elf_Internal_Shdr *symtab_hdr;
15044 struct elf_link_hash_entry **sym_hashes;
15045 const Elf_Internal_Rela *rel;
15046 const Elf_Internal_Rela *rel_end;
15047 bfd *dynobj;
15048 asection *sreloc;
15049 struct elf32_arm_link_hash_table *htab;
15050 bfd_boolean call_reloc_p;
15051 bfd_boolean may_become_dynamic_p;
15052 bfd_boolean may_need_local_target_p;
15053 unsigned long nsyms;
15054
15055 if (bfd_link_relocatable (info))
15056 return TRUE;
15057
15058 BFD_ASSERT (is_arm_elf (abfd));
15059
15060 htab = elf32_arm_hash_table (info);
15061 if (htab == NULL)
15062 return FALSE;
15063
15064 sreloc = NULL;
15065
15066 /* Create dynamic sections for relocatable executables so that we can
15067 copy relocations. */
15068 if (htab->root.is_relocatable_executable
15069 && ! htab->root.dynamic_sections_created)
15070 {
15071 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15072 return FALSE;
15073 }
15074
15075 if (htab->root.dynobj == NULL)
15076 htab->root.dynobj = abfd;
15077 if (!create_ifunc_sections (info))
15078 return FALSE;
15079
15080 dynobj = htab->root.dynobj;
15081
15082 symtab_hdr = & elf_symtab_hdr (abfd);
15083 sym_hashes = elf_sym_hashes (abfd);
15084 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15085
15086 rel_end = relocs + sec->reloc_count;
15087 for (rel = relocs; rel < rel_end; rel++)
15088 {
15089 Elf_Internal_Sym *isym;
15090 struct elf_link_hash_entry *h;
15091 struct elf32_arm_link_hash_entry *eh;
15092 unsigned int r_symndx;
15093 int r_type;
15094
15095 r_symndx = ELF32_R_SYM (rel->r_info);
15096 r_type = ELF32_R_TYPE (rel->r_info);
15097 r_type = arm_real_reloc_type (htab, r_type);
15098
15099 if (r_symndx >= nsyms
15100 /* PR 9934: It is possible to have relocations that do not
15101 refer to symbols, thus it is also possible to have an
15102 object file containing relocations but no symbol table. */
15103 && (r_symndx > STN_UNDEF || nsyms > 0))
15104 {
15105 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15106 r_symndx);
15107 return FALSE;
15108 }
15109
15110 h = NULL;
15111 isym = NULL;
15112 if (nsyms > 0)
15113 {
15114 if (r_symndx < symtab_hdr->sh_info)
15115 {
15116 /* A local symbol. */
15117 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15118 abfd, r_symndx);
15119 if (isym == NULL)
15120 return FALSE;
15121 }
15122 else
15123 {
15124 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15125 while (h->root.type == bfd_link_hash_indirect
15126 || h->root.type == bfd_link_hash_warning)
15127 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15128 }
15129 }
15130
15131 eh = (struct elf32_arm_link_hash_entry *) h;
15132
15133 call_reloc_p = FALSE;
15134 may_become_dynamic_p = FALSE;
15135 may_need_local_target_p = FALSE;
15136
15137 /* Could be done earlier, if h were already available. */
15138 r_type = elf32_arm_tls_transition (info, r_type, h);
15139 switch (r_type)
15140 {
15141 case R_ARM_GOTOFFFUNCDESC:
15142 {
15143 if (h == NULL)
15144 {
15145 if (!elf32_arm_allocate_local_sym_info (abfd))
15146 return FALSE;
15147 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15148 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15149 }
15150 else
15151 {
15152 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15153 }
15154 }
15155 break;
15156
15157 case R_ARM_GOTFUNCDESC:
15158 {
15159 if (h == NULL)
15160 {
15161 /* Such a relocation is not supposed to be generated
15162 by gcc on a static function. */
15163 /* Anyway if needed it could be handled. */
15164 abort();
15165 }
15166 else
15167 {
15168 eh->fdpic_cnts.gotfuncdesc_cnt++;
15169 }
15170 }
15171 break;
15172
15173 case R_ARM_FUNCDESC:
15174 {
15175 if (h == NULL)
15176 {
15177 if (!elf32_arm_allocate_local_sym_info (abfd))
15178 return FALSE;
15179 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15180 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15181 }
15182 else
15183 {
15184 eh->fdpic_cnts.funcdesc_cnt++;
15185 }
15186 }
15187 break;
15188
15189 case R_ARM_GOT32:
15190 case R_ARM_GOT_PREL:
15191 case R_ARM_TLS_GD32:
15192 case R_ARM_TLS_GD32_FDPIC:
15193 case R_ARM_TLS_IE32:
15194 case R_ARM_TLS_IE32_FDPIC:
15195 case R_ARM_TLS_GOTDESC:
15196 case R_ARM_TLS_DESCSEQ:
15197 case R_ARM_THM_TLS_DESCSEQ:
15198 case R_ARM_TLS_CALL:
15199 case R_ARM_THM_TLS_CALL:
15200 /* This symbol requires a global offset table entry. */
15201 {
15202 int tls_type, old_tls_type;
15203
15204 switch (r_type)
15205 {
15206 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15207 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15208
15209 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15210 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15211
15212 case R_ARM_TLS_GOTDESC:
15213 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15214 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15215 tls_type = GOT_TLS_GDESC; break;
15216
15217 default: tls_type = GOT_NORMAL; break;
15218 }
15219
15220 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15221 info->flags |= DF_STATIC_TLS;
15222
15223 if (h != NULL)
15224 {
15225 h->got.refcount++;
15226 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15227 }
15228 else
15229 {
15230 /* This is a global offset table entry for a local symbol. */
15231 if (!elf32_arm_allocate_local_sym_info (abfd))
15232 return FALSE;
15233 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15234 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15235 }
15236
15237 /* If a variable is accessed with both tls methods, two
15238 slots may be created. */
15239 if (GOT_TLS_GD_ANY_P (old_tls_type)
15240 && GOT_TLS_GD_ANY_P (tls_type))
15241 tls_type |= old_tls_type;
15242
15243 /* We will already have issued an error message if there
15244 is a TLS/non-TLS mismatch, based on the symbol
15245 type. So just combine any TLS types needed. */
15246 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15247 && tls_type != GOT_NORMAL)
15248 tls_type |= old_tls_type;
15249
15250 /* If the symbol is accessed in both IE and GDESC
15251 method, we're able to relax. Turn off the GDESC flag,
15252 without messing up with any other kind of tls types
15253 that may be involved. */
15254 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15255 tls_type &= ~GOT_TLS_GDESC;
15256
15257 if (old_tls_type != tls_type)
15258 {
15259 if (h != NULL)
15260 elf32_arm_hash_entry (h)->tls_type = tls_type;
15261 else
15262 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15263 }
15264 }
15265 /* Fall through. */
15266
15267 case R_ARM_TLS_LDM32:
15268 case R_ARM_TLS_LDM32_FDPIC:
15269 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15270 htab->tls_ldm_got.refcount++;
15271 /* Fall through. */
15272
15273 case R_ARM_GOTOFF32:
15274 case R_ARM_GOTPC:
15275 if (htab->root.sgot == NULL
15276 && !create_got_section (htab->root.dynobj, info))
15277 return FALSE;
15278 break;
15279
15280 case R_ARM_PC24:
15281 case R_ARM_PLT32:
15282 case R_ARM_CALL:
15283 case R_ARM_JUMP24:
15284 case R_ARM_PREL31:
15285 case R_ARM_THM_CALL:
15286 case R_ARM_THM_JUMP24:
15287 case R_ARM_THM_JUMP19:
15288 call_reloc_p = TRUE;
15289 may_need_local_target_p = TRUE;
15290 break;
15291
15292 case R_ARM_ABS12:
15293 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15294 ldr __GOTT_INDEX__ offsets. */
15295 if (!htab->vxworks_p)
15296 {
15297 may_need_local_target_p = TRUE;
15298 break;
15299 }
15300 else goto jump_over;
15301
15302 /* Fall through. */
15303
15304 case R_ARM_MOVW_ABS_NC:
15305 case R_ARM_MOVT_ABS:
15306 case R_ARM_THM_MOVW_ABS_NC:
15307 case R_ARM_THM_MOVT_ABS:
15308 if (bfd_link_pic (info))
15309 {
15310 _bfd_error_handler
15311 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15312 abfd, elf32_arm_howto_table_1[r_type].name,
15313 (h) ? h->root.root.string : "a local symbol");
15314 bfd_set_error (bfd_error_bad_value);
15315 return FALSE;
15316 }
15317
15318 /* Fall through. */
15319 case R_ARM_ABS32:
15320 case R_ARM_ABS32_NOI:
15321 jump_over:
15322 if (h != NULL && bfd_link_executable (info))
15323 {
15324 h->pointer_equality_needed = 1;
15325 }
15326 /* Fall through. */
15327 case R_ARM_REL32:
15328 case R_ARM_REL32_NOI:
15329 case R_ARM_MOVW_PREL_NC:
15330 case R_ARM_MOVT_PREL:
15331 case R_ARM_THM_MOVW_PREL_NC:
15332 case R_ARM_THM_MOVT_PREL:
15333
15334 /* Should the interworking branches be listed here? */
15335 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15336 || htab->fdpic_p)
15337 && (sec->flags & SEC_ALLOC) != 0)
15338 {
15339 if (h == NULL
15340 && elf32_arm_howto_from_type (r_type)->pc_relative)
15341 {
15342 /* In shared libraries and relocatable executables,
15343 we treat local relative references as calls;
15344 see the related SYMBOL_CALLS_LOCAL code in
15345 allocate_dynrelocs. */
15346 call_reloc_p = TRUE;
15347 may_need_local_target_p = TRUE;
15348 }
15349 else
15350 /* We are creating a shared library or relocatable
15351 executable, and this is a reloc against a global symbol,
15352 or a non-PC-relative reloc against a local symbol.
15353 We may need to copy the reloc into the output. */
15354 may_become_dynamic_p = TRUE;
15355 }
15356 else
15357 may_need_local_target_p = TRUE;
15358 break;
15359
15360 /* This relocation describes the C++ object vtable hierarchy.
15361 Reconstruct it for later use during GC. */
15362 case R_ARM_GNU_VTINHERIT:
15363 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15364 return FALSE;
15365 break;
15366
15367 /* This relocation describes which C++ vtable entries are actually
15368 used. Record for later use during GC. */
15369 case R_ARM_GNU_VTENTRY:
15370 BFD_ASSERT (h != NULL);
15371 if (h != NULL
15372 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15373 return FALSE;
15374 break;
15375 }
15376
15377 if (h != NULL)
15378 {
15379 if (call_reloc_p)
15380 /* We may need a .plt entry if the function this reloc
15381 refers to is in a different object, regardless of the
15382 symbol's type. We can't tell for sure yet, because
15383 something later might force the symbol local. */
15384 h->needs_plt = 1;
15385 else if (may_need_local_target_p)
15386 /* If this reloc is in a read-only section, we might
15387 need a copy reloc. We can't check reliably at this
15388 stage whether the section is read-only, as input
15389 sections have not yet been mapped to output sections.
15390 Tentatively set the flag for now, and correct in
15391 adjust_dynamic_symbol. */
15392 h->non_got_ref = 1;
15393 }
15394
15395 if (may_need_local_target_p
15396 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15397 {
15398 union gotplt_union *root_plt;
15399 struct arm_plt_info *arm_plt;
15400 struct arm_local_iplt_info *local_iplt;
15401
15402 if (h != NULL)
15403 {
15404 root_plt = &h->plt;
15405 arm_plt = &eh->plt;
15406 }
15407 else
15408 {
15409 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15410 if (local_iplt == NULL)
15411 return FALSE;
15412 root_plt = &local_iplt->root;
15413 arm_plt = &local_iplt->arm;
15414 }
15415
15416 /* If the symbol is a function that doesn't bind locally,
15417 this relocation will need a PLT entry. */
15418 if (root_plt->refcount != -1)
15419 root_plt->refcount += 1;
15420
15421 if (!call_reloc_p)
15422 arm_plt->noncall_refcount++;
15423
15424 /* It's too early to use htab->use_blx here, so we have to
15425 record possible blx references separately from
15426 relocs that definitely need a thumb stub. */
15427
15428 if (r_type == R_ARM_THM_CALL)
15429 arm_plt->maybe_thumb_refcount += 1;
15430
15431 if (r_type == R_ARM_THM_JUMP24
15432 || r_type == R_ARM_THM_JUMP19)
15433 arm_plt->thumb_refcount += 1;
15434 }
15435
15436 if (may_become_dynamic_p)
15437 {
15438 struct elf_dyn_relocs *p, **head;
15439
15440 /* Create a reloc section in dynobj. */
15441 if (sreloc == NULL)
15442 {
15443 sreloc = _bfd_elf_make_dynamic_reloc_section
15444 (sec, dynobj, 2, abfd, ! htab->use_rel);
15445
15446 if (sreloc == NULL)
15447 return FALSE;
15448
15449 /* BPABI objects never have dynamic relocations mapped. */
15450 if (htab->symbian_p)
15451 {
15452 flagword flags;
15453
15454 flags = bfd_get_section_flags (dynobj, sreloc);
15455 flags &= ~(SEC_LOAD | SEC_ALLOC);
15456 bfd_set_section_flags (dynobj, sreloc, flags);
15457 }
15458 }
15459
15460 /* If this is a global symbol, count the number of
15461 relocations we need for this symbol. */
15462 if (h != NULL)
15463 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15464 else
15465 {
15466 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15467 if (head == NULL)
15468 return FALSE;
15469 }
15470
15471 p = *head;
15472 if (p == NULL || p->sec != sec)
15473 {
15474 bfd_size_type amt = sizeof *p;
15475
15476 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15477 if (p == NULL)
15478 return FALSE;
15479 p->next = *head;
15480 *head = p;
15481 p->sec = sec;
15482 p->count = 0;
15483 p->pc_count = 0;
15484 }
15485
15486 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15487 p->pc_count += 1;
15488 p->count += 1;
15489 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15490 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15491 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15492 that will become rofixup. */
15493 /* This is due to the fact that we suppose all will become rofixup. */
15494 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15495 _bfd_error_handler
15496 (_("FDPIC does not yet support %s relocation"
15497 " to become dynamic for executable"),
15498 elf32_arm_howto_table_1[r_type].name);
15499 abort();
15500 }
15501 }
15502 }
15503
15504 return TRUE;
15505 }
15506
/* Rewrite the relocations of the SHT_ARM_EXIDX output section O to match
   the unwind-table edits (entry deletions and an optional terminating
   CANTUNWIND entry) recorded on each contributing input section.
   RELDATA describes O's relocation section; its contents are rewritten in
   place and its count/size updated.  No-op unless O is an EXIDX section.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Pick REL or RELA swappers based on the section's entry size.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Internal scratch buffer for the surviving relocs; the extra slot
     holds the reloc for an appended CANTUNWIND terminator entry.  */
  /* NOTE(review): the bfd_zmalloc result is not checked for NULL here;
     a failed allocation would crash in the swap-in below — confirm
     whether OOM is deliberately treated as fatal in this pass.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  /* Walk O's link orders, consuming the external relocs in order and
     keeping only those whose EXIDX entries survive the edits.  */
  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Linker-synthesised relocs are kept untouched.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = o->vma + i->output_offset;

	  /* Find the input reloc data whose entry size matches the
	     output's (REL vs RELA must agree).  */
	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  /* Each EXIDX table entry is 8 bytes, so the entry
		     index is the offset into the section divided by 8.  */
		  (*swap_in) (abfd, erela, irela);
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts the edit-list entries at or before this
		     reloc's entry; EDIT_NODE ends on the last of them.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the reloc unless its own entry was deleted;
		     shift its offset down to account for the BIAS
		     entries removed before it.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity: an R_ARM_PREL31 against the
		     output text section, for the CANTUNWIND terminator
		     entry appended at the end of this contribution.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy its relocs over
		 unchanged.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Swap the surviving relocs back out over the original contents.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15667
15668 /* Unwinding tables are not referenced directly. This pass marks them as
15669 required if the corresponding code section is marked. Similarly, ARMv8-M
15670 secure entry functions can only be referenced by SG veneers which are
15671 created after the GC process. They need to be marked in case they reside in
15672 their own section (as would be the case if code was compiled with
15673 -ffunction-sections). */
15674
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;

  /* Run the generic extra-section marking first.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Secure entry functions only exist on ARMv8-M targets, as given by
     the output's build attributes (baseline arch or later, M profile).  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark any unmarked .ARM.exidx section whose associated text
	     section (named by sh_link) has been marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  /* Newly marked sections may in turn reference more
		     code, so request another pass.  */
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      /* sh_info is the index of the first global symbol; only
		 globals have hash entries.  */
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		    }
		}
	    }
	}
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
15759
15760 /* Treat mapping symbols as special target symbols. */
15761
15762 static bfd_boolean
15763 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15764 {
15765 return bfd_is_arm_special_symbol_name (sym->name,
15766 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15767 }
15768
/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to be a function type.

   Scan SYMBOLS for the closest symbol at or below OFFSET within SECTION,
   returning its name through FUNCTIONNAME_PTR and the most recent
   STT_FILE name through FILENAME_PTR (either may be NULL).  Returns
   FALSE if no candidate symbol is found.  */

static bfd_boolean
arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
		       asymbol ** symbols,
		       asection * section,
		       bfd_vma offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  /* Remember the latest file symbol; it names the source file
	     for the symbols that follow it.  */
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Track the highest-valued symbol at or below OFFSET in
	     SECTION seen so far (later symbols win ties).  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
15829
15830
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.

   Tries DWARF2 first, then stabs, then falls back to a plain symbol
   table scan that skips ARM mapping symbols.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF found a location but no function name: fill that in from
	 the symbol table, ignoring mapping symbols.  Only ask for the
	 filename if DWARF did not already provide one.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: symbol-table scan.  No line information is available
     from symbols, so report line 0.  */
  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
15883
15884 static bfd_boolean
15885 elf32_arm_find_inliner_info (bfd * abfd,
15886 const char ** filename_ptr,
15887 const char ** functionname_ptr,
15888 unsigned int * line_ptr)
15889 {
15890 bfd_boolean found;
15891 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15892 functionname_ptr, line_ptr,
15893 & elf_tdata (abfd)->dwarf2_find_line_info);
15894 return found;
15895 }
15896
15897 /* Find dynamic relocs for H that apply to read-only sections. */
15898
15899 static asection *
15900 readonly_dynrelocs (struct elf_link_hash_entry *h)
15901 {
15902 struct elf_dyn_relocs *p;
15903
15904 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15905 {
15906 asection *s = p->sec->output_section;
15907
15908 if (s != NULL && (s->flags & SEC_READONLY) != 0)
15909 return p->sec;
15910 }
15911 return NULL;
15912 }
15913
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Functions never need a copy reloc; done either way.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  /* Symbols defined read-only in the shared object are copied into
     .data.rel.ro (made read-only after relocation); others into
     .dynbss.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16048
16049 /* Allocate space in .plt, .got and associated reloc sections for
16050 dynamic relocs. */
16051
16052 static bfd_boolean
16053 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16054 {
16055 struct bfd_link_info *info;
16056 struct elf32_arm_link_hash_table *htab;
16057 struct elf32_arm_link_hash_entry *eh;
16058 struct elf_dyn_relocs *p;
16059
16060 if (h->root.type == bfd_link_hash_indirect)
16061 return TRUE;
16062
16063 eh = (struct elf32_arm_link_hash_entry *) h;
16064
16065 info = (struct bfd_link_info *) inf;
16066 htab = elf32_arm_hash_table (info);
16067 if (htab == NULL)
16068 return FALSE;
16069
16070 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16071 && h->plt.refcount > 0)
16072 {
16073 /* Make sure this symbol is output as a dynamic symbol.
16074 Undefined weak syms won't yet be marked as dynamic. */
16075 if (h->dynindx == -1 && !h->forced_local
16076 && h->root.type == bfd_link_hash_undefweak)
16077 {
16078 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16079 return FALSE;
16080 }
16081
16082 /* If the call in the PLT entry binds locally, the associated
16083 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16084 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16085 than the .plt section. */
16086 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16087 {
16088 eh->is_iplt = 1;
16089 if (eh->plt.noncall_refcount == 0
16090 && SYMBOL_REFERENCES_LOCAL (info, h))
16091 /* All non-call references can be resolved directly.
16092 This means that they can (and in some cases, must)
16093 resolve directly to the run-time target, rather than
16094 to the PLT. That in turns means that any .got entry
16095 would be equal to the .igot.plt entry, so there's
16096 no point having both. */
16097 h->got.refcount = 0;
16098 }
16099
16100 if (bfd_link_pic (info)
16101 || eh->is_iplt
16102 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16103 {
16104 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16105
16106 /* If this symbol is not defined in a regular file, and we are
16107 not generating a shared library, then set the symbol to this
16108 location in the .plt. This is required to make function
16109 pointers compare as equal between the normal executable and
16110 the shared library. */
16111 if (! bfd_link_pic (info)
16112 && !h->def_regular)
16113 {
16114 h->root.u.def.section = htab->root.splt;
16115 h->root.u.def.value = h->plt.offset;
16116
16117 /* Make sure the function is not marked as Thumb, in case
16118 it is the target of an ABS32 relocation, which will
16119 point to the PLT entry. */
16120 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16121 }
16122
16123 /* VxWorks executables have a second set of relocations for
16124 each PLT entry. They go in a separate relocation section,
16125 which is processed by the kernel loader. */
16126 if (htab->vxworks_p && !bfd_link_pic (info))
16127 {
16128 /* There is a relocation for the initial PLT entry:
16129 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16130 if (h->plt.offset == htab->plt_header_size)
16131 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16132
16133 /* There are two extra relocations for each subsequent
16134 PLT entry: an R_ARM_32 relocation for the GOT entry,
16135 and an R_ARM_32 relocation for the PLT entry. */
16136 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16137 }
16138 }
16139 else
16140 {
16141 h->plt.offset = (bfd_vma) -1;
16142 h->needs_plt = 0;
16143 }
16144 }
16145 else
16146 {
16147 h->plt.offset = (bfd_vma) -1;
16148 h->needs_plt = 0;
16149 }
16150
16151 eh = (struct elf32_arm_link_hash_entry *) h;
16152 eh->tlsdesc_got = (bfd_vma) -1;
16153
16154 if (h->got.refcount > 0)
16155 {
16156 asection *s;
16157 bfd_boolean dyn;
16158 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16159 int indx;
16160
16161 /* Make sure this symbol is output as a dynamic symbol.
16162 Undefined weak syms won't yet be marked as dynamic. */
16163 if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16164 && h->root.type == bfd_link_hash_undefweak)
16165 {
16166 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16167 return FALSE;
16168 }
16169
16170 if (!htab->symbian_p)
16171 {
16172 s = htab->root.sgot;
16173 h->got.offset = s->size;
16174
16175 if (tls_type == GOT_UNKNOWN)
16176 abort ();
16177
16178 if (tls_type == GOT_NORMAL)
16179 /* Non-TLS symbols need one GOT slot. */
16180 s->size += 4;
16181 else
16182 {
16183 if (tls_type & GOT_TLS_GDESC)
16184 {
16185 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16186 eh->tlsdesc_got
16187 = (htab->root.sgotplt->size
16188 - elf32_arm_compute_jump_table_size (htab));
16189 htab->root.sgotplt->size += 8;
16190 h->got.offset = (bfd_vma) -2;
16191 /* plt.got_offset needs to know there's a TLS_DESC
16192 reloc in the middle of .got.plt. */
16193 htab->num_tls_desc++;
16194 }
16195
16196 if (tls_type & GOT_TLS_GD)
16197 {
16198 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16199 consecutive GOT slots. If the symbol is both GD
16200 and GDESC, got.offset may have been
16201 overwritten. */
16202 h->got.offset = s->size;
16203 s->size += 8;
16204 }
16205
16206 if (tls_type & GOT_TLS_IE)
16207 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16208 slot. */
16209 s->size += 4;
16210 }
16211
16212 dyn = htab->root.dynamic_sections_created;
16213
16214 indx = 0;
16215 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16216 bfd_link_pic (info),
16217 h)
16218 && (!bfd_link_pic (info)
16219 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16220 indx = h->dynindx;
16221
16222 if (tls_type != GOT_NORMAL
16223 && (bfd_link_pic (info) || indx != 0)
16224 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16225 || h->root.type != bfd_link_hash_undefweak))
16226 {
16227 if (tls_type & GOT_TLS_IE)
16228 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16229
16230 if (tls_type & GOT_TLS_GD)
16231 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16232
16233 if (tls_type & GOT_TLS_GDESC)
16234 {
16235 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16236 /* GDESC needs a trampoline to jump to. */
16237 htab->tls_trampoline = -1;
16238 }
16239
16240 /* Only GD needs it. GDESC just emits one relocation per
16241 2 entries. */
16242 if ((tls_type & GOT_TLS_GD) && indx != 0)
16243 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16244 }
16245 else if (((indx != -1) || htab->fdpic_p)
16246 && !SYMBOL_REFERENCES_LOCAL (info, h))
16247 {
16248 if (htab->root.dynamic_sections_created)
16249 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16250 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16251 }
16252 else if (h->type == STT_GNU_IFUNC
16253 && eh->plt.noncall_refcount == 0)
16254 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16255 they all resolve dynamically instead. Reserve room for the
16256 GOT entry's R_ARM_IRELATIVE relocation. */
16257 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16258 else if (bfd_link_pic (info)
16259 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16260 || h->root.type != bfd_link_hash_undefweak))
16261 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16262 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16263 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16264 /* Reserve room for rofixup for FDPIC executable. */
16265 /* TLS relocs do not need space since they are completely
16266 resolved. */
16267 htab->srofixup->size += 4;
16268 }
16269 }
16270 else
16271 h->got.offset = (bfd_vma) -1;
16272
16273 /* FDPIC support. */
16274 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16275 {
16276 /* Symbol musn't be exported. */
16277 if (h->dynindx != -1)
16278 abort();
16279
16280 /* We only allocate one function descriptor with its associated relocation. */
16281 if (eh->fdpic_cnts.funcdesc_offset == -1)
16282 {
16283 asection *s = htab->root.sgot;
16284
16285 eh->fdpic_cnts.funcdesc_offset = s->size;
16286 s->size += 8;
16287 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16288 if (bfd_link_pic(info))
16289 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16290 else
16291 htab->srofixup->size += 8;
16292 }
16293 }
16294
16295 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16296 {
16297 asection *s = htab->root.sgot;
16298
16299 if (htab->root.dynamic_sections_created && h->dynindx == -1
16300 && !h->forced_local)
16301 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16302 return FALSE;
16303
16304 if (h->dynindx == -1)
16305 {
16306 /* We only allocate one function descriptor with its associated relocation. q */
16307 if (eh->fdpic_cnts.funcdesc_offset == -1)
16308 {
16309
16310 eh->fdpic_cnts.funcdesc_offset = s->size;
16311 s->size += 8;
16312 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16313 if (bfd_link_pic(info))
16314 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16315 else
16316 htab->srofixup->size += 8;
16317 }
16318 }
16319
16320 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16321 R_ARM_RELATIVE/rofixup relocation on it. */
16322 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16323 s->size += 4;
16324 if (h->dynindx == -1 && !bfd_link_pic(info))
16325 htab->srofixup->size += 4;
16326 else
16327 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16328 }
16329
16330 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16331 {
16332 if (htab->root.dynamic_sections_created && h->dynindx == -1
16333 && !h->forced_local)
16334 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16335 return FALSE;
16336
16337 if (h->dynindx == -1)
16338 {
16339 /* We only allocate one function descriptor with its associated relocation. */
16340 if (eh->fdpic_cnts.funcdesc_offset == -1)
16341 {
16342 asection *s = htab->root.sgot;
16343
16344 eh->fdpic_cnts.funcdesc_offset = s->size;
16345 s->size += 8;
16346 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16347 if (bfd_link_pic(info))
16348 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16349 else
16350 htab->srofixup->size += 8;
16351 }
16352 }
16353 if (h->dynindx == -1 && !bfd_link_pic(info))
16354 {
16355 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16356 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16357 }
16358 else
16359 {
16360 /* Will need one dynamic reloc per reference. will be either
16361 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16362 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16363 eh->fdpic_cnts.funcdesc_cnt);
16364 }
16365 }
16366
16367 /* Allocate stubs for exported Thumb functions on v4t. */
16368 if (!htab->use_blx && h->dynindx != -1
16369 && h->def_regular
16370 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16371 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16372 {
16373 struct elf_link_hash_entry * th;
16374 struct bfd_link_hash_entry * bh;
16375 struct elf_link_hash_entry * myh;
16376 char name[1024];
16377 asection *s;
16378 bh = NULL;
16379 /* Create a new symbol to regist the real location of the function. */
16380 s = h->root.u.def.section;
16381 sprintf (name, "__real_%s", h->root.root.string);
16382 _bfd_generic_link_add_one_symbol (info, s->owner,
16383 name, BSF_GLOBAL, s,
16384 h->root.u.def.value,
16385 NULL, TRUE, FALSE, &bh);
16386
16387 myh = (struct elf_link_hash_entry *) bh;
16388 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16389 myh->forced_local = 1;
16390 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16391 eh->export_glue = myh;
16392 th = record_arm_to_thumb_glue (info, h);
16393 /* Point the symbol at the stub. */
16394 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16395 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16396 h->root.u.def.section = th->root.u.def.section;
16397 h->root.u.def.value = th->root.u.def.value & ~1;
16398 }
16399
16400 if (eh->dyn_relocs == NULL)
16401 return TRUE;
16402
16403 /* In the shared -Bsymbolic case, discard space allocated for
16404 dynamic pc-relative relocs against symbols which turn out to be
16405 defined in regular objects. For the normal shared case, discard
16406 space for pc-relative relocs that have become local due to symbol
16407 visibility changes. */
16408
16409 if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16410 {
16411 /* Relocs that use pc_count are PC-relative forms, which will appear
16412 on something like ".long foo - ." or "movw REG, foo - .". We want
16413 calls to protected symbols to resolve directly to the function
16414 rather than going via the plt. If people want function pointer
16415 comparisons to work as expected then they should avoid writing
16416 assembly like ".long foo - .". */
16417 if (SYMBOL_CALLS_LOCAL (info, h))
16418 {
16419 struct elf_dyn_relocs **pp;
16420
16421 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16422 {
16423 p->count -= p->pc_count;
16424 p->pc_count = 0;
16425 if (p->count == 0)
16426 *pp = p->next;
16427 else
16428 pp = &p->next;
16429 }
16430 }
16431
16432 if (htab->vxworks_p)
16433 {
16434 struct elf_dyn_relocs **pp;
16435
16436 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16437 {
16438 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16439 *pp = p->next;
16440 else
16441 pp = &p->next;
16442 }
16443 }
16444
16445 /* Also discard relocs on undefined weak syms with non-default
16446 visibility. */
16447 if (eh->dyn_relocs != NULL
16448 && h->root.type == bfd_link_hash_undefweak)
16449 {
16450 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16451 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16452 eh->dyn_relocs = NULL;
16453
16454 /* Make sure undefined weak symbols are output as a dynamic
16455 symbol in PIEs. */
16456 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16457 && !h->forced_local)
16458 {
16459 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16460 return FALSE;
16461 }
16462 }
16463
16464 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16465 && h->root.type == bfd_link_hash_new)
16466 {
16467 /* Output absolute symbols so that we can create relocations
16468 against them. For normal symbols we output a relocation
16469 against the section that contains them. */
16470 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16471 return FALSE;
16472 }
16473
16474 }
16475 else
16476 {
16477 /* For the non-shared case, discard space for relocs against
16478 symbols which turn out to need copy relocs or are not
16479 dynamic. */
16480
16481 if (!h->non_got_ref
16482 && ((h->def_dynamic
16483 && !h->def_regular)
16484 || (htab->root.dynamic_sections_created
16485 && (h->root.type == bfd_link_hash_undefweak
16486 || h->root.type == bfd_link_hash_undefined))))
16487 {
16488 /* Make sure this symbol is output as a dynamic symbol.
16489 Undefined weak syms won't yet be marked as dynamic. */
16490 if (h->dynindx == -1 && !h->forced_local
16491 && h->root.type == bfd_link_hash_undefweak)
16492 {
16493 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16494 return FALSE;
16495 }
16496
16497 /* If that succeeded, we know we'll be keeping all the
16498 relocs. */
16499 if (h->dynindx != -1)
16500 goto keep;
16501 }
16502
16503 eh->dyn_relocs = NULL;
16504
16505 keep: ;
16506 }
16507
16508 /* Finally, allocate space. */
16509 for (p = eh->dyn_relocs; p != NULL; p = p->next)
16510 {
16511 asection *sreloc = elf_section_data (p->sec)->sreloc;
16512
16513 if (h->type == STT_GNU_IFUNC
16514 && eh->plt.noncall_refcount == 0
16515 && SYMBOL_REFERENCES_LOCAL (info, h))
16516 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16517 else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16518 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16519 else if (htab->fdpic_p && !bfd_link_pic(info))
16520 htab->srofixup->size += 4 * p->count;
16521 else
16522 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16523 }
16524
16525 return TRUE;
16526 }
16527
16528 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16529 read-only sections. */
16530
16531 static bfd_boolean
16532 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16533 {
16534 asection *sec;
16535
16536 if (h->root.type == bfd_link_hash_indirect)
16537 return TRUE;
16538
16539 sec = readonly_dynrelocs (h);
16540 if (sec != NULL)
16541 {
16542 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16543
16544 info->flags |= DF_TEXTREL;
16545 info->callbacks->minfo
16546 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16547 sec->owner, h->root.root.string, sec);
16548
16549 /* Not an error, just cut short the traversal. */
16550 return FALSE;
16551 }
16552
16553 return TRUE;
16554 }
16555
16556 void
16557 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16558 int byteswap_code)
16559 {
16560 struct elf32_arm_link_hash_table *globals;
16561
16562 globals = elf32_arm_hash_table (info);
16563 if (globals == NULL)
16564 return;
16565
16566 globals->byteswap_code = byteswap_code;
16567 }
16568
/* Set the sizes of the dynamic sections.  This walks every input bfd,
   assigning GOT offsets to local symbols and reserving space for their
   dynamic relocations (or FDPIC rofixups), then traverses the global
   symbol table doing the same for globals, sizes the interworking glue
   and TLS trampoline, allocates the contents of each linker-created
   dynamic section, and finally emits the .dynamic tags whose values are
   filled in later by elf32_arm_finish_dynamic_sections.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  /* For a static FDPIC executable, dynamic relocs are
		     replaced by load-time rofixups (4 bytes each).  */
		  if (htab->fdpic_p && !bfd_link_pic(info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol parallel arrays (GOT refcount, iplt
	 info, TLS type, TLS descriptor offset, FDPIC counts) in
	 lockstep.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  /* TLS descriptors live in .got.plt, after the jump
		     table; record the offset relative to its end.  */
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* -1 flags that a trampoline is needed; the real
			 offset is assigned below once .plt is sized.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline was set to -1 above if any TLS descriptor needs a
     trampoline; replace the flag value with its actual .plt offset.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
17014
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  Also enforces the FDPIC stack-segment
   size.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for a relocatable link.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Look up (creating if necessary) the module-base symbol that
	 TLS relocations are computed against.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* Mark the looked-up entry as TLS, then make the definition
	     just added a regular, hidden one.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC links, honour --stack/__stacksize (default
     DEFAULT_STACK_SIZE).  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17066
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  H is the hash-table entry being finished and
   SYM is the corresponding .dynsym entry, which we may adjust (section
   index, value, type) before it is written out.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      /* .iplt entries were filled in earlier; only ordinary .plt
	 entries are populated here.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Copy relocs for protected read-only data go in
	 .rel(a).dyn.rel.ro; others in .rel(a).bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17156
17157 static void
17158 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17159 void *contents,
17160 const unsigned long *template, unsigned count)
17161 {
17162 unsigned ix;
17163
17164 for (ix = 0; ix != count; ix++)
17165 {
17166 unsigned long insn = template[ix];
17167
17168 /* Emit mov pc,rx if bx is not permitted. */
17169 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17170 insn = (insn & 0xf000000f) | 0x01a0f000;
17171 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17172 }
17173 }
17174
17175 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17176 other variants, NaCl needs this entry in a static executable's
17177 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17178 zero. For .iplt really only the last bundle is useful, and .iplt
17179 could have a shorter first entry, with each individual PLT entry's
17180 relative branch calculated differently so it targets the last
17181 bundle instead of the instruction before it (labelled .Lplt_tail
17182 above). But it's simpler to keep the size and layout of PLT0
17183 consistent with the dynamic case, at the cost of some dead code at
17184 the start of .iplt and the one dead store to the stack at the start
17185 of .Lplt_tail. */
17186 static void
17187 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17188 asection *plt, bfd_vma got_displacement)
17189 {
17190 unsigned int i;
17191
17192 put_arm_insn (htab, output_bfd,
17193 elf32_arm_nacl_plt0_entry[0]
17194 | arm_movw_immediate (got_displacement),
17195 plt->contents + 0);
17196 put_arm_insn (htab, output_bfd,
17197 elf32_arm_nacl_plt0_entry[1]
17198 | arm_movt_immediate (got_displacement),
17199 plt->contents + 4);
17200
17201 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17202 put_arm_insn (htab, output_bfd,
17203 elf32_arm_nacl_plt0_entry[i],
17204 plt->contents + (i * 4));
17205 }
17206
/* Finish up the dynamic sections.  Runs at the end of the link: patch
   final addresses/sizes into the .dynamic tags, emit the PLT header,
   the TLS descriptor / TLS call trampolines, the reserved first GOT
   words, and (for FDPIC) the terminating .rofixup entry.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every entry previously emitted into .dynamic and fill in
	 its final value now that output layout is known.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need rewriting on Symbian (BPABI), where
	       they hold file offsets rather than VMAs.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Scan all output sections of the matching reloc
		     type: sum sizes for *SZ, take the lowest file
		     offset for DT_REL/DT_RELA.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the 6-word lazy TLS descriptor trampoline, then the
	     two PC-relative data words it consumes (template slots 6
	     and 7 hold their addend corrections).  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry owns a pair of relocations: first against
	     _GLOBAL_OFFSET_TABLE_, then against the PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] is the address of .dynamic (0 when static).  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17575
17576 static void
17577 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17578 {
17579 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17580 struct elf32_arm_link_hash_table *globals;
17581 struct elf_segment_map *m;
17582
17583 i_ehdrp = elf_elfheader (abfd);
17584
17585 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17586 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17587 else
17588 _bfd_elf_post_process_headers (abfd, link_info);
17589 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17590
17591 if (link_info)
17592 {
17593 globals = elf32_arm_hash_table (link_info);
17594 if (globals != NULL && globals->byteswap_code)
17595 i_ehdrp->e_flags |= EF_ARM_BE8;
17596
17597 if (globals->fdpic_p)
17598 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17599 }
17600
17601 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17602 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17603 {
17604 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17605 if (abi == AEABI_VFP_args_vfp)
17606 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17607 else
17608 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17609 }
17610
17611 /* Scan segment to set p_flags attribute if it contains only sections with
17612 SHF_ARM_PURECODE flag. */
17613 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17614 {
17615 unsigned int j;
17616
17617 if (m->count == 0)
17618 continue;
17619 for (j = 0; j < m->count; j++)
17620 {
17621 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17622 break;
17623 }
17624 if (j == m->count)
17625 {
17626 m->p_flags = PF_X;
17627 m->p_flags_valid = 1;
17628 }
17629 }
17630 }
17631
17632 static enum elf_reloc_type_class
17633 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17634 const asection *rel_sec ATTRIBUTE_UNUSED,
17635 const Elf_Internal_Rela *rela)
17636 {
17637 switch ((int) ELF32_R_TYPE (rela->r_info))
17638 {
17639 case R_ARM_RELATIVE:
17640 return reloc_class_relative;
17641 case R_ARM_JUMP_SLOT:
17642 return reloc_class_plt;
17643 case R_ARM_COPY:
17644 return reloc_class_copy;
17645 case R_ARM_IRELATIVE:
17646 return reloc_class_ifunc;
17647 default:
17648 return reloc_class_normal;
17649 }
17650 }
17651
/* Final-write hook: refresh the contents of any ARM note section
   (ARM_NOTE_SECTION) in the output BFD before it is written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17657
17658 /* Return TRUE if this is an unwinding table entry. */
17659
17660 static bfd_boolean
17661 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17662 {
17663 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17664 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17665 }
17666
17667
17668 /* Set the type and flags for an ARM section. We do this by
17669 the section name, which is a hack, but ought to work. */
17670
17671 static bfd_boolean
17672 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17673 {
17674 const char * name;
17675
17676 name = bfd_get_section_name (abfd, sec);
17677
17678 if (is_arm_elf_unwind_section_name (abfd, name))
17679 {
17680 hdr->sh_type = SHT_ARM_EXIDX;
17681 hdr->sh_flags |= SHF_LINK_ORDER;
17682 }
17683
17684 if (sec->flags & SEC_ELF_PURECODE)
17685 hdr->sh_flags |= SHF_ARM_PURECODE;
17686
17687 return TRUE;
17688 }
17689
17690 /* Handle an ARM specific section when reading an object file. This is
17691 called when bfd_section_from_shdr finds a section with an unknown
17692 type. */
17693
17694 static bfd_boolean
17695 elf32_arm_section_from_shdr (bfd *abfd,
17696 Elf_Internal_Shdr * hdr,
17697 const char *name,
17698 int shindex)
17699 {
17700 /* There ought to be a place to keep ELF backend specific flags, but
17701 at the moment there isn't one. We just keep track of the
17702 sections by their name, instead. Fortunately, the ABI gives
17703 names for all the ARM specific sections, so we will probably get
17704 away with this. */
17705 switch (hdr->sh_type)
17706 {
17707 case SHT_ARM_EXIDX:
17708 case SHT_ARM_PREEMPTMAP:
17709 case SHT_ARM_ATTRIBUTES:
17710 break;
17711
17712 default:
17713 return FALSE;
17714 }
17715
17716 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17717 return FALSE;
17718
17719 return TRUE;
17720 }
17721
17722 static _arm_elf_section_data *
17723 get_arm_elf_section_data (asection * sec)
17724 {
17725 if (sec && sec->owner && is_arm_elf (sec->owner))
17726 return elf32_arm_section_data (sec);
17727 else
17728 return NULL;
17729 }
17730
/* State threaded through the mapping/stub symbol output walkers.  */
typedef struct
{
  void *flaginfo;		/* Opaque cookie forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section symbols are currently emitted against.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback that actually writes one symbol to the output.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
17740
/* The three ARM ELF mapping symbol classes; the ordinal values index
   the "$a"/"$t"/"$d" name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* $a: Arm code.  */
  ARM_MAP_THUMB,	/* $t: Thumb code.  */
  ARM_MAP_DATA		/* $d: data.  */
};
17747
17748
17749 /* Output a single mapping symbol. */
17750
17751 static bfd_boolean
17752 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17753 enum map_symbol_type type,
17754 bfd_vma offset)
17755 {
17756 static const char *names[3] = {"$a", "$t", "$d"};
17757 Elf_Internal_Sym sym;
17758
17759 sym.st_value = osi->sec->output_section->vma
17760 + osi->sec->output_offset
17761 + offset;
17762 sym.st_size = 0;
17763 sym.st_other = 0;
17764 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17765 sym.st_shndx = osi->sec_shndx;
17766 sym.st_target_internal = 0;
17767 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17768 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17769 }
17770
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The offsets of the $a/$t/$d symbols emitted below encode the PLT
   entry layout of each target variant.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select .iplt or .plt as the section to emit against; .iplt has
     no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit of the recorded offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      /* Symbian: Arm code at ADDR, data word at ADDR + 4.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks: two code/data pairs per entry.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are all Arm code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* The Thumb thunk, when needed, lives in the 4 bytes preceding
	 the entry.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The larger FDPIC entry variant has more code after the data.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
17878
17879 /* Output mapping symbols for PLT entries associated with H. */
17880
17881 static bfd_boolean
17882 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17883 {
17884 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17885 struct elf32_arm_link_hash_entry *eh;
17886
17887 if (h->root.type == bfd_link_hash_indirect)
17888 return TRUE;
17889
17890 if (h->root.type == bfd_link_hash_warning)
17891 /* When warning symbols are created, they **replace** the "real"
17892 entry in the hash table, thus we never get to see the real
17893 symbol in a hash traversal. So look at it now. */
17894 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17895
17896 eh = (struct elf32_arm_link_hash_entry *) h;
17897 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17898 &h->plt, &eh->plt);
17899 }
17900
/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  /* Point the symbol's definition at the stub itself.  */
  BFD_ASSERT (hash);
  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}
17914
17915 /* Output a single local symbol for a generated stub. */
17916
17917 static bfd_boolean
17918 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17919 bfd_vma offset, bfd_vma size)
17920 {
17921 Elf_Internal_Sym sym;
17922
17923 sym.st_value = osi->sec->output_section->vma
17924 + osi->sec->output_offset
17925 + offset;
17926 sym.st_size = size;
17927 sym.st_other = 0;
17928 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17929 sym.st_shndx = osi->sec_shndx;
17930 sym.st_target_internal = 0;
17931 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17932 }
17933
/* Hash-traversal callback: emit the naming symbol and mapping symbols
   for one long-branch stub.  IN_ARG is the current
   output_arch_syminfo; stubs not in the section being processed are
   skipped.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub; Thumb entry points get
	 the low bit of the value set.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol at every
     transition between Arm code, Thumb code and data.  Starting
     PREV_TYPE at DATA_TYPE guarantees a symbol for the first code
     element.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance SIZE by the encoded length of this template element
	 (Thumb16 is 2 bytes, everything else 4).  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
18041
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.  Covers, in order: plain data sections, the three glue
   sections, long-branch stub sections, the PLT/iPLT, and the TLS
   trampolines.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Only non-empty, non-excluded sections with contents
	       that already lack a mapping symbol qualify.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Per-veneer size depends on which glue variant was emitted.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer gets $a at its start and $d over its final word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to Arm code
	 at offset 4.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab) && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry symbols: global symbols via the hash traversal,
	 then local ifunc PLT entries from each input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18276
18277 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18278 the import library. All SYMCOUNT symbols of ABFD can be examined
18279 from their pointers in SYMS. Pointers of symbols to keep should be
18280 stored continuously at the beginning of that array.
18281
18282 Returns the number of symbols to keep. */
18283
18284 static unsigned int
18285 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18286 struct bfd_link_info *info,
18287 asymbol **syms, long symcount)
18288 {
18289 size_t maxnamelen;
18290 char *cmse_name;
18291 long src_count, dst_count = 0;
18292 struct elf32_arm_link_hash_table *htab;
18293
18294 htab = elf32_arm_hash_table (info);
18295 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18296 symcount = 0;
18297
18298 maxnamelen = 128;
18299 cmse_name = (char *) bfd_malloc (maxnamelen);
18300 for (src_count = 0; src_count < symcount; src_count++)
18301 {
18302 struct elf32_arm_link_hash_entry *cmse_hash;
18303 asymbol *sym;
18304 flagword flags;
18305 char *name;
18306 size_t namelen;
18307
18308 sym = syms[src_count];
18309 flags = sym->flags;
18310 name = (char *) bfd_asymbol_name (sym);
18311
18312 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18313 continue;
18314 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18315 continue;
18316
18317 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18318 if (namelen > maxnamelen)
18319 {
18320 cmse_name = (char *)
18321 bfd_realloc (cmse_name, namelen);
18322 maxnamelen = namelen;
18323 }
18324 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18325 cmse_hash = (struct elf32_arm_link_hash_entry *)
18326 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18327
18328 if (!cmse_hash
18329 || (cmse_hash->root.root.type != bfd_link_hash_defined
18330 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18331 || cmse_hash->root.type != STT_FUNC)
18332 continue;
18333
18334 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18335 continue;
18336
18337 syms[dst_count++] = sym;
18338 }
18339 free (cmse_name);
18340
18341 syms[dst_count] = NULL;
18342
18343 return dst_count;
18344 }
18345
18346 /* Filter symbols of ABFD to include in the import library. All
18347 SYMCOUNT symbols of ABFD can be examined from their pointers in
18348 SYMS. Pointers of symbols to keep should be stored continuously at
18349 the beginning of that array.
18350
18351 Returns the number of symbols to keep. */
18352
18353 static unsigned int
18354 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18355 struct bfd_link_info *info,
18356 asymbol **syms, long symcount)
18357 {
18358 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18359
18360 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18361 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18362 library to be a relocatable object file. */
18363 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18364 if (globals->cmse_implib)
18365 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18366 else
18367 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18368 }
18369
18370 /* Allocate target specific section data. */
18371
18372 static bfd_boolean
18373 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18374 {
18375 if (!sec->used_by_bfd)
18376 {
18377 _arm_elf_section_data *sdata;
18378 bfd_size_type amt = sizeof (*sdata);
18379
18380 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18381 if (sdata == NULL)
18382 return FALSE;
18383 sec->used_by_bfd = sdata;
18384 }
18385
18386 return _bfd_elf_new_section_hook (abfd, sec);
18387 }
18388
18389
18390 /* Used to order a list of mapping symbols by address. */
18391
18392 static int
18393 elf32_arm_compare_mapping (const void * a, const void * b)
18394 {
18395 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18396 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18397
18398 if (amap->vma > bmap->vma)
18399 return 1;
18400 else if (amap->vma < bmap->vma)
18401 return -1;
18402 else if (amap->type > bmap->type)
18403 /* Ensure results do not depend on the host qsort for objects with
18404 multiple mapping symbols at the same address by sorting on type
18405 after vma. */
18406 return 1;
18407 else if (amap->type < bmap->type)
18408 return -1;
18409 else
18410 return 0;
18411 }
18412
18413 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18414
18415 static unsigned long
18416 offset_prel31 (unsigned long addr, bfd_vma offset)
18417 {
18418 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18419 }
18420
18421 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18422 relocations. */
18423
static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero (the first word is a
     prel31 offset to the function the entry describes).  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not
     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
18442
18443 /* Data for make_branch_to_a8_stub(). */
18444
struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* Buffer holding that section's contents.  */
  bfd_byte *contents;
};
18450
18451
18452 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18453 places for a particular section. */
18454
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only process Cortex-A8 stubs whose veneered branch lives in the
     section currently being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* NOTE(review): presumably aligns the BLX source to a word boundary as
     the branch-offset base -- confirm against the A8 veneer creation.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Build the 32-bit Thumb-2 branch replacing the veneered instruction.
     The three branch kinds share the jump24 offset-encoding code below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two Thumb halfwords of the branch over the original
     instruction.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18559
18560 /* Beginning of stm32l4xx work-around. */
18561
18562 /* Functions encoding instructions necessary for the emission of the
18563 fix-stm32l4xx-629360.
18564 Encoding is extracted from the
18565 ARM (C) Architecture Reference Manual
18566 ARMv7-A and ARMv7-R edition
18567 ARM DDI 0406C.b (ID072512). */
18568
18569 static inline bfd_vma
18570 create_instruction_branch_absolute (int branch_offset)
18571 {
18572 /* A8.8.18 B (A8-334)
18573 B target_address (Encoding T4). */
18574 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18575 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18576 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18577
18578 int s = ((branch_offset & 0x1000000) >> 24);
18579 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18580 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18581
18582 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18583 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18584
18585 bfd_vma patched_inst = 0xf0009000
18586 | s << 26 /* S. */
18587 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18588 | j1 << 13 /* J1. */
18589 | j2 << 11 /* J2. */
18590 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18591
18592 return patched_inst;
18593 }
18594
18595 static inline bfd_vma
18596 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18597 {
18598 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18599 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18600 bfd_vma patched_inst = 0xe8900000
18601 | (/*W=*/wback << 21)
18602 | (base_reg << 16)
18603 | (reg_mask & 0x0000ffff);
18604
18605 return patched_inst;
18606 }
18607
18608 static inline bfd_vma
18609 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18610 {
18611 /* A8.8.60 LDMDB/LDMEA (A8-402)
18612 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18613 bfd_vma patched_inst = 0xe9100000
18614 | (/*W=*/wback << 21)
18615 | (base_reg << 16)
18616 | (reg_mask & 0x0000ffff);
18617
18618 return patched_inst;
18619 }
18620
18621 static inline bfd_vma
18622 create_instruction_mov (int target_reg, int source_reg)
18623 {
18624 /* A8.8.103 MOV (register) (A8-486)
18625 MOV Rd, Rm (Encoding T1). */
18626 bfd_vma patched_inst = 0x4600
18627 | (target_reg & 0x7)
18628 | ((target_reg & 0x8) >> 3) << 7
18629 | (source_reg << 3);
18630
18631 return patched_inst;
18632 }
18633
18634 static inline bfd_vma
18635 create_instruction_sub (int target_reg, int source_reg, int value)
18636 {
18637 /* A8.8.221 SUB (immediate) (A8-708)
18638 SUB Rd, Rn, #value (Encoding T3). */
18639 bfd_vma patched_inst = 0xf1a00000
18640 | (target_reg << 8)
18641 | (source_reg << 16)
18642 | (/*S=*/0 << 20)
18643 | ((value & 0x800) >> 11) << 26
18644 | ((value & 0x700) >> 8) << 12
18645 | (value & 0x0ff);
18646
18647 return patched_inst;
18648 }
18649
18650 static inline bfd_vma
18651 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18652 int first_reg)
18653 {
18654 /* A8.8.332 VLDM (A8-922)
18655 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18656 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18657 | (/*W=*/wback << 21)
18658 | (base_reg << 16)
18659 | (num_words & 0x000000ff)
18660 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18661 | (first_reg & 0x00000001) << 22;
18662
18663 return patched_inst;
18664 }
18665
18666 static inline bfd_vma
18667 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18668 int first_reg)
18669 {
18670 /* A8.8.332 VLDM (A8-922)
18671 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18672 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18673 | (base_reg << 16)
18674 | (num_words & 0x000000ff)
18675 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18676 | (first_reg & 0x00000001) << 22;
18677
18678 return patched_inst;
18679 }
18680
18681 static inline bfd_vma
18682 create_instruction_udf_w (int value)
18683 {
18684 /* A8.8.247 UDF (A8-758)
18685 Undefined (Encoding T2). */
18686 bfd_vma patched_inst = 0xf7f0a000
18687 | (value & 0x00000fff)
18688 | (value & 0x000f0000) << 16;
18689
18690 return patched_inst;
18691 }
18692
18693 static inline bfd_vma
18694 create_instruction_udf (int value)
18695 {
18696 /* A8.8.247 UDF (A8-758)
18697 Undefined (Encoding T1). */
18698 bfd_vma patched_inst = 0xde00
18699 | (value & 0xff);
18700
18701 return patched_inst;
18702 }
18703
18704 /* Functions writing an instruction in memory, returning the next
18705 memory position to write to. */
18706
/* Write the 32-bit Thumb-2 instruction INSN at PT and return the
   position just past it (PT + 4).  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
18714
/* Write the 16-bit Thumb instruction INSN at PT and return the
   position just past it (PT + 2).  */

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
18722
18723 /* Function filling up a region in memory with T1 and T2 UDFs taking
18724 care of alignment. */
18725
18726 static bfd_byte *
18727 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18728 bfd * output_bfd,
18729 const bfd_byte * const base_stub_contents,
18730 bfd_byte * const from_stub_contents,
18731 const bfd_byte * const end_stub_contents)
18732 {
18733 bfd_byte *current_stub_contents = from_stub_contents;
18734
18735 /* Fill the remaining of the stub with deterministic contents : UDF
18736 instructions.
18737 Check if realignment is needed on modulo 4 frontier using T1, to
18738 further use T2. */
18739 if ((current_stub_contents < end_stub_contents)
18740 && !((current_stub_contents - base_stub_contents) % 2)
18741 && ((current_stub_contents - base_stub_contents) % 4))
18742 current_stub_contents =
18743 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18744 create_instruction_udf (0));
18745
18746 for (; current_stub_contents < end_stub_contents;)
18747 current_stub_contents =
18748 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18749 create_instruction_udf_w (0));
18750
18751 return current_stub_contents;
18752 }
18753
18754 /* Functions writing the stream of instructions equivalent to the
18755 derived sequence for ldmia, ldmdb, vldm respectively. */
18756
/* Emit into the veneer at BASE_STUB_CONTENTS a sequence of instructions
   equivalent to the Thumb-2 LDMIA INITIAL_INSN (located at
   INITIAL_INSN_ADDR), splitting wide register lists to work around the
   stm32l4xx erratum, then pad the rest of the veneer with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  If the LDM loaded PC there is no
	 fall-through to branch back to.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Encoding constraints the assembler guarantees for LDMIA T2:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.
	 Rn must be reloaded last so the base stays valid until then.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
18900
/* Emit into the veneer at BASE_STUB_CONTENTS a sequence of instructions
   equivalent to the Thumb-2 LDMDB INITIAL_INSN (located at
   INITIAL_INSN_ADDR), splitting wide register lists to work around the
   stm32l4xx erratum, then pad the rest of the veneer with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Encoding constraints the assembler guarantees for LDMDB T1:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One case per combination of the
     writeback (W) bit, whether PC is in the register list, and whether
     Rn itself is reloaded.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  Convert the decrement-before
	 load to two increment-after loads from the lowest address.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19146
/* Emit, at BASE_STUB_CONTENTS, the veneer that replaces the Thumb2
   VLDM/VPOP instruction INITIAL_INSN (located at INITIAL_INSN_ADDR)
   which is affected by the STM32L4XX erratum.  Loads of more than
   eight words are split into several sub-8-word loads; shorter loads
   are emitted unchanged.  The stub always ends with a branch back to
   the instruction following the original one, and any slack up to
   STM32L4XX_ERRATUM_VLDM_VENEER_SIZE is filled with UDF instructions.  */
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Low 8 bits of the encoding: the imm8 field, i.e. the number of
     words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the P/U/W bits and the
	 double/single precision from the encoding.  */
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Bits 19-16: the base register Rn.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All but the last chunk load 8 words; the last loads
		 whatever remains.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19267
19268 static void
19269 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19270 bfd * output_bfd,
19271 const insn32 wrong_insn,
19272 const bfd_byte *const wrong_insn_addr,
19273 bfd_byte *const stub_contents)
19274 {
19275 if (is_thumb2_ldmia (wrong_insn))
19276 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19277 wrong_insn, wrong_insn_addr,
19278 stub_contents);
19279 else if (is_thumb2_ldmdb (wrong_insn))
19280 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19281 wrong_insn, wrong_insn_addr,
19282 stub_contents);
19283 else if (is_thumb2_vldm (wrong_insn))
19284 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19285 wrong_insn, wrong_insn_addr,
19286 stub_contents);
19287 }
19288
19289 /* End of stm32l4xx work-around. */
19290
19291
19292 /* Do code byteswapping. Return FALSE afterwards so that the section is
19293 written out as normal. */
19294
/* Apply the final ARM-specific fix-ups to SEC's CONTENTS just before
   they are written out:

     - patch VFP11 erratum branches and veneers,
     - patch STM32L4XX erratum branches and veneers,
     - apply the recorded unwind-table edits (entry deletion and
       EXIDX_CANTUNWIND insertion) to SHT_ARM_EXIDX sections,
     - redirect branches to Cortex-A8 erratum stubs,
     - byte-swap code regions when byteswap_code is in effect.

   Returns FALSE so that the (possibly modified) CONTENTS are still
   written out by the generic code, except on the SHT_ARM_EXIDX path,
   which writes the edited contents itself and returns TRUE.  */
static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Fix up VFP11 erratum branches and veneers.  */
  if (errcount != 0)
    {
      /* Instructions are stored as little-endian byte sequences below;
	 XORing the byte index with 3 flips them for big-endian
	 output.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  /* Section-relative address of the spot to patch.  */
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (word-aligned) range.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Fix up STM32L4XX erratum branches and veneers.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  /* Section-relative address of the spot to patch.  */
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb2 B.W has a signed 25-bit range.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* Address, within CONTENTS, of the instruction the
		   veneer must return to.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		/* Range-check the return branch, allowing for the
		   veneer size (VLDM or LDM, whichever is larger).  */
		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		      continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Apply the recorded unwind-table edits to .ARM.exidx sections.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): the bfd_malloc result is used unchecked — on
	 allocation failure this would crash; verify against upstream.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      /* Each EXIDX entry is 8 bytes; walk input entries and the edit
	 list in parallel.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  /* Copy entries preceding the next edit verbatim
		     (adjusting prel31 offsets as we go).  */
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: the section contents have been written out here.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  /* The mapping symbol type determines how the region between
	     this mapping symbol and the next is swapped.  */
	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The map is no longer needed; -1 marks it as consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
19650
19651 /* Mangle thumb function symbols as we read them in. */
19652
19653 static bfd_boolean
19654 elf32_arm_swap_symbol_in (bfd * abfd,
19655 const void *psrc,
19656 const void *pshn,
19657 Elf_Internal_Sym *dst)
19658 {
19659 Elf_Internal_Shdr *symtab_hdr;
19660 const char *name = NULL;
19661
19662 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19663 return FALSE;
19664 dst->st_target_internal = 0;
19665
19666 /* New EABI objects mark thumb function symbols by setting the low bit of
19667 the address. */
19668 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19669 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19670 {
19671 if (dst->st_value & 1)
19672 {
19673 dst->st_value &= ~(bfd_vma) 1;
19674 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19675 ST_BRANCH_TO_THUMB);
19676 }
19677 else
19678 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19679 }
19680 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19681 {
19682 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19683 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19684 }
19685 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19686 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19687 else
19688 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19689
19690 /* Mark CMSE special symbols. */
19691 symtab_hdr = & elf_symtab_hdr (abfd);
19692 if (symtab_hdr->sh_size)
19693 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19694 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19695 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19696
19697 return TRUE;
19698 }
19699
19700
19701 /* Mangle thumb function symbols as we write them out. */
19702
19703 static void
19704 elf32_arm_swap_symbol_out (bfd *abfd,
19705 const Elf_Internal_Sym *src,
19706 void *cdst,
19707 void *shndx)
19708 {
19709 Elf_Internal_Sym newsym;
19710
19711 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19712 of the address set, as per the new EABI. We do this unconditionally
19713 because objcopy does not set the elf header flags until after
19714 it writes out the symbol table. */
19715 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19716 {
19717 newsym = *src;
19718 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19719 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19720 if (newsym.st_shndx != SHN_UNDEF)
19721 {
19722 /* Do this only for defined symbols. At link type, the static
19723 linker will simulate the work of dynamic linker of resolving
19724 symbols and will carry over the thumbness of found symbols to
19725 the output symbol table. It's not clear how it happens, but
19726 the thumbness of undefined symbols can well be different at
19727 runtime, and writing '1' for them will be confusing for users
19728 and possibly for dynamic linker itself.
19729 */
19730 newsym.st_value |= 1;
19731 }
19732
19733 src = &newsym;
19734 }
19735 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19736 }
19737
19738 /* Add the PT_ARM_EXIDX program header. */
19739
19740 static bfd_boolean
19741 elf32_arm_modify_segment_map (bfd *abfd,
19742 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19743 {
19744 struct elf_segment_map *m;
19745 asection *sec;
19746
19747 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19748 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19749 {
19750 /* If there is already a PT_ARM_EXIDX header, then we do not
19751 want to add another one. This situation arises when running
19752 "strip"; the input binary already has the header. */
19753 m = elf_seg_map (abfd);
19754 while (m && m->p_type != PT_ARM_EXIDX)
19755 m = m->next;
19756 if (!m)
19757 {
19758 m = (struct elf_segment_map *)
19759 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19760 if (m == NULL)
19761 return FALSE;
19762 m->p_type = PT_ARM_EXIDX;
19763 m->count = 1;
19764 m->sections[0] = sec;
19765
19766 m->next = elf_seg_map (abfd);
19767 elf_seg_map (abfd) = m;
19768 }
19769 }
19770
19771 return TRUE;
19772 }
19773
19774 /* We may add a PT_ARM_EXIDX program header. */
19775
19776 static int
19777 elf32_arm_additional_program_headers (bfd *abfd,
19778 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19779 {
19780 asection *sec;
19781
19782 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19783 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19784 return 1;
19785 else
19786 return 0;
19787 }
19788
19789 /* Hook called by the linker routine which adds symbols from an object
19790 file. */
19791
19792 static bfd_boolean
19793 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19794 Elf_Internal_Sym *sym, const char **namep,
19795 flagword *flagsp, asection **secp, bfd_vma *valp)
19796 {
19797 if (elf32_arm_hash_table (info) == NULL)
19798 return FALSE;
19799
19800 if (elf32_arm_hash_table (info)->vxworks_p
19801 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19802 flagsp, secp, valp))
19803 return FALSE;
19804
19805 return TRUE;
19806 }
19807
19808 /* We use this to override swap_symbol_in and swap_symbol_out. */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external reloc.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: handles Thumb bit.  */
  elf32_arm_swap_symbol_out,	/* Likewise.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19838
19839 static bfd_vma
19840 read_code32 (const bfd *abfd, const bfd_byte *addr)
19841 {
19842 /* V7 BE8 code is always little endian. */
19843 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19844 return bfd_getl32 (addr);
19845
19846 return bfd_get_32 (abfd, addr);
19847 }
19848
19849 static bfd_vma
19850 read_code16 (const bfd *abfd, const bfd_byte *addr)
19851 {
19852 /* V7 BE8 code is always little endian. */
19853 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19854 return bfd_getl16 (addr);
19855
19856 return bfd_get_16 (abfd, addr);
19857 }
19858
/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if the size cannot be determined.  */
19861
19862 static bfd_vma
19863 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19864 {
19865 bfd_vma first_word;
19866 bfd_vma plt0_size;
19867
19868 first_word = read_code32 (abfd, addr);
19869
19870 if (first_word == elf32_arm_plt0_entry[0])
19871 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19872 else if (first_word == elf32_thumb2_plt0_entry[0])
19873 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19874 else
19875 /* We don't yet handle this PLT format. */
19876 return (bfd_vma) -1;
19877
19878 return plt0_size;
19879 }
19880
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if the size cannot be determined.  */
19884
/* Return the size of the PLT entry starting at offset OFFSET of the
   PLT section whose contents begin at START, or (bfd_vma) -1 if the
   entry format is not recognised.  */
static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
19920
19921 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19922
/* Create synthetic "name@plt" symbols for the entries of the PLT, by
   pairing each .rel.plt relocation with the corresponding PLT slot.
   Fills *RET with a malloc'd array of symbols (names stored inline
   after the array) and returns the number created, 0 when there is
   nothing to do, or -1 on error.  Implementation is shamelessly
   borrowed from _bfd_elf_get_synthetic_symtab.  */
static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really is a reloc section tied to the
     dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total allocation — one asymbol per reloc
     plus space for all the "name@plt[+0xADDEND]" strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols; names are packed immediately
     after the symbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Strip leading zeroes from the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20037
20038 static bfd_boolean
20039 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20040 {
20041 if (hdr->sh_flags & SHF_ARM_PURECODE)
20042 *flags |= SEC_ELF_PURECODE;
20043 return TRUE;
20044 }
20045
20046 static flagword
20047 elf32_arm_lookup_section_flags (char *flag_name)
20048 {
20049 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20050 return SHF_ARM_PURECODE;
20051
20052 return SEC_NO_FLAGS;
20053 }
20054
20055 static unsigned int
20056 elf32_arm_count_additional_relocs (asection *sec)
20057 {
20058 struct _arm_elf_section_data *arm_data;
20059 arm_data = get_arm_elf_section_data (sec);
20060
20061 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20062 }
20063
20064 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20065 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20066 FALSE otherwise. ISECTION is the best guess matching section from the
20067 input bfd IBFD, but it might be NULL. */
20068
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I is both the loop index and the result: i == 0 throughout
	   means "no matching text section found".  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header corresponding to the
	       output section of the input section's linked text.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20154
20155 /* Returns TRUE if NAME is an ARM mapping symbol.
20156 Traditionally the symbols $a, $d and $t have been used.
20157 The ARM ELF standard also defines $x (for A64 code). It also allows a
20158 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20159 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20160 not support them here. $t.x indicates the start of ThumbEE instructions. */
20161
20162 static bfd_boolean
20163 is_arm_mapping_symbol (const char * name)
20164 {
20165 return name != NULL /* Paranoia. */
20166 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20167 the mapping symbols could have acquired a prefix.
20168 We do not support this here, since such symbols no
20169 longer conform to the ARM ELF ABI. */
20170 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20171 && (name[2] == 0 || name[2] == '.');
20172 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20173 any characters that follow the period are legal characters for the body
20174 of a symbol's name. For now we just assume that this is the case. */
20175 }
20176
20177 /* Make sure that mapping symbols in object files are not removed via the
20178 "strip --strip-unneeded" tool. These symbols are needed in order to
20179 correctly generate interworking veneers, and for byte swapping code
20180 regions. Once an object file has been linked, it is safe to remove the
20181 symbols as they will no longer be needed. */
20182
20183 static void
20184 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20185 {
20186 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20187 && sym->section != bfd_abs_section_ptr
20188 && is_arm_mapping_symbol (sym->name))
20189 sym->flags |= BSF_KEEP;
20190 }
20191
20192 #undef elf_backend_copy_special_section_fields
20193 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20194
20195 #define ELF_ARCH bfd_arch_arm
20196 #define ELF_TARGET_ID ARM_ELF_DATA
20197 #define ELF_MACHINE_CODE EM_ARM
20198 #ifdef __QNXTARGET__
20199 #define ELF_MAXPAGESIZE 0x1000
20200 #else
20201 #define ELF_MAXPAGESIZE 0x10000
20202 #endif
20203 #define ELF_MINPAGESIZE 0x1000
20204 #define ELF_COMMONPAGESIZE 0x1000
20205
20206 #define bfd_elf32_mkobject elf32_arm_mkobject
20207
20208 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20209 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20210 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20211 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20212 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20213 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20214 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20215 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20216 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20217 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20218 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20219 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20220 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20221
20222 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20223 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20224 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20225 #define elf_backend_check_relocs elf32_arm_check_relocs
20226 #define elf_backend_update_relocs elf32_arm_update_relocs
20227 #define elf_backend_relocate_section elf32_arm_relocate_section
20228 #define elf_backend_write_section elf32_arm_write_section
20229 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20230 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20231 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20232 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20233 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20234 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20235 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20236 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20237 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20238 #define elf_backend_object_p elf32_arm_object_p
20239 #define elf_backend_fake_sections elf32_arm_fake_sections
20240 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20241 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20242 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20243 #define elf_backend_size_info elf32_arm_size_info
20244 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20245 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20246 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20247 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20248 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20249 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20250 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20251 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20252
20253 #define elf_backend_can_refcount 1
20254 #define elf_backend_can_gc_sections 1
20255 #define elf_backend_plt_readonly 1
20256 #define elf_backend_want_got_plt 1
20257 #define elf_backend_want_plt_sym 0
20258 #define elf_backend_want_dynrelro 1
20259 #define elf_backend_may_use_rel_p 1
20260 #define elf_backend_may_use_rela_p 0
20261 #define elf_backend_default_use_rela_p 0
20262 #define elf_backend_dtrel_excludes_plt 1
20263
20264 #define elf_backend_got_header_size 12
20265 #define elf_backend_extern_protected_data 1
20266
20267 #undef elf_backend_obj_attrs_vendor
20268 #define elf_backend_obj_attrs_vendor "aeabi"
20269 #undef elf_backend_obj_attrs_section
20270 #define elf_backend_obj_attrs_section ".ARM.attributes"
20271 #undef elf_backend_obj_attrs_arg_type
20272 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20273 #undef elf_backend_obj_attrs_section_type
20274 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20275 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20276 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20277
20278 #undef elf_backend_section_flags
20279 #define elf_backend_section_flags elf32_arm_section_flags
20280 #undef elf_backend_lookup_section_flags_hook
20281 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20282
20283 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20284
20285 #include "elf32-target.h"
20286
20287 /* Native Client targets. */
20288
20289 #undef TARGET_LITTLE_SYM
20290 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20291 #undef TARGET_LITTLE_NAME
20292 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20293 #undef TARGET_BIG_SYM
20294 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20295 #undef TARGET_BIG_NAME
20296 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20297
20298 /* Like elf32_arm_link_hash_table_create -- but overrides
20299 appropriately for NaCl. */
20300
20301 static struct bfd_link_hash_table *
20302 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20303 {
20304 struct bfd_link_hash_table *ret;
20305
20306 ret = elf32_arm_link_hash_table_create (abfd);
20307 if (ret)
20308 {
20309 struct elf32_arm_link_hash_table *htab
20310 = (struct elf32_arm_link_hash_table *) ret;
20311
20312 htab->nacl_p = 1;
20313
20314 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20315 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20316 }
20317 return ret;
20318 }
20319
20320 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20321 really need to use elf32_arm_modify_segment_map. But we do it
20322 anyway just to reduce gratuitous differences with the stock ARM backend. */
20323
20324 static bfd_boolean
20325 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20326 {
20327 return (elf32_arm_modify_segment_map (abfd, info)
20328 && nacl_modify_segment_map (abfd, info));
20329 }
20330
/* Final-write hook for NaCl targets: run the generic ARM processing
   first, then the NaCl-specific processing.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
20337
20338 static bfd_vma
20339 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20340 const arelent *rel ATTRIBUTE_UNUSED)
20341 {
20342 return plt->vma
20343 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20344 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20345 }
20346
20347 #undef elf32_bed
20348 #define elf32_bed elf32_arm_nacl_bed
20349 #undef bfd_elf32_bfd_link_hash_table_create
20350 #define bfd_elf32_bfd_link_hash_table_create \
20351 elf32_arm_nacl_link_hash_table_create
20352 #undef elf_backend_plt_alignment
20353 #define elf_backend_plt_alignment 4
20354 #undef elf_backend_modify_segment_map
20355 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20356 #undef elf_backend_modify_program_headers
20357 #define elf_backend_modify_program_headers nacl_modify_program_headers
20358 #undef elf_backend_final_write_processing
20359 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20360 #undef bfd_elf32_get_synthetic_symtab
20361 #undef elf_backend_plt_sym_val
20362 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20363 #undef elf_backend_copy_special_section_fields
20364
20365 #undef ELF_MINPAGESIZE
20366 #undef ELF_COMMONPAGESIZE
20367
20368
20369 #include "elf32-target.h"
20370
20371 /* Reset to defaults. */
20372 #undef elf_backend_plt_alignment
20373 #undef elf_backend_modify_segment_map
20374 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20375 #undef elf_backend_modify_program_headers
20376 #undef elf_backend_final_write_processing
20377 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20378 #undef ELF_MINPAGESIZE
20379 #define ELF_MINPAGESIZE 0x1000
20380 #undef ELF_COMMONPAGESIZE
20381 #define ELF_COMMONPAGESIZE 0x1000
20382
20383
20384 /* FDPIC Targets. */
20385
20386 #undef TARGET_LITTLE_SYM
20387 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20388 #undef TARGET_LITTLE_NAME
20389 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20390 #undef TARGET_BIG_SYM
20391 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20392 #undef TARGET_BIG_NAME
20393 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20394 #undef elf_match_priority
20395 #define elf_match_priority 128
20396 #undef ELF_OSABI
20397 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20398
20399 /* Like elf32_arm_link_hash_table_create -- but overrides
20400 appropriately for FDPIC. */
20401
20402 static struct bfd_link_hash_table *
20403 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20404 {
20405 struct bfd_link_hash_table *ret;
20406
20407 ret = elf32_arm_link_hash_table_create (abfd);
20408 if (ret)
20409 {
20410 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20411
20412 htab->fdpic_p = 1;
20413 }
20414 return ret;
20415 }
20416
20417 /* We need dynamic symbols for every section, since segments can
20418 relocate independently. */
20419 static bfd_boolean
20420 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20421 struct bfd_link_info *info
20422 ATTRIBUTE_UNUSED,
20423 asection *p ATTRIBUTE_UNUSED)
20424 {
20425 switch (elf_section_data (p)->this_hdr.sh_type)
20426 {
20427 case SHT_PROGBITS:
20428 case SHT_NOBITS:
20429 /* If sh_type is yet undecided, assume it could be
20430 SHT_PROGBITS/SHT_NOBITS. */
20431 case SHT_NULL:
20432 return FALSE;
20433
20434 /* There shouldn't be section relative relocations
20435 against any other section. */
20436 default:
20437 return TRUE;
20438 }
20439 }
20440
20441 #undef elf32_bed
20442 #define elf32_bed elf32_arm_fdpic_bed
20443
20444 #undef bfd_elf32_bfd_link_hash_table_create
20445 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20446
20447 #undef elf_backend_omit_section_dynsym
20448 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20449
20450 #include "elf32-target.h"
20451
20452 #undef elf_match_priority
20453 #undef ELF_OSABI
20454 #undef elf_backend_omit_section_dynsym
20455
20456 /* VxWorks Targets. */
20457
20458 #undef TARGET_LITTLE_SYM
20459 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20460 #undef TARGET_LITTLE_NAME
20461 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20462 #undef TARGET_BIG_SYM
20463 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20464 #undef TARGET_BIG_NAME
20465 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20466
20467 /* Like elf32_arm_link_hash_table_create -- but overrides
20468 appropriately for VxWorks. */
20469
20470 static struct bfd_link_hash_table *
20471 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20472 {
20473 struct bfd_link_hash_table *ret;
20474
20475 ret = elf32_arm_link_hash_table_create (abfd);
20476 if (ret)
20477 {
20478 struct elf32_arm_link_hash_table *htab
20479 = (struct elf32_arm_link_hash_table *) ret;
20480 htab->use_rel = 0;
20481 htab->vxworks_p = 1;
20482 }
20483 return ret;
20484 }
20485
/* Final-write hook for VxWorks targets: run the generic ARM processing
   first, then the VxWorks-specific processing.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
20492
20493 #undef elf32_bed
20494 #define elf32_bed elf32_arm_vxworks_bed
20495
20496 #undef bfd_elf32_bfd_link_hash_table_create
20497 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20498 #undef elf_backend_final_write_processing
20499 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20500 #undef elf_backend_emit_relocs
20501 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20502
20503 #undef elf_backend_may_use_rel_p
20504 #define elf_backend_may_use_rel_p 0
20505 #undef elf_backend_may_use_rela_p
20506 #define elf_backend_may_use_rela_p 1
20507 #undef elf_backend_default_use_rela_p
20508 #define elf_backend_default_use_rela_p 1
20509 #undef elf_backend_want_plt_sym
20510 #define elf_backend_want_plt_sym 1
20511 #undef ELF_MAXPAGESIZE
20512 #define ELF_MAXPAGESIZE 0x1000
20513
20514 #include "elf32-target.h"
20515
20516
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE if an incompatibility (or
   an error) was detected that should stop the link, TRUE otherwise.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;	/* Cleared on any hard mismatch.  */
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Nothing to merge unless both input and output are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
20739
20740
20741 /* Symbian OS Targets. */
20742
20743 #undef TARGET_LITTLE_SYM
20744 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20745 #undef TARGET_LITTLE_NAME
20746 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20747 #undef TARGET_BIG_SYM
20748 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20749 #undef TARGET_BIG_NAME
20750 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20751
20752 /* Like elf32_arm_link_hash_table_create -- but overrides
20753 appropriately for Symbian OS. */
20754
20755 static struct bfd_link_hash_table *
20756 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20757 {
20758 struct bfd_link_hash_table *ret;
20759
20760 ret = elf32_arm_link_hash_table_create (abfd);
20761 if (ret)
20762 {
20763 struct elf32_arm_link_hash_table *htab
20764 = (struct elf32_arm_link_hash_table *)ret;
20765 /* There is no PLT header for Symbian OS. */
20766 htab->plt_header_size = 0;
20767 /* The PLT entries are each one instruction and one word. */
20768 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20769 htab->symbian_p = 1;
20770 /* Symbian uses armv5t or above, so use_blx is always true. */
20771 htab->use_blx = 1;
20772 htab->root.is_relocatable_executable = 1;
20773 }
20774 return ret;
20775 }
20776
/* Special-section overrides for Symbian OS (BPABI) targets; the table
   is terminated by a NULL-named entry.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
20797
/* Prepare a BPABI object for writing: clear D_PAGED, then run the
   generic ARM begin-write processing.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
20813
20814 static bfd_boolean
20815 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20816 struct bfd_link_info *info)
20817 {
20818 struct elf_segment_map *m;
20819 asection *dynsec;
20820
20821 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20822 segment. However, because the .dynamic section is not marked
20823 with SEC_LOAD, the generic ELF code will not create such a
20824 segment. */
20825 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20826 if (dynsec)
20827 {
20828 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20829 if (m->p_type == PT_DYNAMIC)
20830 break;
20831
20832 if (m == NULL)
20833 {
20834 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20835 m->next = elf_seg_map (abfd);
20836 elf_seg_map (abfd) = m;
20837 }
20838 }
20839
20840 /* Also call the generic arm routine. */
20841 return elf32_arm_modify_segment_map (abfd, info);
20842 }
20843
20844 /* Return address for Ith PLT stub in section PLT, for relocation REL
20845 or (bfd_vma) -1 if it should not be included. */
20846
20847 static bfd_vma
20848 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
20849 const arelent *rel ATTRIBUTE_UNUSED)
20850 {
20851 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
20852 }
20853
20854 #undef elf32_bed
20855 #define elf32_bed elf32_arm_symbian_bed
20856
20857 /* The dynamic sections are not allocated on SymbianOS; the postlinker
20858 will process them and then discard them. */
20859 #undef ELF_DYNAMIC_SEC_FLAGS
20860 #define ELF_DYNAMIC_SEC_FLAGS \
20861 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
20862
20863 #undef elf_backend_emit_relocs
20864
20865 #undef bfd_elf32_bfd_link_hash_table_create
20866 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
20867 #undef elf_backend_special_sections
20868 #define elf_backend_special_sections elf32_arm_symbian_special_sections
20869 #undef elf_backend_begin_write_processing
20870 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
20871 #undef elf_backend_final_write_processing
20872 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20873
20874 #undef elf_backend_modify_segment_map
20875 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
20876
20877 /* There is no .got section for BPABI objects, and hence no header. */
20878 #undef elf_backend_got_header_size
20879 #define elf_backend_got_header_size 0
20880
20881 /* Similarly, there is no .got.plt section. */
20882 #undef elf_backend_want_got_plt
20883 #define elf_backend_want_got_plt 0
20884
20885 #undef elf_backend_plt_sym_val
20886 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
20887
20888 #undef elf_backend_may_use_rel_p
20889 #define elf_backend_may_use_rel_p 1
20890 #undef elf_backend_may_use_rela_p
20891 #define elf_backend_may_use_rela_p 0
20892 #undef elf_backend_default_use_rela_p
20893 #define elf_backend_default_use_rela_p 0
20894 #undef elf_backend_want_plt_sym
20895 #define elf_backend_want_plt_sym 0
20896 #undef elf_backend_dtrel_excludes_plt
20897 #define elf_backend_dtrel_excludes_plt 0
20898 #undef ELF_MAXPAGESIZE
20899 #define ELF_MAXPAGESIZE 0x8000
20900
20901 #include "elf32-target.h"
This page took 0.510374 seconds and 5 git commands to generate.